repo_name | path | copies | size | content | license
|---|---|---|---|---|---|
lukeolson/clean-latex-to-arxiv | parxiv.py | 1 | 12460 | #! /usr/bin/env python
from __future__ import print_function
import errno
import glob
import re
import os
import io
import time
import shutil
import tempfile
import subprocess
import ply.lex
# Python2 FileNotFoundError support
try:
FileNotFoundError
except NameError:
FileNotFoundError = IOError
"""
usage:
python parxiv.py file.tex
this will make arxiv-somelongdatestring with
- file_strip.tex (where includegraphics paths are stripped)
- file_strip.bbl (you should have the .bbl file already)
- all figures
- the class file if custom
- the bib style if custom
- extra files listed in extra.txt
"""
def strip_comments(source):
"""
from https://gist.github.com/dzhuang/dc34cdd7efa43e5ecc1dc981cc906c85
"""
tokens = (
'PERCENT', 'BEGINCOMMENT', 'ENDCOMMENT',
'BACKSLASH', 'CHAR', 'BEGINVERBATIM',
'ENDVERBATIM', 'NEWLINE', 'ESCPCT',
'MAKEATLETTER', 'MAKEATOTHER',
)
states = (
('makeatblock', 'exclusive'),
('makeatlinecomment', 'exclusive'),
('linecomment', 'exclusive'),
('commentenv', 'exclusive'),
('verbatim', 'exclusive')
)
# Deal with escaped backslashes, so we don't
# think they're escaping %
def t_BACKSLASH(t):
r"\\\\"
return t
# Leaving all % in makeatblock
def t_MAKEATLETTER(t):
r"\\makeatletter"
t.lexer.begin("makeatblock")
return t
# One-line comments
def t_PERCENT(t):
r"\%"
t.lexer.begin("linecomment")
# Escaped percent signs
def t_ESCPCT(t):
r"\\\%"
return t
# Comment environment, as defined by verbatim package
def t_BEGINCOMMENT(t):
r"\\begin\s*{\s*comment\s*}"
t.lexer.begin("commentenv")
#Verbatim environment (different treatment of comments within)
def t_BEGINVERBATIM(t):
r"\\begin\s*{\s*verbatim\s*}"
t.lexer.begin("verbatim")
return t
#Any other character in initial state we leave alone
def t_CHAR(t):
r"."
return t
def t_NEWLINE(t):
r"\n"
return t
# End comment environment
def t_commentenv_ENDCOMMENT(t):
r"\\end\s*{\s*comment\s*}"
#Anything after \end{comment} on a line is ignored!
t.lexer.begin('linecomment')
# Ignore comments of comment environment
def t_commentenv_CHAR(t):
r"."
pass
def t_commentenv_NEWLINE(t):
r"\n"
pass
#End of verbatim environment
def t_verbatim_ENDVERBATIM(t):
r"\\end\s*{\s*verbatim\s*}"
t.lexer.begin('INITIAL')
return t
#Leave contents of verbatim environment alone
def t_verbatim_CHAR(t):
r"."
return t
def t_verbatim_NEWLINE(t):
r"\n"
return t
#End a % comment when we get to a new line
def t_linecomment_ENDCOMMENT(t):
r"\n"
t.lexer.begin("INITIAL")
# Newline at the end of a line comment is preserved.
return t
#Ignore anything after a % on a line
def t_linecomment_CHAR(t):
r"."
pass
def t_makeatblock_MAKEATOTHER(t):
r"\\makeatother"
t.lexer.begin('INITIAL')
return t
def t_makeatblock_BACKSLASH(t):
r"\\\\"
return t
# Escaped percent signs in makeatblock
def t_makeatblock_ESCPCT(t):
r"\\\%"
return t
# preserve % in makeatblock
def t_makeatblock_PERCENT(t):
r"\%"
t.lexer.begin("makeatlinecomment")
return t
def t_makeatlinecomment_NEWLINE(t):
r"\n"
t.lexer.begin('makeatblock')
return t
# Leave contents of makeatblock alone
def t_makeatblock_CHAR(t):
r"."
return t
def t_makeatblock_NEWLINE(t):
r"\n"
return t
# For bad characters, we just skip over it
def t_ANY_error(t):
t.lexer.skip(1)
lexer = ply.lex.lex()
lexer.input(source)
return u"".join([tok.value for tok in lexer])
def find_class(source):
"""
(unused)
look for \documentclass[review]{siamart}
then return 'siamart.cls'
"""
classname = re.search(r'\\documentclass.*{(.*)}', source)
if classname:
classname = classname.group(1) + '.cls'
return classname
def find_bibstyle(source):
"""
look for \\bibliographystyle{siamplain}
then return 'siamplain.bst'
"""
bibstylename = re.search(r'\\bibliographystyle{(.*)}', source)
if bibstylename:
bibstylename = bibstylename.group(1) + '.bst'
return bibstylename
def find_figs(source):
"""
look for \graphicspath{{subdir}} (a single subdir)
find figures in \includegraphics[something]{PATH/filename.ext}
\includegraphics{PATH/filename.ext}
make them \includegraphics[something]{PATH-filename.ext}
\includegraphics{PATH-filename.ext}
later: copy figures to arxivdir
"""
findgraphicspath = re.search(r'\\graphicspath{(.*)}', source)
if findgraphicspath:
graphicspaths = findgraphicspath.group(1)
graphicspaths = re.findall('{(.*?)}', graphicspaths)
else:
graphicspaths = []
# keep a list of (figname, figpath)
figlist = []
def repl(m):
figpath = ''
figname = os.path.basename(m.group(2))
figpath = os.path.dirname(m.group(2)).lstrip('./')
if figpath:
newfigname = figpath.replace(' ', '_').replace('/', '_')+'_'+figname
else:
newfigname = figname
newincludegraphics = m.group(1) + newfigname + m.group(3)
figlist.append((figname, figpath, newfigname))
return newincludegraphics
source = re.sub(r'(\\includegraphics.*?{)(.*?)(})', repl, source)
return figlist, source, graphicspaths
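# Illustrative sketch (added comment, not in the original): find_figs rewrites
# path-qualified figures so everything can live in one flat arXiv directory,
# e.g. \includegraphics[width=5cm]{figs/plot.pdf} becomes
# \includegraphics[width=5cm]{figs_plot.pdf}, and ('plot.pdf', 'figs',
# 'figs_plot.pdf') is appended to figlist for the later copy step.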
def flatten(source):
"""
replace arguments of \include{} and \input{}
only input can be nested
include adds a clearpage
includeonly not supported
"""
def repl(m):
inputname = m.group(2)
if not os.path.isfile(inputname):
inputname = inputname + '.tex'
with io.open(inputname, encoding='utf-8') as f:
newtext = f.read()
newtext = re.sub(r'(\\input{)(.*?)(})', repl, newtext)
return newtext
def repl_include(m):
inputname = m.group(2)
if not os.path.isfile(inputname):
inputname = inputname + '.tex'
with io.open(inputname, encoding='utf-8') as f:
newtext = f.read()
newtext = '\\clearpage\n' + newtext
newtext = re.sub(r'(\\input{)(.*?)(})', repl, newtext)
newtext += '\\clearpage\n'
return newtext
dest = re.sub(r'(\\include{)(.*?)(})', repl_include, source, True)
dest = re.sub(r'(\\input{)(.*?)(})', repl, dest)
return dest
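# Illustrative sketch (added comment, not in the original): for a root file
# containing \input{intro} with intro.tex on disk, flatten() returns the root
# source with the \input command replaced by the full text of intro.tex;
# \include{...} is handled the same way but wrapped in \clearpage commands.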
def main(fname):
print('[parxiv] reading %s' % fname)
with io.open(fname, encoding='utf-8') as f:
source = f.read()
print('[parxiv] stripping comments')
source = strip_comments(source)
print('[parxiv] flattening source')
source = flatten(source)
print('[parxiv] stripping comments again')
source = strip_comments(source)
print('[parxiv] finding figures...')
figlist, source, graphicspaths = find_figs(source)
# print('[parxiv] finding article class and bib style')
# localbibstyle = find_bibstyle(source)
print('[parxiv] making directory', end='')
dirname = 'arxiv-' + time.strftime('%c').replace(' ', '-')
dirname = dirname.replace(':', '-')
print(' %s' % dirname)
os.makedirs(dirname)
print('[parxiv] copying class/style files')
# shutil.copy2(localclass, os.path.join(dirname, localclass))
# if localbibstyle is not None:
# shutil.copy2(localbibstyle, os.path.join(dirname, localbibstyle))
for bst in glob.glob('*.bst'):
shutil.copy2(bst, os.path.join(dirname, bst))
for sty in glob.glob('*.sty'):
shutil.copy2(sty, os.path.join(dirname, sty))
for cls in glob.glob('*.cls'):
shutil.copy2(cls, os.path.join(dirname, cls))
print('[parxiv] copying figures')
for figname, figpath, newfigname in figlist:
allpaths = graphicspaths
allpaths += ['./']
_, ext = os.path.splitext(figname)
if ext == '':
figname += '.pdf'
newfigname += '.pdf'
if figpath:
allpaths = [os.path.join(p, figpath) for p in allpaths]
for p in allpaths:
#if 'quartz' in newfigname:
# print(p)
src = os.path.join(p, figname)
dest = os.path.join(dirname, os.path.basename(newfigname))
try:
shutil.copy2(src, dest)
except IOError:
# attempts multiple graphics paths
pass
# copy bbl file
print('[parxiv] copying bbl file')
bblfile = fname.replace('.tex', '.bbl')
newbblfile = fname.replace('.tex', '_strip.bbl')
bblflag = False
try:
shutil.copy2(bblfile, os.path.join(dirname, newbblfile))
bblflag = True
except FileNotFoundError:
print(' ...skipping, not found')
# copy extra files
try:
with io.open('extra.txt', encoding='utf-8') as f:
inputsource = f.read()
except IOError:
print('[parxiv] copying no extra files')
else:
print('[parxiv] copying extra file(s): ', end='')
for f in inputsource.split('\n'):
if os.path.isfile(f):
localname = os.path.basename(f)
print(' %s' % localname, end='')
shutil.copy2(f, os.path.join(dirname, localname))
print('\n')
newtexfile = fname.replace('.tex', '_strip.tex')
print('[parxiv] writing %s' % newtexfile)
with io.open(
os.path.join(dirname, newtexfile), 'w', encoding='utf-8') as fout:
fout.write(source)
print('[parxiv] attempting to generate bbl file')
if not bblflag:
# attempt to generate
# with tempfile.TemporaryDirectory() as d:
# python2 support
try:
d = tempfile.mkdtemp()
try:
args = ['pdflatex',
'-interaction', 'nonstopmode',
'-recorder',
'-output-directory', d,
newtexfile]
# python2 support
try:
from subprocess import DEVNULL
except ImportError:
DEVNULL = open(os.devnull, 'wb')
p = subprocess.Popen(args,
cwd=dirname,
stdin=DEVNULL,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout, stderr = p.communicate()
# copy .bib files
for bib in glob.glob('*.bib'):
shutil.copy2(bib, os.path.join(d, bib))
for bib in glob.glob('*.bst'):
shutil.copy2(bib, os.path.join(d, bib))
args = ['bibtex', newtexfile.replace('.tex', '.aux')]
p = subprocess.Popen(args,
cwd=d,
stdin=DEVNULL,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout, stderr = p.communicate()
except OSError as e:
raise RuntimeError(e)
bblfile = newtexfile.replace('.tex', '.bbl')
if os.path.isfile(os.path.join(d, bblfile)):
print(' ... generated')
shutil.copy2(os.path.join(d, bblfile),
os.path.join(dirname, bblfile))
else:
print(' ... could not generate')
finally:
try:
shutil.rmtree(d)
except OSError as e:
if e.errno != errno.ENOENT:
raise
return source
if __name__ == '__main__':
import sys
if len(sys.argv) != 2:
print('usage: python parxiv.py <filename.tex>')
sys.exit(-1)
fname = sys.argv[1]
source = main(fname)
| mit |
sameenjalal/mavenize-beta | mavenize/apps/item/models.py | 1 | 2128 | from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.core.exceptions import ObjectDoesNotExist
class Item(models.Model):
item_type = models.CharField(max_length=30, default="")
four_star = models.IntegerField(default=0)
three_star = models.IntegerField(default=0)
two_star = models.IntegerField(default=0)
one_star = models.IntegerField(default=0)
reviews = models.IntegerField(default=0)
bookmarks = models.IntegerField(default=0)
def __unicode__(self):
return str(self.id)
def get_popularity(self):
"""
Returns the Popularity model for this item.
"""
if not hasattr(self, '_popularity_cache'):
try:
self._popularity_cache = Popularity.objects.get(
item__id__exact=self.id)
self._popularity_cache.item = self
except Popularity.DoesNotExist:
raise ObjectDoesNotExist
return self._popularity_cache
def get_rating(self):
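# Added note: '/' between Python 2 ints is floor division, so the average
# is truncated; this also raises ZeroDivisionError when get_votes() is 0.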
return (self.four_star*4 + self.three_star*3 +
self.two_star*2 + self.one_star) / self.get_votes()
def get_votes(self):
return (self.four_star + self.three_star + self.two_star +
self.one_star)
class Link(models.Model):
item = models.ForeignKey(Item)
partner = models.CharField(max_length=20)
url = models.CharField(max_length=200)
def __unicode__(self):
return self.url
class Popularity(models.Model):
item = models.OneToOneField(Item, primary_key=True)
today = models.IntegerField(default=0, db_index=True)
week = models.IntegerField(default=0, db_index=True)
month = models.IntegerField(default=0, db_index=True)
alltime = models.IntegerField(default=0, db_index=True)
class Meta:
verbose_name_plural = "Popularities"
def __unicode__(self):
return "Item #%s: %s" % (self.item.id, self.alltime)
@receiver(post_save, sender=Item)
def create_item(sender, instance, created, **kwargs):
if created:
Popularity.objects.create(item=instance)
| mit |
davenovak/mtasa-blue | vendor/google-breakpad/src/testing/scripts/generator/cpp/gmock_class_test.py | 78 | 7135 | #!/usr/bin/env python
#
# Copyright 2009 Neal Norwitz All Rights Reserved.
# Portions Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for gmock.scripts.generator.cpp.gmock_class."""
__author__ = 'nnorwitz@google.com (Neal Norwitz)'
import os
import sys
import unittest
# Allow the cpp imports below to work when run as a standalone script.
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from cpp import ast
from cpp import gmock_class
class TestCase(unittest.TestCase):
"""Helper class that adds assert methods."""
def StripLeadingWhitespace(self, lines):
"""Strip leading whitespace in each line in 'lines'."""
return '\n'.join([s.lstrip() for s in lines.split('\n')])
def assertEqualIgnoreLeadingWhitespace(self, expected_lines, lines):
"""Specialized assert that ignores the indent level."""
self.assertEqual(expected_lines, self.StripLeadingWhitespace(lines))
class GenerateMethodsTest(TestCase):
def GenerateMethodSource(self, cpp_source):
"""Convert C++ source to Google Mock output source lines."""
method_source_lines = []
# <test> is a pseudo-filename, it is not read or written.
builder = ast.BuilderFromSource(cpp_source, '<test>')
ast_list = list(builder.Generate())
gmock_class._GenerateMethods(method_source_lines, cpp_source, ast_list[0])
return '\n'.join(method_source_lines)
def testSimpleMethod(self):
source = """
class Foo {
public:
virtual int Bar();
};
"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD0(Bar,\nint());',
self.GenerateMethodSource(source))
def testSimpleConstMethod(self):
source = """
class Foo {
public:
virtual void Bar(bool flag) const;
};
"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_CONST_METHOD1(Bar,\nvoid(bool flag));',
self.GenerateMethodSource(source))
def testExplicitVoid(self):
source = """
class Foo {
public:
virtual int Bar(void);
};
"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD0(Bar,\nint(void));',
self.GenerateMethodSource(source))
def testStrangeNewlineInParameter(self):
source = """
class Foo {
public:
virtual void Bar(int
a) = 0;
};
"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD1(Bar,\nvoid(int a));',
self.GenerateMethodSource(source))
def testDefaultParameters(self):
source = """
class Foo {
public:
virtual void Bar(int a, char c = 'x') = 0;
};
"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD2(Bar,\nvoid(int, char));',
self.GenerateMethodSource(source))
def testMultipleDefaultParameters(self):
source = """
class Foo {
public:
virtual void Bar(int a = 42, char c = 'x') = 0;
};
"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD2(Bar,\nvoid(int, char));',
self.GenerateMethodSource(source))
def testRemovesCommentsWhenDefaultsArePresent(self):
source = """
class Foo {
public:
virtual void Bar(int a = 42 /* a comment */,
char /* other comment */ c= 'x') = 0;
};
"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD2(Bar,\nvoid(int, char));',
self.GenerateMethodSource(source))
def testDoubleSlashCommentsInParameterListAreRemoved(self):
source = """
class Foo {
public:
virtual void Bar(int a, // inline comments should be elided.
int b // inline comments should be elided.
) const = 0;
};
"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_CONST_METHOD2(Bar,\nvoid(int a, int b));',
self.GenerateMethodSource(source))
def testCStyleCommentsInParameterListAreNotRemoved(self):
# NOTE(nnorwitz): I'm not sure if it's the best behavior to keep these
# comments. Also note that C style comments after the last parameter
# are still elided.
source = """
class Foo {
public:
virtual const string& Bar(int /* keeper */, int b);
};
"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD2(Bar,\nconst string&(int /* keeper */, int b));',
self.GenerateMethodSource(source))
def testArgsOfTemplateTypes(self):
source = """
class Foo {
public:
virtual int Bar(const vector<int>& v, map<int, string>* output);
};"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD2(Bar,\n'
'int(const vector<int>& v, map<int, string>* output));',
self.GenerateMethodSource(source))
def testReturnTypeWithOneTemplateArg(self):
source = """
class Foo {
public:
virtual vector<int>* Bar(int n);
};"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD1(Bar,\nvector<int>*(int n));',
self.GenerateMethodSource(source))
def testReturnTypeWithManyTemplateArgs(self):
source = """
class Foo {
public:
virtual map<int, string> Bar();
};"""
# Comparing the comment text is brittle - we'll think of something
# better in case this gets annoying, but for now let's keep it simple.
self.assertEqualIgnoreLeadingWhitespace(
'// The following line won\'t really compile, as the return\n'
'// type has multiple template arguments. To fix it, use a\n'
'// typedef for the return type.\n'
'MOCK_METHOD0(Bar,\nmap<int, string>());',
self.GenerateMethodSource(source))
class GenerateMocksTest(TestCase):
def GenerateMocks(self, cpp_source):
"""Convert C++ source to complete Google Mock output source."""
# <test> is a pseudo-filename, it is not read or written.
filename = '<test>'
builder = ast.BuilderFromSource(cpp_source, filename)
ast_list = list(builder.Generate())
lines = gmock_class._GenerateMocks(filename, cpp_source, ast_list, None)
return '\n'.join(lines)
def testNamespaces(self):
source = """
namespace Foo {
namespace Bar { class Forward; }
namespace Baz {
class Test {
public:
virtual void Foo();
};
} // namespace Baz
} // namespace Foo
"""
expected = """\
namespace Foo {
namespace Baz {
class MockTest : public Test {
public:
MOCK_METHOD0(Foo,
void());
};
} // namespace Baz
} // namespace Foo
"""
self.assertEqualIgnoreLeadingWhitespace(
expected, self.GenerateMocks(source))
def testClassWithStorageSpecifierMacro(self):
source = """
class STORAGE_SPECIFIER Test {
public:
virtual void Foo();
};
"""
expected = """\
class MockTest : public Test {
public:
MOCK_METHOD0(Foo,
void());
};
"""
self.assertEqualIgnoreLeadingWhitespace(
expected, self.GenerateMocks(source))
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
dgu123/yaml-cpp | test/gmock-1.7.0/gtest/test/gtest_xml_output_unittest.py | 1815 | 14580 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for the gtest_xml_output module"""
__author__ = 'eefacm@gmail.com (Sean Mcafee)'
import datetime
import errno
import os
import re
import sys
from xml.dom import minidom, Node
import gtest_test_utils
import gtest_xml_test_utils
GTEST_FILTER_FLAG = '--gtest_filter'
GTEST_LIST_TESTS_FLAG = '--gtest_list_tests'
GTEST_OUTPUT_FLAG = "--gtest_output"
GTEST_DEFAULT_OUTPUT_FILE = "test_detail.xml"
GTEST_PROGRAM_NAME = "gtest_xml_output_unittest_"
SUPPORTS_STACK_TRACES = False
if SUPPORTS_STACK_TRACES:
STACK_TRACE_TEMPLATE = '\nStack trace:\n*'
else:
STACK_TRACE_TEMPLATE = ''
EXPECTED_NON_EMPTY_XML = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="23" failures="4" disabled="2" errors="0" time="*" timestamp="*" name="AllTests" ad_hoc_property="42">
<testsuite name="SuccessfulTest" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="Succeeds" status="run" time="*" classname="SuccessfulTest"/>
</testsuite>
<testsuite name="FailedTest" tests="1" failures="1" disabled="0" errors="0" time="*">
<testcase name="Fails" status="run" time="*" classname="FailedTest">
<failure message="gtest_xml_output_unittest_.cc:*
Value of: 2
Expected: 1" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Value of: 2
Expected: 1%(stack)s]]></failure>
</testcase>
</testsuite>
<testsuite name="MixedResultTest" tests="3" failures="1" disabled="1" errors="0" time="*">
<testcase name="Succeeds" status="run" time="*" classname="MixedResultTest"/>
<testcase name="Fails" status="run" time="*" classname="MixedResultTest">
<failure message="gtest_xml_output_unittest_.cc:*
Value of: 2
Expected: 1" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Value of: 2
Expected: 1%(stack)s]]></failure>
<failure message="gtest_xml_output_unittest_.cc:*
Value of: 3
Expected: 2" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Value of: 3
Expected: 2%(stack)s]]></failure>
</testcase>
<testcase name="DISABLED_test" status="notrun" time="*" classname="MixedResultTest"/>
</testsuite>
<testsuite name="XmlQuotingTest" tests="1" failures="1" disabled="0" errors="0" time="*">
<testcase name="OutputsCData" status="run" time="*" classname="XmlQuotingTest">
<failure message="gtest_xml_output_unittest_.cc:*
Failed
XML output: <?xml encoding="utf-8"><top><![CDATA[cdata text]]></top>" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Failed
XML output: <?xml encoding="utf-8"><top><![CDATA[cdata text]]>]]><![CDATA[</top>%(stack)s]]></failure>
</testcase>
</testsuite>
<testsuite name="InvalidCharactersTest" tests="1" failures="1" disabled="0" errors="0" time="*">
<testcase name="InvalidCharactersInMessage" status="run" time="*" classname="InvalidCharactersTest">
<failure message="gtest_xml_output_unittest_.cc:*
Failed
Invalid characters in brackets []" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Failed
Invalid characters in brackets []%(stack)s]]></failure>
</testcase>
</testsuite>
<testsuite name="DisabledTest" tests="1" failures="0" disabled="1" errors="0" time="*">
<testcase name="DISABLED_test_not_run" status="notrun" time="*" classname="DisabledTest"/>
</testsuite>
<testsuite name="PropertyRecordingTest" tests="4" failures="0" disabled="0" errors="0" time="*" SetUpTestCase="yes" TearDownTestCase="aye">
<testcase name="OneProperty" status="run" time="*" classname="PropertyRecordingTest" key_1="1"/>
<testcase name="IntValuedProperty" status="run" time="*" classname="PropertyRecordingTest" key_int="1"/>
<testcase name="ThreeProperties" status="run" time="*" classname="PropertyRecordingTest" key_1="1" key_2="2" key_3="3"/>
<testcase name="TwoValuesForOneKeyUsesLastValue" status="run" time="*" classname="PropertyRecordingTest" key_1="2"/>
</testsuite>
<testsuite name="NoFixtureTest" tests="3" failures="0" disabled="0" errors="0" time="*">
<testcase name="RecordProperty" status="run" time="*" classname="NoFixtureTest" key="1"/>
<testcase name="ExternalUtilityThatCallsRecordIntValuedProperty" status="run" time="*" classname="NoFixtureTest" key_for_utility_int="1"/>
<testcase name="ExternalUtilityThatCallsRecordStringValuedProperty" status="run" time="*" classname="NoFixtureTest" key_for_utility_string="1"/>
</testsuite>
<testsuite name="Single/ValueParamTest" tests="4" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasValueParamAttribute/0" value_param="33" status="run" time="*" classname="Single/ValueParamTest" />
<testcase name="HasValueParamAttribute/1" value_param="42" status="run" time="*" classname="Single/ValueParamTest" />
<testcase name="AnotherTestThatHasValueParamAttribute/0" value_param="33" status="run" time="*" classname="Single/ValueParamTest" />
<testcase name="AnotherTestThatHasValueParamAttribute/1" value_param="42" status="run" time="*" classname="Single/ValueParamTest" />
</testsuite>
<testsuite name="TypedTest/0" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="TypedTest/0" />
</testsuite>
<testsuite name="TypedTest/1" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="TypedTest/1" />
</testsuite>
<testsuite name="Single/TypeParameterizedTestCase/0" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="Single/TypeParameterizedTestCase/0" />
</testsuite>
<testsuite name="Single/TypeParameterizedTestCase/1" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="Single/TypeParameterizedTestCase/1" />
</testsuite>
</testsuites>""" % {'stack': STACK_TRACE_TEMPLATE}
EXPECTED_FILTERED_TEST_XML = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*"
timestamp="*" name="AllTests" ad_hoc_property="42">
<testsuite name="SuccessfulTest" tests="1" failures="0" disabled="0"
errors="0" time="*">
<testcase name="Succeeds" status="run" time="*" classname="SuccessfulTest"/>
</testsuite>
</testsuites>"""
EXPECTED_EMPTY_XML = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="0" failures="0" disabled="0" errors="0" time="*"
timestamp="*" name="AllTests">
</testsuites>"""
GTEST_PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath(GTEST_PROGRAM_NAME)
SUPPORTS_TYPED_TESTS = 'TypedTest' in gtest_test_utils.Subprocess(
[GTEST_PROGRAM_PATH, GTEST_LIST_TESTS_FLAG], capture_stderr=False).output
class GTestXMLOutputUnitTest(gtest_xml_test_utils.GTestXMLTestCase):
"""
Unit test for Google Test's XML output functionality.
"""
# This test currently breaks on platforms that do not support typed and
# type-parameterized tests, so we don't run it under them.
if SUPPORTS_TYPED_TESTS:
def testNonEmptyXmlOutput(self):
"""
Runs a test program that generates a non-empty XML output, and
tests that the XML output is expected.
"""
self._TestXmlOutput(GTEST_PROGRAM_NAME, EXPECTED_NON_EMPTY_XML, 1)
def testEmptyXmlOutput(self):
"""Verifies XML output for a Google Test binary without actual tests.
Runs a test program that generates an empty XML output, and
tests that the XML output is expected.
"""
self._TestXmlOutput('gtest_no_test_unittest', EXPECTED_EMPTY_XML, 0)
def testTimestampValue(self):
"""Checks whether the timestamp attribute in the XML output is valid.
Runs a test program that generates an empty XML output, and checks if
the timestamp attribute in the testsuites tag is valid.
"""
actual = self._GetXmlOutput('gtest_no_test_unittest', [], 0)
date_time_str = actual.documentElement.getAttributeNode('timestamp').value
# datetime.strptime() is only available in Python 2.5+ so we have to
# parse the expected datetime manually.
match = re.match(r'(\d+)-(\d\d)-(\d\d)T(\d\d):(\d\d):(\d\d)', date_time_str)
self.assertTrue(
match,
'XML datetime string %s has incorrect format' % date_time_str)
date_time_from_xml = datetime.datetime(
year=int(match.group(1)), month=int(match.group(2)),
day=int(match.group(3)), hour=int(match.group(4)),
minute=int(match.group(5)), second=int(match.group(6)))
time_delta = abs(datetime.datetime.now() - date_time_from_xml)
# timestamp value should be near the current local time
self.assertTrue(time_delta < datetime.timedelta(seconds=600),
'time_delta is %s' % time_delta)
actual.unlink()
def testDefaultOutputFile(self):
"""
Confirms that Google Test produces an XML output file with the expected
default name if no name is explicitly specified.
"""
output_file = os.path.join(gtest_test_utils.GetTempDir(),
GTEST_DEFAULT_OUTPUT_FILE)
gtest_prog_path = gtest_test_utils.GetTestExecutablePath(
'gtest_no_test_unittest')
try:
os.remove(output_file)
except OSError, e:
if e.errno != errno.ENOENT:
raise
p = gtest_test_utils.Subprocess(
[gtest_prog_path, '%s=xml' % GTEST_OUTPUT_FLAG],
working_dir=gtest_test_utils.GetTempDir())
self.assert_(p.exited)
self.assertEquals(0, p.exit_code)
self.assert_(os.path.isfile(output_file))
def testSuppressedXmlOutput(self):
"""
Tests that no XML file is generated if the default XML listener is
shut down before RUN_ALL_TESTS is invoked.
"""
xml_path = os.path.join(gtest_test_utils.GetTempDir(),
GTEST_PROGRAM_NAME + 'out.xml')
if os.path.isfile(xml_path):
os.remove(xml_path)
command = [GTEST_PROGRAM_PATH,
'%s=xml:%s' % (GTEST_OUTPUT_FLAG, xml_path),
'--shut_down_xml']
p = gtest_test_utils.Subprocess(command)
if p.terminated_by_signal:
# p.signal is available only if p.terminated_by_signal is True.
self.assertFalse(
p.terminated_by_signal,
'%s was killed by signal %d' % (GTEST_PROGRAM_NAME, p.signal))
else:
self.assert_(p.exited)
self.assertEquals(1, p.exit_code,
"'%s' exited with code %s, which doesn't match "
'the expected exit code %s.'
% (command, p.exit_code, 1))
self.assert_(not os.path.isfile(xml_path))
def testFilteredTestXmlOutput(self):
"""Verifies XML output when a filter is applied.
Runs a test program that executes only some tests and verifies that
non-selected tests do not show up in the XML output.
"""
self._TestXmlOutput(GTEST_PROGRAM_NAME, EXPECTED_FILTERED_TEST_XML, 0,
extra_args=['%s=SuccessfulTest.*' % GTEST_FILTER_FLAG])
def _GetXmlOutput(self, gtest_prog_name, extra_args, expected_exit_code):
"""
Returns the xml output generated by running the program gtest_prog_name.
Furthermore, the program's exit code must be expected_exit_code.
"""
xml_path = os.path.join(gtest_test_utils.GetTempDir(),
gtest_prog_name + 'out.xml')
gtest_prog_path = gtest_test_utils.GetTestExecutablePath(gtest_prog_name)
command = ([gtest_prog_path, '%s=xml:%s' % (GTEST_OUTPUT_FLAG, xml_path)] +
extra_args)
p = gtest_test_utils.Subprocess(command)
if p.terminated_by_signal:
self.assert_(False,
'%s was killed by signal %d' % (gtest_prog_name, p.signal))
else:
self.assert_(p.exited)
self.assertEquals(expected_exit_code, p.exit_code,
"'%s' exited with code %s, which doesn't match "
'the expected exit code %s.'
% (command, p.exit_code, expected_exit_code))
actual = minidom.parse(xml_path)
return actual
def _TestXmlOutput(self, gtest_prog_name, expected_xml,
expected_exit_code, extra_args=None):
"""
Asserts that the XML document generated by running the program
gtest_prog_name matches expected_xml, a string containing another
XML document. Furthermore, the program's exit code must be
expected_exit_code.
"""
actual = self._GetXmlOutput(gtest_prog_name, extra_args or [],
expected_exit_code)
expected = minidom.parseString(expected_xml)
self.NormalizeXml(actual.documentElement)
self.AssertEquivalentNodes(expected.documentElement,
actual.documentElement)
expected.unlink()
actual.unlink()
if __name__ == '__main__':
os.environ['GTEST_STACK_TRACE_DEPTH'] = '1'
gtest_test_utils.Main()
| mit |
dkarakats/edx-platform | cms/lib/xblock/test/test_runtime.py | 25 | 2194 | """
Tests of edX Studio runtime functionality
"""
from urlparse import urlparse
from mock import Mock
from unittest import TestCase
from cms.lib.xblock.runtime import handler_url
class TestHandlerUrl(TestCase):
"""Test the LMS handler_url"""
def setUp(self):
self.block = Mock()
def test_trailing_characters(self):
self.assertFalse(handler_url(self.block, 'handler').endswith('?'))
self.assertFalse(handler_url(self.block, 'handler').endswith('/'))
self.assertFalse(handler_url(self.block, 'handler', 'suffix').endswith('?'))
self.assertFalse(handler_url(self.block, 'handler', 'suffix').endswith('/'))
self.assertFalse(handler_url(self.block, 'handler', 'suffix', 'query').endswith('?'))
self.assertFalse(handler_url(self.block, 'handler', 'suffix', 'query').endswith('/'))
self.assertFalse(handler_url(self.block, 'handler', query='query').endswith('?'))
self.assertFalse(handler_url(self.block, 'handler', query='query').endswith('/'))
def _parsed_query(self, query_string):
"""Return the parsed query string from a handler_url generated with the supplied query_string"""
return urlparse(handler_url(self.block, 'handler', query=query_string)).query
def test_query_string(self):
self.assertIn('foo=bar', self._parsed_query('foo=bar'))
self.assertIn('foo=bar&baz=true', self._parsed_query('foo=bar&baz=true'))
self.assertIn('foo&bar&baz', self._parsed_query('foo&bar&baz'))
def _parsed_path(self, handler_name='handler', suffix=''):
"""Return the parsed path from a handler_url with the supplied handler_name and suffix"""
return urlparse(handler_url(self.block, handler_name, suffix=suffix)).path
def test_suffix(self):
self.assertTrue(self._parsed_path(suffix="foo").endswith('foo'))
self.assertTrue(self._parsed_path(suffix="foo/bar").endswith('foo/bar'))
self.assertTrue(self._parsed_path(suffix="/foo/bar").endswith('/foo/bar'))
def test_handler_name(self):
self.assertIn('handler1', self._parsed_path('handler1'))
self.assertIn('handler_a', self._parsed_path('handler_a'))
| agpl-3.0 |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/sympy/polys/groebnertools.py | 78 | 23399 | """Groebner bases algorithms. """
from __future__ import print_function, division
from sympy.polys.monomials import monomial_mul, monomial_lcm, monomial_divides, term_div
from sympy.polys.orderings import lex
from sympy.polys.polyerrors import DomainError
from sympy.polys.polyconfig import query
from sympy.core.symbol import Dummy
from sympy.core.compatibility import range
def groebner(seq, ring, method=None):
"""
Computes Groebner basis for a set of polynomials in `K[X]`.
Wrapper around the (default) improved Buchberger and the other algorithms
for computing Groebner bases. The choice of algorithm can be changed via
``method`` argument or :func:`setup` from :mod:`sympy.polys.polyconfig`,
where ``method`` can be either ``buchberger`` or ``f5b``.
"""
if method is None:
method = query('groebner')
_groebner_methods = {
'buchberger': _buchberger,
'f5b': _f5b,
}
try:
_groebner = _groebner_methods[method]
except KeyError:
raise ValueError("'%s' is not a valid Groebner bases algorithm (valid are 'buchberger' and 'f5b')" % method)
domain, orig = ring.domain, None
if not domain.has_Field or not domain.has_assoc_Field:
try:
orig, ring = ring, ring.clone(domain=domain.get_field())
except DomainError:
raise DomainError("can't compute a Groebner basis over %s" % domain)
else:
seq = [ s.set_ring(ring) for s in seq ]
G = _groebner(seq, ring)
if orig is not None:
G = [ g.clear_denoms()[1].set_ring(orig) for g in G ]
return G
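# Illustrative usage sketch (added; mirrors the doctest style of f5_reduce
# below and assumes SymPy's public ring constructor):
#
#   >>> from sympy.polys import ring, QQ, lex
#   >>> R, x, y = ring("x,y", QQ, lex)
#   >>> groebner([x**2 + y, x*y - 1], R)
#   [x + y**2, y**3 + 1]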
def _buchberger(f, ring):
"""
Computes Groebner basis for a set of polynomials in `K[X]`.
Given a set of multivariate polynomials `F`, finds another
set `G`, such that Ideal `F = Ideal G` and `G` is a reduced
Groebner basis.
The resulting basis is unique and has monic generators if the
ground domains is a field. Otherwise the result is non-unique
but Groebner bases over e.g. integers can be computed (if the
input polynomials are monic).
Groebner bases can be used to choose specific generators for a
polynomial ideal. Because these bases are unique you can check
for ideal equality by comparing the Groebner bases. To see if
one polynomial lies in an ideal, divide by the elements in the
base and see if the remainder vanishes.
They can also be used to solve systems of polynomial equations
as, by choosing lexicographic ordering, you can eliminate one
variable at a time, provided that the ideal is zero-dimensional
(finite number of solutions).
References
==========
1. [Bose03]_
2. [Giovini91]_
3. [Ajwa95]_
4. [Cox97]_
Algorithm used: an improved version of Buchberger's algorithm
as presented in T. Becker, V. Weispfenning, Groebner Bases: A
Computational Approach to Commutative Algebra, Springer, 1993,
page 232.
"""
order = ring.order
domain = ring.domain
monomial_mul = ring.monomial_mul
monomial_div = ring.monomial_div
monomial_lcm = ring.monomial_lcm
def select(P):
# normal selection strategy
# select the pair with minimum LCM(LM(f), LM(g))
pr = min(P, key=lambda pair: order(monomial_lcm(f[pair[0]].LM, f[pair[1]].LM)))
return pr
def normal(g, J):
h = g.rem([ f[j] for j in J ])
if not h:
return None
else:
h = h.monic()
if not h in I:
I[h] = len(f)
f.append(h)
return h.LM, I[h]
def update(G, B, ih):
# update G using the set of critical pairs B and h
# [BW] page 230
h = f[ih]
mh = h.LM
# filter new pairs (h, g), g in G
C = G.copy()
D = set()
while C:
# select a pair (h, g) by popping an element from C
ig = C.pop()
g = f[ig]
mg = g.LM
LCMhg = monomial_lcm(mh, mg)
def lcm_divides(ip):
# LCM(LM(h), LM(p)) divides LCM(LM(h), LM(g))
m = monomial_lcm(mh, f[ip].LM)
return monomial_div(LCMhg, m)
# HT(h) and HT(g) disjoint: mh*mg == LCMhg
if monomial_mul(mh, mg) == LCMhg or (
not any(lcm_divides(ipx) for ipx in C) and
not any(lcm_divides(pr[1]) for pr in D)):
D.add((ih, ig))
E = set()
while D:
# select h, g from D (h the same as above)
ih, ig = D.pop()
mg = f[ig].LM
LCMhg = monomial_lcm(mh, mg)
if not monomial_mul(mh, mg) == LCMhg:
E.add((ih, ig))
# filter old pairs
B_new = set()
while B:
# select g1, g2 from B (-> CP)
ig1, ig2 = B.pop()
mg1 = f[ig1].LM
mg2 = f[ig2].LM
LCM12 = monomial_lcm(mg1, mg2)
# if HT(h) does not divide lcm(HT(g1), HT(g2))
if not monomial_div(LCM12, mh) or \
monomial_lcm(mg1, mh) == LCM12 or \
monomial_lcm(mg2, mh) == LCM12:
B_new.add((ig1, ig2))
B_new |= E
# filter polynomials
G_new = set()
while G:
ig = G.pop()
mg = f[ig].LM
if not monomial_div(mg, mh):
G_new.add(ig)
G_new.add(ih)
return G_new, B_new
# end of update ################################
if not f:
return []
# replace f with a reduced list of initial polynomials; see [BW] page 203
f1 = f[:]
while True:
f = f1[:]
f1 = []
for i in range(len(f)):
p = f[i]
r = p.rem(f[:i])
if r:
f1.append(r.monic())
if f == f1:
break
I = {} # ip = I[p]; p = f[ip]
F = set() # set of indices of polynomials
G = set() # set of indices of intermediate would-be Groebner basis
CP = set() # set of pairs of indices of critical pairs
for i, h in enumerate(f):
I[h] = i
F.add(i)
#####################################
# algorithm GROEBNERNEWS2 in [BW] page 232
while F:
# select p with minimum monomial according to the monomial ordering
h = min([f[x] for x in F], key=lambda f: order(f.LM))
ih = I[h]
F.remove(ih)
G, CP = update(G, CP, ih)
# count the number of critical pairs which reduce to zero
reductions_to_zero = 0
while CP:
ig1, ig2 = select(CP)
CP.remove((ig1, ig2))
h = spoly(f[ig1], f[ig2], ring)
# ordering divisors is on average more efficient [Cox] page 111
G1 = sorted(G, key=lambda g: order(f[g].LM))
ht = normal(h, G1)
if ht:
G, CP = update(G, CP, ht[1])
else:
reductions_to_zero += 1
######################################
# now G is a Groebner basis; reduce it
Gr = set()
for ig in G:
ht = normal(f[ig], G - set([ig]))
if ht:
Gr.add(ht[1])
Gr = [f[ig] for ig in Gr]
# order according to the monomial ordering
Gr = sorted(Gr, key=lambda f: order(f.LM), reverse=True)
return Gr
def spoly(p1, p2, ring):
"""
Compute LCM(LM(p1), LM(p2))/LM(p1)*p1 - LCM(LM(p1), LM(p2))/LM(p2)*p2
This is the S-poly provided p1 and p2 are monic
"""
LM1 = p1.LM
LM2 = p2.LM
LCM12 = ring.monomial_lcm(LM1, LM2)
m1 = ring.monomial_div(LCM12, LM1)
m2 = ring.monomial_div(LCM12, LM2)
s1 = p1.mul_monom(m1)
s2 = p2.mul_monom(m2)
s = s1 - s2
return s
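# Illustrative sketch (added; same ring as the f5_reduce doctest below):
#
#   >>> R, x, y, z = ring("x,y,z", QQ, lex)
#   >>> spoly(x**2 + y, x*y + 1, R)
#   -x + y**2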
# F5B
# convenience functions
def Sign(f):
return f[0]
def Polyn(f):
return f[1]
def Num(f):
return f[2]
def sig(monomial, index):
return (monomial, index)
def lbp(signature, polynomial, number):
return (signature, polynomial, number)
# signature functions
def sig_cmp(u, v, order):
"""
Compare two signatures by extending the term order to K[X]^n.
u < v iff
- the index of v is greater than the index of u
or
- the index of v is equal to the index of u and u[0] < v[0] w.r.t. order
u > v otherwise
"""
if u[1] > v[1]:
return -1
if u[1] == v[1]:
#if u[0] == v[0]:
# return 0
if order(u[0]) < order(v[0]):
return -1
return 1
def sig_key(s, order):
"""
Key for comparing two signatures.
s = (m, k), t = (n, l)
s < t iff [k > l] or [k == l and m < n]
s > t otherwise
"""
return (-s[1], order(s[0]))
def sig_mult(s, m):
"""
Multiply a signature by a monomial.
The product of a signature (m, i) and a monomial n is defined as
(m * t, i).
"""
return sig(monomial_mul(s[0], m), s[1])
# labeled polynomial functions
def lbp_sub(f, g):
"""
Subtract labeled polynomial g from f.
The signature and number of the difference of f and g are signature
and number of the maximum of f and g, w.r.t. lbp_cmp.
"""
if sig_cmp(Sign(f), Sign(g), Polyn(f).ring.order) < 0:
max_poly = g
else:
max_poly = f
ret = Polyn(f) - Polyn(g)
return lbp(Sign(max_poly), ret, Num(max_poly))
def lbp_mul_term(f, cx):
"""
Multiply a labeled polynomial with a term.
The product of a labeled polynomial (s, p, k) by a monomial is
defined as (m * s, m * p, k).
"""
return lbp(sig_mult(Sign(f), cx[0]), Polyn(f).mul_term(cx), Num(f))
def lbp_cmp(f, g):
"""
Compare two labeled polynomials.
f < g iff
- Sign(f) < Sign(g)
or
- Sign(f) == Sign(g) and Num(f) > Num(g)
f > g otherwise
"""
if sig_cmp(Sign(f), Sign(g), Polyn(f).ring.order) == -1:
return -1
if Sign(f) == Sign(g):
if Num(f) > Num(g):
return -1
#if Num(f) == Num(g):
# return 0
return 1
def lbp_key(f):
"""
Key for comparing two labeled polynomials.
"""
return (sig_key(Sign(f), Polyn(f).ring.order), -Num(f))
# algorithm and helper functions
def critical_pair(f, g, ring):
"""
Compute the critical pair corresponding to two labeled polynomials.
A critical pair is a tuple (um, f, vm, g), where um and vm are
terms such that um * f - vm * g is the S-polynomial of f and g (so,
wlog assume um * f > vm * g).
For performance sake, a critical pair is represented as a tuple
(Sign(um * f), um, f, Sign(vm * g), vm, g), since um * f creates
a new, relatively expensive object in memory, whereas Sign(um *
f) and um are lightweight and f (in the tuple) is a reference to
an already existing object in memory.
"""
domain = ring.domain
ltf = Polyn(f).LT
ltg = Polyn(g).LT
lt = (monomial_lcm(ltf[0], ltg[0]), domain.one)
um = term_div(lt, ltf, domain)
vm = term_div(lt, ltg, domain)
# The full information is not needed (now), so only the product
# with the leading term is considered:
fr = lbp_mul_term(lbp(Sign(f), Polyn(f).leading_term(), Num(f)), um)
gr = lbp_mul_term(lbp(Sign(g), Polyn(g).leading_term(), Num(g)), vm)
# return in proper order, such that the S-polynomial is just
# u_first * f_first - u_second * f_second:
if lbp_cmp(fr, gr) == -1:
return (Sign(gr), vm, g, Sign(fr), um, f)
else:
return (Sign(fr), um, f, Sign(gr), vm, g)
def cp_cmp(c, d):
"""
Compare two critical pairs c and d.
c < d iff
- lbp(c[0], _, Num(c[2]) < lbp(d[0], _, Num(d[2])) (this
corresponds to um_c * f_c and um_d * f_d)
or
- lbp(c[0], _, Num(c[2]) == lbp(d[0], _, Num(d[2])) and
lbp(c[3], _, Num(c[5])) < lbp(d[3], _, Num(d[5])) (this
corresponds to vm_c * g_c and vm_d * g_d)
c > d otherwise
"""
zero = Polyn(c[2]).ring.zero
c0 = lbp(c[0], zero, Num(c[2]))
d0 = lbp(d[0], zero, Num(d[2]))
r = lbp_cmp(c0, d0)
if r == -1:
return -1
if r == 0:
c1 = lbp(c[3], zero, Num(c[5]))
d1 = lbp(d[3], zero, Num(d[5]))
r = lbp_cmp(c1, d1)
if r == -1:
return -1
#if r == 0:
# return 0
return 1
def cp_key(c, ring):
"""
Key for comparing critical pairs.
"""
return (lbp_key(lbp(c[0], ring.zero, Num(c[2]))), lbp_key(lbp(c[3], ring.zero, Num(c[5]))))
def s_poly(cp):
"""
Compute the S-polynomial of a critical pair.
The S-polynomial of a critical pair cp is cp[1] * cp[2] - cp[4] * cp[5].
"""
return lbp_sub(lbp_mul_term(cp[2], cp[1]), lbp_mul_term(cp[5], cp[4]))
def is_rewritable_or_comparable(sign, num, B):
"""
Check if a labeled polynomial is redundant by checking if its
signature and number imply rewritability or comparability.
(sign, num) is comparable if there exists a labeled polynomial
h in B, such that sign[1] (the index) is less than Sign(h)[1]
and sign[0] is divisible by the leading monomial of h.
(sign, num) is rewritable if there exists a labeled polynomial
h in B, such that sign[1] is equal to Sign(h)[1], num < Num(h)
and sign[0] is divisible by Sign(h)[0].
"""
for h in B:
# comparable
if sign[1] < Sign(h)[1]:
if monomial_divides(Polyn(h).LM, sign[0]):
return True
# rewritable
if sign[1] == Sign(h)[1]:
if num < Num(h):
if monomial_divides(Sign(h)[0], sign[0]):
return True
return False
def f5_reduce(f, B):
"""
F5-reduce a labeled polynomial f by B.
Continuously searches for a non-zero labeled polynomial h in B, such
that the leading term lt_h of h divides the leading term lt_f of
f and Sign(lt_h * h) < Sign(f). If such a labeled polynomial h is
found, f gets replaced by f - lt_f / lt_h * h. If no such h can be
found or f is 0, f is no further F5-reducible and f gets returned.
A polynomial that is reducible in the usual sense need not be
F5-reducible, e.g.:
>>> from sympy.polys.groebnertools import lbp, sig, f5_reduce, Polyn
>>> from sympy.polys import ring, QQ, lex
>>> R, x,y,z = ring("x,y,z", QQ, lex)
>>> f = lbp(sig((1, 1, 1), 4), x, 3)
>>> g = lbp(sig((0, 0, 0), 2), x, 2)
>>> Polyn(f).rem([Polyn(g)])
0
>>> f5_reduce(f, [g])
(((1, 1, 1), 4), x, 3)
"""
order = Polyn(f).ring.order
domain = Polyn(f).ring.domain
if not Polyn(f):
return f
while True:
g = f
for h in B:
if Polyn(h):
if monomial_divides(Polyn(h).LM, Polyn(f).LM):
t = term_div(Polyn(f).LT, Polyn(h).LT, domain)
if sig_cmp(sig_mult(Sign(h), t[0]), Sign(f), order) < 0:
# The following check need not be done and is in general slower than without.
#if not is_rewritable_or_comparable(Sign(gp), Num(gp), B):
hp = lbp_mul_term(h, t)
f = lbp_sub(f, hp)
break
if g == f or not Polyn(f):
return f
def _f5b(F, ring):
"""
Computes a reduced Groebner basis for the ideal generated by F.
f5b is an implementation of the F5B algorithm by Yao Sun and
Dingkang Wang. Similarly to Buchberger's algorithm, the algorithm
proceeds by computing critical pairs, computing the S-polynomial,
reducing it and adjoining the reduced S-polynomial if it is not 0.
Unlike Buchberger's algorithm, each polynomial contains additional
information, namely a signature and a number. The signature
specifies the path of computation (i.e. from which polynomial in
the original basis was it derived and how), the number says when
the polynomial was added to the basis. With this information it
is (often) possible to decide if an S-polynomial will reduce to
0 and can be discarded.
Optimizations include: Reducing the generators before computing
a Groebner basis, removing redundant critical pairs when a new
polynomial enters the basis and sorting the critical pairs and
the current basis.
Once a Groebner basis has been found, it gets reduced.
** References **
Yao Sun, Dingkang Wang: "A New Proof for the Correctness of F5
(F5-Like) Algorithm", http://arxiv.org/abs/1004.0084 (specifically
v4)
Thomas Becker, Volker Weispfenning, Groebner bases: A computational
approach to commutative algebra, 1993, p. 203, 216
"""
order = ring.order
domain = ring.domain
# reduce polynomials (like in Mario Pernici's implementation) (Becker, Weispfenning, p. 203)
B = F
while True:
F = B
B = []
for i in range(len(F)):
p = F[i]
r = p.rem(F[:i])
if r:
B.append(r)
if F == B:
break
# basis
B = [lbp(sig(ring.zero_monom, i + 1), F[i], i + 1) for i in range(len(F))]
B.sort(key=lambda f: order(Polyn(f).LM), reverse=True)
# critical pairs
CP = [critical_pair(B[i], B[j], ring) for i in range(len(B)) for j in range(i + 1, len(B))]
CP.sort(key=lambda cp: cp_key(cp, ring), reverse=True)
k = len(B)
reductions_to_zero = 0
while len(CP):
cp = CP.pop()
# discard redundant critical pairs:
if is_rewritable_or_comparable(cp[0], Num(cp[2]), B):
continue
if is_rewritable_or_comparable(cp[3], Num(cp[5]), B):
continue
s = s_poly(cp)
p = f5_reduce(s, B)
p = lbp(Sign(p), Polyn(p).monic(), k + 1)
if Polyn(p):
# remove old critical pairs, that become redundant when adding p:
indices = []
for i, cp in enumerate(CP):
if is_rewritable_or_comparable(cp[0], Num(cp[2]), [p]):
indices.append(i)
elif is_rewritable_or_comparable(cp[3], Num(cp[5]), [p]):
indices.append(i)
for i in reversed(indices):
del CP[i]
# only add new critical pairs that are not made redundant by p:
for g in B:
if Polyn(g):
cp = critical_pair(p, g, ring)
if is_rewritable_or_comparable(cp[0], Num(cp[2]), [p]):
continue
elif is_rewritable_or_comparable(cp[3], Num(cp[5]), [p]):
continue
CP.append(cp)
# sort (other sorting methods/selection strategies were not as successful)
CP.sort(key=lambda cp: cp_key(cp, ring), reverse=True)
# insert p into B:
m = Polyn(p).LM
if order(m) <= order(Polyn(B[-1]).LM):
B.append(p)
else:
for i, q in enumerate(B):
if order(m) > order(Polyn(q).LM):
B.insert(i, p)
break
k += 1
#print(len(B), len(CP), "%d critical pairs removed" % len(indices))
else:
reductions_to_zero += 1
# reduce Groebner basis:
H = [Polyn(g).monic() for g in B]
H = red_groebner(H, ring)
return sorted(H, key=lambda f: order(f.LM), reverse=True)
def red_groebner(G, ring):
"""
Compute reduced Groebner basis, from BeckerWeispfenning93, p. 216
Selects a subset of generators, that already generate the ideal
and computes a reduced Groebner basis for them.
"""
def reduction(P):
"""
The actual reduction algorithm.
"""
Q = []
for i, p in enumerate(P):
h = p.rem(P[:i] + P[i + 1:])
if h:
Q.append(h)
return [p.monic() for p in Q]
F = G
H = []
while F:
f0 = F.pop()
if not any(monomial_divides(f.LM, f0.LM) for f in F + H):
H.append(f0)
# Becker, Weispfenning, p. 217: H is Groebner basis of the ideal generated by G.
return reduction(H)
def is_groebner(G, ring):
"""
Check if G is a Groebner basis.
"""
for i in range(len(G)):
for j in range(i + 1, len(G)):
s = spoly(G[i], G[j], ring)
s = s.rem(G)
if s:
return False
return True
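# Illustrative sketch (added): the reduced basis from the groebner() example
# above passes this check:
#
#   >>> is_groebner([x + y**2, y**3 + 1], R)
#   True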
def is_minimal(G, ring):
"""
Checks if G is a minimal Groebner basis.
"""
order = ring.order
domain = ring.domain
G.sort(key=lambda g: order(g.LM))
for i, g in enumerate(G):
if g.LC != domain.one:
return False
for h in G[:i] + G[i + 1:]:
if monomial_divides(h.LM, g.LM):
return False
return True
def is_reduced(G, ring):
"""
Checks if G is a reduced Groebner basis.
"""
order = ring.order
domain = ring.domain
G.sort(key=lambda g: order(g.LM))
for i, g in enumerate(G):
if g.LC != domain.one:
return False
for term in g:
for h in G[:i] + G[i + 1:]:
if monomial_divides(h.LM, term[0]):
return False
return True
def groebner_lcm(f, g):
"""
Computes LCM of two polynomials using Groebner bases.
The LCM is computed as the unique generator of the intersection
of the two ideals generated by `f` and `g`. The approach is to
compute a Groebner basis, with respect to lexicographic ordering
of `t*f` and `(1 - t)*g`, where `t` is an unrelated variable, and
then keep the generator that does not contain `t`.
References
==========
1. [Cox97]_
"""
if f.ring != g.ring:
raise ValueError("Values should be equal")
ring = f.ring
domain = ring.domain
if not f or not g:
return ring.zero
if len(f) <= 1 and len(g) <= 1:
monom = monomial_lcm(f.LM, g.LM)
coeff = domain.lcm(f.LC, g.LC)
return ring.term_new(monom, coeff)
fc, f = f.primitive()
gc, g = g.primitive()
lcm = domain.lcm(fc, gc)
f_terms = [ ((1,) + monom, coeff) for monom, coeff in f.terms() ]
g_terms = [ ((0,) + monom, coeff) for monom, coeff in g.terms() ] \
+ [ ((1,) + monom,-coeff) for monom, coeff in g.terms() ]
t = Dummy("t")
t_ring = ring.clone(symbols=(t,) + ring.symbols, order=lex)
F = t_ring.from_terms(f_terms)
G = t_ring.from_terms(g_terms)
basis = groebner([F, G], t_ring)
def is_independent(h, j):
return all(not monom[j] for monom in h.monoms())
H = [ h for h in basis if is_independent(h, 0) ]
h_terms = [ (monom[1:], coeff*lcm) for monom, coeff in H[0].terms() ]
h = ring.from_terms(h_terms)
return h
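# Illustrative sketch (added):
#
#   >>> R, x, y = ring("x,y", QQ, lex)
#   >>> groebner_lcm(x**2 - y**2, x - y)
#   x**2 - y**2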
def groebner_gcd(f, g):
"""Computes GCD of two polynomials using Groebner bases. """
if f.ring != g.ring:
raise ValueError("Values should be equal")
domain = f.ring.domain
if not domain.has_Field:
fc, f = f.primitive()
gc, g = g.primitive()
gcd = domain.gcd(fc, gc)
H = (f*g).quo([groebner_lcm(f, g)])
if len(H) != 1:
raise ValueError("Length should be 1")
h = H[0]
if not domain.has_Field:
return gcd*h
else:
return h.monic()
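# Illustrative sketch (added; over QQ the result is monic):
#
#   >>> groebner_gcd(x**2 - y**2, x - y)
#   x - y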
| mit |
skosukhin/spack | lib/spack/spack/schema/__init__.py | 1 | 1512 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
"""This module contains jsonschema files for all of Spack's YAML formats.
"""
from llnl.util.lang import list_modules
# Automatically bring in all sub-modules
__all__ = []
for mod in list_modules(__path__[0]):
__import__('%s.%s' % (__name__, mod))
__all__.append(mod)
| lgpl-2.1 |
tchellomello/home-assistant | homeassistant/components/ecovacs/vacuum.py | 16 | 5879 | """Support for Ecovacs vacuums."""
import logging
import sucks
from homeassistant.components.vacuum import (
SUPPORT_BATTERY,
SUPPORT_CLEAN_SPOT,
SUPPORT_FAN_SPEED,
SUPPORT_LOCATE,
SUPPORT_RETURN_HOME,
SUPPORT_SEND_COMMAND,
SUPPORT_STATUS,
SUPPORT_STOP,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
VacuumEntity,
)
from homeassistant.helpers.icon import icon_for_battery_level
from . import ECOVACS_DEVICES
_LOGGER = logging.getLogger(__name__)
SUPPORT_ECOVACS = (
SUPPORT_BATTERY
| SUPPORT_RETURN_HOME
| SUPPORT_CLEAN_SPOT
| SUPPORT_STOP
| SUPPORT_TURN_OFF
| SUPPORT_TURN_ON
| SUPPORT_LOCATE
| SUPPORT_STATUS
| SUPPORT_SEND_COMMAND
| SUPPORT_FAN_SPEED
)
ATTR_ERROR = "error"
ATTR_COMPONENT_PREFIX = "component_"
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Ecovacs vacuums."""
vacuums = []
for device in hass.data[ECOVACS_DEVICES]:
vacuums.append(EcovacsVacuum(device))
_LOGGER.debug("Adding Ecovacs Vacuums to Home Assistant: %s", vacuums)
add_entities(vacuums, True)
class EcovacsVacuum(VacuumEntity):
"""Ecovacs Vacuums such as Deebot."""
def __init__(self, device):
"""Initialize the Ecovacs Vacuum."""
self.device = device
self.device.connect_and_wait_until_ready()
if self.device.vacuum.get("nick") is not None:
self._name = str(self.device.vacuum["nick"])
else:
# In case there is no nickname defined, use the device id
self._name = str(format(self.device.vacuum["did"]))
self._fan_speed = None
self._error = None
_LOGGER.debug("Vacuum initialized: %s", self.name)
async def async_added_to_hass(self) -> None:
"""Set up the event listeners now that hass is ready."""
self.device.statusEvents.subscribe(lambda _: self.schedule_update_ha_state())
self.device.batteryEvents.subscribe(lambda _: self.schedule_update_ha_state())
self.device.lifespanEvents.subscribe(lambda _: self.schedule_update_ha_state())
self.device.errorEvents.subscribe(self.on_error)
def on_error(self, error):
"""Handle an error event from the robot.
This will not change the entity's state. If the error caused the state
to change, that will come through as a separate on_status event
"""
if error == "no_error":
self._error = None
else:
self._error = error
self.hass.bus.fire(
"ecovacs_error", {"entity_id": self.entity_id, "error": error}
)
self.schedule_update_ha_state()
@property
def should_poll(self) -> bool:
"""Return True if entity has to be polled for state."""
return False
@property
def unique_id(self) -> str:
"""Return an unique ID."""
return self.device.vacuum.get("did")
@property
def is_on(self):
"""Return true if vacuum is currently cleaning."""
return self.device.is_cleaning
@property
def is_charging(self):
"""Return true if vacuum is currently charging."""
return self.device.is_charging
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def supported_features(self):
"""Flag vacuum cleaner robot features that are supported."""
return SUPPORT_ECOVACS
@property
def status(self):
"""Return the status of the vacuum cleaner."""
return self.device.vacuum_status
def return_to_base(self, **kwargs):
"""Set the vacuum cleaner to return to the dock."""
self.device.run(sucks.Charge())
@property
def battery_icon(self):
"""Return the battery icon for the vacuum cleaner."""
return icon_for_battery_level(
battery_level=self.battery_level, charging=self.is_charging
)
@property
def battery_level(self):
"""Return the battery level of the vacuum cleaner."""
if self.device.battery_status is not None:
return self.device.battery_status * 100
return super().battery_level
@property
def fan_speed(self):
"""Return the fan speed of the vacuum cleaner."""
return self.device.fan_speed
@property
def fan_speed_list(self):
"""Get the list of available fan speed steps of the vacuum cleaner."""
return [sucks.FAN_SPEED_NORMAL, sucks.FAN_SPEED_HIGH]
def turn_on(self, **kwargs):
"""Turn the vacuum on and start cleaning."""
self.device.run(sucks.Clean())
def turn_off(self, **kwargs):
"""Turn the vacuum off stopping the cleaning and returning home."""
self.return_to_base()
def stop(self, **kwargs):
"""Stop the vacuum cleaner."""
self.device.run(sucks.Stop())
def clean_spot(self, **kwargs):
"""Perform a spot clean-up."""
self.device.run(sucks.Spot())
def locate(self, **kwargs):
"""Locate the vacuum cleaner."""
self.device.run(sucks.PlaySound())
def set_fan_speed(self, fan_speed, **kwargs):
"""Set fan speed."""
if self.is_on:
self.device.run(sucks.Clean(mode=self.device.clean_status, speed=fan_speed))
def send_command(self, command, params=None, **kwargs):
"""Send a command to a vacuum cleaner."""
self.device.run(sucks.VacBotCommand(command, params))
@property
def device_state_attributes(self):
"""Return the device-specific state attributes of this vacuum."""
data = {}
data[ATTR_ERROR] = self._error
for key, val in self.device.components.items():
attr_name = ATTR_COMPONENT_PREFIX + key
data[attr_name] = int(val * 100)
return data
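# A minimal sketch of the attributes produced by device_state_attributes above,
# assuming a device whose components dict reports fractional lifespans such as
# {'main_brush': 0.87}: the error key is always present, and each component is
# exposed as a percentage under a prefixed key:
#
#     {'error': None, 'component_main_brush': 87}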
| apache-2.0 |
solashirai/edx-platform | lms/djangoapps/commerce/tests/test_views.py | 9 | 4246 | """ Tests for commerce views. """
import json
from uuid import uuid4
from nose.plugins.attrib import attr
import ddt
from django.conf import settings
from django.core.urlresolvers import reverse
from django.test import TestCase
import mock
from student.tests.factories import UserFactory
from openedx.core.djangoapps.theming.test_util import with_is_edx_domain
class UserMixin(object):
""" Mixin for tests involving users. """
def setUp(self):
super(UserMixin, self).setUp()
self.user = UserFactory()
def _login(self):
""" Log into LMS. """
self.client.login(username=self.user.username, password='test')
@attr('shard_1')
@ddt.ddt
class ReceiptViewTests(UserMixin, TestCase):
""" Tests for the receipt view. """
def test_login_required(self):
""" The view should redirect to the login page if the user is not logged in. """
self.client.logout()
response = self.client.post(reverse('commerce:checkout_receipt'))
self.assertEqual(response.status_code, 302)
def post_to_receipt_page(self, post_data):
""" DRY helper """
response = self.client.post(reverse('commerce:checkout_receipt'), params={'basket_id': 1}, data=post_data)
self.assertEqual(response.status_code, 200)
return response
@ddt.data('decision', 'reason_code', 'signed_field_names', None)
def test_is_cybersource(self, post_key):
"""
Ensure the view uses three specific POST keys to detect a request initiated by Cybersource.
"""
self._login()
post_data = {'decision': 'REJECT', 'reason_code': '200', 'signed_field_names': 'dummy'}
if post_key is not None:
# a key will be missing; we will not expect the receipt page to handle a cybersource decision
del post_data[post_key]
expected_pattern = r"<title>(\s+)Receipt"
else:
expected_pattern = r"<title>(\s+)Payment Failed"
response = self.post_to_receipt_page(post_data)
self.assertRegexpMatches(response.content, expected_pattern)
@ddt.data('ACCEPT', 'REJECT', 'ERROR')
def test_cybersource_decision(self, decision):
"""
Ensure the view renders a page appropriately depending on the Cybersource decision.
"""
self._login()
post_data = {'decision': decision, 'reason_code': '200', 'signed_field_names': 'dummy'}
expected_pattern = r"<title>(\s+)Receipt" if decision == 'ACCEPT' else r"<title>(\s+)Payment Failed"
response = self.post_to_receipt_page(post_data)
self.assertRegexpMatches(response.content, expected_pattern)
@ddt.data(True, False)
@mock.patch('commerce.views.is_user_payment_error')
def test_cybersource_message(self, is_user_message_expected, mock_is_user_payment_error):
"""
Ensure that the page displays the right message for the reason_code (it
may be a user error message or a system error message).
"""
mock_is_user_payment_error.return_value = is_user_message_expected
self._login()
response = self.post_to_receipt_page({'decision': 'REJECT', 'reason_code': '99', 'signed_field_names': 'dummy'})
self.assertTrue(mock_is_user_payment_error.called)
self.assertEqual(mock_is_user_payment_error.call_args[0][0], '99')
user_message = "There was a problem with this transaction"
system_message = "A system error occurred while processing your payment"
self.assertRegexpMatches(response.content, user_message if is_user_message_expected else system_message)
self.assertNotRegexpMatches(response.content, user_message if not is_user_message_expected else system_message)
@with_is_edx_domain(True)
def test_hide_nav_header(self):
self._login()
post_data = {'decision': 'ACCEPT', 'reason_code': '200', 'signed_field_names': 'dummy'}
response = self.post_to_receipt_page(post_data)
# Verify that the header navigation links are hidden for the edx.org version
self.assertNotContains(response, "How it Works")
self.assertNotContains(response, "Find courses")
self.assertNotContains(response, "Schools & Partners")
| agpl-3.0 |
shifvb/hash_photos | _gui/main_gui.py | 1 | 9930 | import os
import time
import tkinter as tk
from tkinter import filedialog
from tkinter import messagebox
from tkinter.font import Font
from _gui.get_geometry import get_center_geometry
from _tools.get_hash import get_hash
from _tools.is_valid_file import is_vaild_file
from _tools.move_file import move_file
__version__ = (1, 2, 1)
class HashPhotoApp(object):
def __init__(self):
self.root = tk.Tk()
self.select_dir_entry_var = tk.StringVar()
self.choose_hash_method_var = tk.IntVar(value=0)
self.log_text_area_var = tk.StringVar()
# file-type option variables
self.jpg_check_btn_var = tk.IntVar(value=1)
self.png_check_btn_var = tk.IntVar(value=1)
self.bmp_check_btn_var = tk.IntVar(value=1)
self.gif_check_btn_var = tk.IntVar(value=1)
self.big_font = Font(size=25, )
self.mid_font = Font(size=16, )
self.init_gui()
tk.mainloop()
def init_gui(self):
self.root.geometry(get_center_geometry(self.root))
self.root.title("hash photos v{}.{}.{}".format(*__version__))
# 0. directory chooser for the images/files to rename
select_dir_frame = tk.Frame(self.root)
select_dir_frame.grid(row=0, column=0, columnspan=2)
select_dir_entry = tk.Entry(select_dir_frame, width=59, textvariable=self.select_dir_entry_var)
select_dir_entry.configure(font=self.mid_font)
select_dir_entry.grid(row=0, column=0, padx=5)
select_dir_btn = tk.Button(select_dir_frame, text="select dir", command=self.select_dir_btn_callback)
select_dir_btn.configure(font=self.mid_font)
select_dir_btn.grid(row=0, column=1)
# 1. hash-method selection panel
choose_hash_method_frame = tk.LabelFrame(self.root, text="choose hash method", font=self.mid_font)
choose_hash_method_frame.grid(row=1, column=0)
md5_radio_btn = tk.Radiobutton(choose_hash_method_frame, variable=self.choose_hash_method_var, value=0)
md5_radio_btn.pack(side=tk.LEFT)
md5_label = tk.Label(choose_hash_method_frame, text="md5", font=self.mid_font, )
md5_label.bind("<Button-1>", lambda *args: self.choose_hash_method_var.set(0))
md5_label.pack(side=tk.LEFT)
sha1_radio_btn = tk.Radiobutton(choose_hash_method_frame, variable=self.choose_hash_method_var, value=1)
sha1_radio_btn.pack(side=tk.LEFT)
sha1_label = tk.Label(choose_hash_method_frame, text="sha1", font=self.mid_font)
sha1_label.bind("<Button-1>", lambda *args: self.choose_hash_method_var.set(1))
sha1_label.pack(side=tk.LEFT)
sha256_radio_btn = tk.Radiobutton(choose_hash_method_frame, variable=self.choose_hash_method_var, value=2)
sha256_radio_btn.pack(side=tk.LEFT)
sha256_label = tk.Label(choose_hash_method_frame, text="sha256", font=self.mid_font)
sha256_label.bind("<Button-1>", lambda *args: self.choose_hash_method_var.set(2))
sha256_label.pack(side=tk.LEFT)
sha512_radio_btn = tk.Radiobutton(choose_hash_method_frame, variable=self.choose_hash_method_var, value=3)
sha512_radio_btn.pack(side=tk.LEFT)
sha512_label = tk.Label(choose_hash_method_frame, text="sha512", font=self.mid_font)
sha512_label.bind("<Button-1>", lambda *args: self.choose_hash_method_var.set(3))
sha512_label.pack(side=tk.LEFT)
# 2. file-type selection panel
choose_file_type_frame = tk.LabelFrame(self.root, text="choose file type", font=self.mid_font)
choose_file_type_frame.grid(row=1, column=1)
jpg_check_btn = tk.Checkbutton(choose_file_type_frame, variable=self.jpg_check_btn_var)
jpg_check_btn.pack(side=tk.LEFT)
jpg_label = tk.Label(choose_file_type_frame, text="jpg/jpeg", font=self.mid_font)
jpg_label.bind("<Button-1>", lambda *args: self.jpg_check_btn_var.set(1 - self.jpg_check_btn_var.get()))
jpg_label.pack(side=tk.LEFT)
png_check_btn = tk.Checkbutton(choose_file_type_frame, variable=self.png_check_btn_var)
png_check_btn.pack(side=tk.LEFT)
png_label = tk.Label(choose_file_type_frame, text="png", font=self.mid_font)
png_label.bind("<Button-1>", lambda *args: self.png_check_btn_var.set(1 - self.png_check_btn_var.get()))
png_label.pack(side=tk.LEFT)
bmp_check_btn = tk.Checkbutton(choose_file_type_frame, variable=self.bmp_check_btn_var)
bmp_check_btn.pack(side=tk.LEFT)
bmp_label = tk.Label(choose_file_type_frame, text="bmp", font=self.mid_font)
bmp_label.bind("<Button-1>", lambda *args: self.bmp_check_btn_var.set(1 - self.bmp_check_btn_var.get()))
bmp_label.pack(side=tk.LEFT)
gif_check_btn = tk.Checkbutton(choose_file_type_frame, variable=self.gif_check_btn_var)
gif_check_btn.pack(side=tk.LEFT)
gif_label = tk.Label(choose_file_type_frame, text="gif", font=self.mid_font)
gif_label.bind("<Button-1>", lambda *args: self.gif_check_btn_var.set(1 - self.gif_check_btn_var.get()))
gif_label.pack(side=tk.LEFT)
# log area showing the current status
log_frame = tk.Frame(self.root)
log_frame.grid(row=2, column=0, columnspan=2, sticky=tk.NSEW)
self.log_text_area = tk.Text(log_frame, state=tk.DISABLED, width=70, height=21, font=self.mid_font)
self.log_text_area.configure(wrap='none')
self.log_text_area.grid(row=0, column=0, sticky=tk.NSEW)
log_vert_scrollbar = tk.Scrollbar(log_frame)
log_vert_scrollbar.grid(row=0, column=1, sticky=tk.NS)
log_vert_scrollbar.configure(command=self.log_text_area.yview)
self.log_text_area.configure(yscrollcommand=log_vert_scrollbar.set)
log_hori_scorllbar = tk.Scrollbar(log_frame, orient=tk.HORIZONTAL)
log_hori_scorllbar.grid(row=1, column=0, sticky=tk.EW)
log_hori_scorllbar.configure(command=self.log_text_area.xview)
self.log_text_area.configure(xscrollcommand=log_hori_scorllbar.set)
# buttons below the log
buttons_frame = tk.Frame(self.root)
buttons_frame.grid(row=3, column=0, sticky=tk.EW, columnspan=2)
_padx = 65
_width = 10
# run the renaming
rename_btn = tk.Button(buttons_frame, text="run", command=self.rename_file_btn_callback)
rename_btn.config(font=self.mid_font, width=_width)
rename_btn.grid(row=0, column=0, padx=_padx, )
# copy the log
copy_log_btn = tk.Button(buttons_frame, text="copy log", command=self.copy_log_btn_callback)
copy_log_btn.config(font=self.mid_font, width=_width)
copy_log_btn.grid(row=0, column=1, padx=_padx)
# clear the log
clear_log_btn = tk.Button(buttons_frame, text="clear log", command=self.clear_log_btn_callback)
clear_log_btn.config(font=self.mid_font, width=_width)
clear_log_btn.grid(row=0, column=2, padx=_padx)
def select_dir_btn_callback(self):
"""选择文件夹按钮回调函数"""
self.select_dir_entry_var.set(filedialog.askdirectory())
def rename_file_btn_callback(self):
"""重命名按钮回调函数"""
_workspace = self.select_dir_entry_var.get()
# end the callback immediately if no directory was chosen
if _workspace == "":
messagebox.showinfo(title="info", message="Please choose directory!")
return
# show an error if the directory does not exist
if not os.path.isdir(_workspace):
messagebox.showerror(title="error", message="Directory\n{}\ndoes not exist!".format(_workspace))
return
# make the log area editable
self.log_text_area.configure(state=tk.NORMAL)
self.log_text_area.insert(tk.END, "[{}] rename started\n".format(time.asctime()))
# build the list of files that need renaming
abs_names = [os.path.join(_workspace, _) for _ in os.listdir(_workspace)]
abs_file_names = [_ for _ in abs_names if is_vaild_file(self, _)]
# iterate over the files
for abs_filename in abs_file_names:
# compute the new file name
new_abs_filename = os.path.join(_workspace,
get_hash(self, abs_filename) + os.path.splitext(abs_filename)[1])
try:
# skip if the computed new name is identical to the old name
if abs_filename == new_abs_filename:
self.log_text_area.insert(tk.END, "[INFO] already renamed, skipping ({})\n".format(abs_filename))
continue
# if the new file name differs from the old one but already exists, move this file into the ./backup/{time} directory
if os.path.exists(new_abs_filename):
self.log_text_area.insert(tk.END, "[WARN] file name already exists, skipping and moving it to the backup folder ({})\n".format(abs_filename))
move_file(abs_filename)
continue
# rename the file
os.rename(abs_filename, new_abs_filename)
self.log_text_area.insert(tk.END, "[INFO] 重命名:{} -> {}\n".format(abs_filename, new_abs_filename))
except IOError as err:
self.log_text_area.insert(tk.END, "[ERROR] {}\n".format(err))
# make the log area read-only again
self.log_text_area.configure(state=tk.DISABLED)
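# A minimal sketch of the renaming rule applied above, assuming get_hash()
# returns a hex digest string for the selected algorithm; the names and paths
# here are hypothetical:
#
#     # /photos/cat.jpg -> /photos/<hexdigest>.jpg
#     new_name = os.path.join(workspace, digest + os.path.splitext(old_name)[1])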
def copy_log_btn_callback(self):
"""复制日志到剪贴板"""
self.root.clipboard_clear()
self.root.clipboard_append(self.log_text_area.get("1.0", tk.END))
messagebox.showinfo(title="info", message="Log has been copied to clipboard")
def clear_log_btn_callback(self):
"""清楚日志按钮"""
if messagebox.askyesno(title="clear log", message="Are you sure?"):
self.log_text_area.config(state=tk.NORMAL)
self.log_text_area.delete("1.0", tk.END)
self.log_text_area.config(state=tk.DISABLED)
| apache-2.0 |
amohanta/thug | src/ActiveX/modules/RealPlayer.py | 8 | 1600 | # RealMedia RealPlayer Ierpplug.DLL ActiveX Control
# CVE-2007-5601
import logging
log = logging.getLogger("Thug")
def DoAutoUpdateRequest(self, arg0, arg1, arg2):
if len(arg0) > 1000 or len(arg1) > 1000:
log.ThugLogging.log_exploit_event(self._window.url,
"RealMedia RealPlayer Ierpplug.DLL ActiveX",
"Overflow in DoAutoUpdateRequest",
cve = "CVE-2007-5601")
def PlayerProperty(self, arg):
if arg == 'PRODUCTVERSION':
return '6.0.14.552'
if len(arg) > 1000:
log.ThugLogging.log_exploit_event(self._window.url,
"RealMedia RealPlayer Ierpplug.DLL ActiveX",
"Overflow in PlayerProperty",
cve = "CVE-2007-5601")
def Import(self, arg):
if len(arg) > 0x8000:
log.ThugLogging.log_exploit_event(self._window.url,
"RealMedia RealPlayer Ierpplug.DLL ActiveX",
"Overflow in Import",
cve = "CVE-2007-5601")
def SetConsole(self, val):
self.__dict__['Console'] = val
if len(val) >= 32:
log.ThugLogging.log_exploit_event(self._window.url,
"RealMedia RealPlayer rmoc3260.DLL ActiveX",
"Overflow in Console property",
cve = "CVE-2007-5601")
| gpl-2.0 |
betoesquivel/CIE | flask/lib/python2.7/site-packages/pip-1.5.6-py2.7.egg/pip/vcs/bazaar.py | 393 | 4943 | import os
import tempfile
import re
from pip.backwardcompat import urlparse
from pip.log import logger
from pip.util import rmtree, display_path, call_subprocess
from pip.vcs import vcs, VersionControl
from pip.download import path_to_url
class Bazaar(VersionControl):
name = 'bzr'
dirname = '.bzr'
repo_name = 'branch'
bundle_file = 'bzr-branch.txt'
schemes = ('bzr', 'bzr+http', 'bzr+https', 'bzr+ssh', 'bzr+sftp', 'bzr+ftp', 'bzr+lp')
guide = ('# This was a Bazaar branch; to make it a branch again run:\n'
'bzr branch -r %(rev)s %(url)s .\n')
def __init__(self, url=None, *args, **kwargs):
super(Bazaar, self).__init__(url, *args, **kwargs)
# Python >= 2.7.4, 3.3 doesn't have uses_fragment or non_hierarchical
# Register lp but do not expose as a scheme to support bzr+lp.
if getattr(urlparse, 'uses_fragment', None):
urlparse.uses_fragment.extend(['lp'])
urlparse.non_hierarchical.extend(['lp'])
def parse_vcs_bundle_file(self, content):
url = rev = None
for line in content.splitlines():
if not line.strip() or line.strip().startswith('#'):
continue
match = re.search(r'^bzr\s*branch\s*-r\s*(\d*)', line)
if match:
rev = match.group(1).strip()
url = line[match.end():].strip().split(None, 1)[0]
if url and rev:
return url, rev
return None, None
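# A small sketch of the bundle format parsed above (see the guide string):
# comment lines and blanks are skipped, and the first matching line wins:
#
#     url, rev = Bazaar().parse_vcs_bundle_file(
#         "# This was a Bazaar branch; ...\n"
#         "bzr branch -r 42 lp:some-project .\n")
#     # -> ('lp:some-project', '42')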
def export(self, location):
"""Export the Bazaar repository at the url to the destination location"""
temp_dir = tempfile.mkdtemp('-export', 'pip-')
self.unpack(temp_dir)
if os.path.exists(location):
# Remove the location to make sure Bazaar can export it correctly
rmtree(location)
try:
call_subprocess([self.cmd, 'export', location], cwd=temp_dir,
filter_stdout=self._filter, show_stdout=False)
finally:
rmtree(temp_dir)
def switch(self, dest, url, rev_options):
call_subprocess([self.cmd, 'switch', url], cwd=dest)
def update(self, dest, rev_options):
call_subprocess(
[self.cmd, 'pull', '-q'] + rev_options, cwd=dest)
def obtain(self, dest):
url, rev = self.get_url_rev()
if rev:
rev_options = ['-r', rev]
rev_display = ' (to revision %s)' % rev
else:
rev_options = []
rev_display = ''
if self.check_destination(dest, url, rev_options, rev_display):
logger.notify('Checking out %s%s to %s'
% (url, rev_display, display_path(dest)))
call_subprocess(
[self.cmd, 'branch', '-q'] + rev_options + [url, dest])
def get_url_rev(self):
# hotfix the URL scheme: the parent class strips 'bzr+' from 'bzr+ssh://', so re-add it here
url, rev = super(Bazaar, self).get_url_rev()
if url.startswith('ssh://'):
url = 'bzr+' + url
return url, rev
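# Sketch of the scheme fix above, assuming the parent get_url_rev() strips the
# 'bzr+' prefix and splits the revision off the URL as in pip 1.5:
#
#     ('ssh://host/repo', '42')  ->  ('bzr+ssh://host/repo', '42')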
def get_url(self, location):
urls = call_subprocess(
[self.cmd, 'info'], show_stdout=False, cwd=location)
for line in urls.splitlines():
line = line.strip()
for x in ('checkout of branch: ',
'parent branch: '):
if line.startswith(x):
repo = line.split(x)[1]
if self._is_local_repository(repo):
return path_to_url(repo)
return repo
return None
def get_revision(self, location):
revision = call_subprocess(
[self.cmd, 'revno'], show_stdout=False, cwd=location)
return revision.splitlines()[-1]
def get_tag_revs(self, location):
tags = call_subprocess(
[self.cmd, 'tags'], show_stdout=False, cwd=location)
tag_revs = []
for line in tags.splitlines():
tags_match = re.search(r'([.\w-]+)\s*(.*)$', line)
if tags_match:
tag = tags_match.group(1)
rev = tags_match.group(2)
tag_revs.append((rev.strip(), tag.strip()))
return dict(tag_revs)
def get_src_requirement(self, dist, location, find_tags):
repo = self.get_url(location)
if not repo.lower().startswith('bzr:'):
repo = 'bzr+' + repo
egg_project_name = dist.egg_name().split('-', 1)[0]
if not repo:
return None
current_rev = self.get_revision(location)
tag_revs = self.get_tag_revs(location)
if current_rev in tag_revs:
# It's a tag
full_egg_name = '%s-%s' % (egg_project_name, tag_revs[current_rev])
else:
full_egg_name = '%s-dev_r%s' % (dist.egg_name(), current_rev)
return '%s@%s#egg=%s' % (repo, current_rev, full_egg_name)
vcs.register(Bazaar)
| mit |
princespn/codeigniter | user_guide_src/cilexer/cilexer/cilexer.py | 241 | 2214 | # CodeIgniter
# http://codeigniter.com
#
# An open source application development framework for PHP
#
# This content is released under the MIT License (MIT)
#
# Copyright (c) 2014 - 2015, British Columbia Institute of Technology
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Copyright (c) 2008 - 2014, EllisLab, Inc. (http://ellislab.com/)
# Copyright (c) 2014 - 2015, British Columbia Institute of Technology (http://bcit.ca/)
#
# http://opensource.org/licenses/MIT MIT License
import re
import copy
from pygments.lexer import DelegatingLexer
from pygments.lexers.web import PhpLexer, HtmlLexer
__all__ = ['CodeIgniterLexer']
class CodeIgniterLexer(DelegatingLexer):
"""
Handles HTML, PHP, JavaScript, and CSS highlighting.
PHP is highlighted with the "startinline" option.
"""
name = 'CodeIgniter'
aliases = ['ci', 'codeigniter']
filenames = ['*.html', '*.css', '*.php', '*.xml', '*.static']
mimetypes = ['text/html', 'application/xhtml+xml']
def __init__(self, **options):
super(CodeIgniterLexer, self).__init__(HtmlLexer,
PhpLexer,
startinline=True)
| mit |
AlanCoding/tower-cli | tower_cli/resources/job_event.py | 2 | 1664 | # Copyright 2018, Red Hat, Inc.
# Alan Rominger <arominge@redhat.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tower_cli import models
from tower_cli.cli import types
class Resource(models.BaseResource):
"""A resource for job events."""
cli_help = 'View events from jobs.'
endpoint = '/job_events/'
internal = True
job = models.Field(
type=types.Related('job'), display=True
)
host = models.Field(
type=types.Related('host'), display=True
)
parent = models.Field(
type=types.Related('job_event'), display=False
)
event = models.Field()
playbook = models.Field()
play = models.Field()
task = models.Field()
role = models.Field()
counter = models.Field(display=False)
event_level = models.Field(display=False)
event_data = models.Field(display=False)
failed = models.Field(display=False, type=bool)
changed = models.Field(type=bool)
verbosity = models.Field(display=False, type=int)
def __getattribute__(self, attr):
if attr == 'delete':
raise AttributeError
return super(Resource, self).__getattribute__(attr)
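# A minimal sketch of the effect of __getattribute__ above: job events are
# read-only through the CLI, so reaching for the inherited delete method on
# any Resource instance raises AttributeError:
#
#     getattr(resource, 'delete')  # -> AttributeError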
| apache-2.0 |
ericspod/Eidolon | tests/meshtests/billboardtest.py | 1 | 1140 | # Eidolon Biomedical Framework
# Copyright (C) 2016-8 Eric Kerfoot, King's College London, all rights reserved
#
# This file is part of Eidolon.
#
# Eidolon is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Eidolon is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program (LICENSE.txt). If not, see <http://www.gnu.org/licenses/>
from eidolon import vec3, FT_BB_POINT, BoundBox, PyVertexBuffer
nodes=[vec3(0,0,0),vec3(10.0/3,0,0),vec3(20.0/3,0,0),vec3(10,0,0)]
fig=mgr.callThreadSafe(mgr.scene.createFigure,"testBB","Default",FT_BB_POINT)
vb=PyVertexBuffer(nodes)
fig.fillData(vb,None,True)
mgr.controller.setSeeAllBoundBox(BoundBox(nodes))
mgr.repaint()
| gpl-3.0 |
synergeticsedx/deployment-wipro | common/djangoapps/track/views/tests/test_segmentio.py | 19 | 22011 | """Ensure we can parse events sent to us from the Segment webhook integration"""
from datetime import datetime
import json
from ddt import ddt, data, unpack
from mock import sentinel
from nose.plugins.attrib import attr
from django.contrib.auth.models import User
from django.test.client import RequestFactory
from django.test.utils import override_settings
from openedx.core.lib.tests.assertions.events import assert_event_matches
from track.middleware import TrackMiddleware
from track.tests import EventTrackingTestCase
from track.views import segmentio
SECRET = 'anything'
ENDPOINT = '/segmentio/test/event'
USER_ID = 10
MOBILE_SHIM_PROCESSOR = [
{'ENGINE': 'track.shim.LegacyFieldMappingProcessor'},
{'ENGINE': 'track.shim.PrefixedEventProcessor'},
]
def expect_failure_with_message(message):
"""Ensure the test raises an exception and does not emit an event"""
def test_decorator(func):
def test_decorated(self, *args, **kwargs):
self.assertRaisesRegexp(segmentio.EventValidationError, message, func, self, *args, **kwargs)
self.assert_no_events_emitted()
return test_decorated
return test_decorator
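# Illustrative usage of the decorator above (a sketch; the decorated method
# must live on a TestCase that provides assert_no_events_emitted, as the
# cases below do):
#
#     @expect_failure_with_message(segmentio.ERROR_INVALID_USER_ID)
#     def test_rejects_bad_user_id(self):
#         self.post_segmentio_event(user_id='not-a-number')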
@attr(shard=3)
@ddt
@override_settings(
TRACKING_SEGMENTIO_WEBHOOK_SECRET=SECRET,
TRACKING_IGNORE_URL_PATTERNS=[ENDPOINT],
TRACKING_SEGMENTIO_ALLOWED_TYPES=['track'],
TRACKING_SEGMENTIO_DISALLOWED_SUBSTRING_NAMES=['.bi.'],
TRACKING_SEGMENTIO_SOURCE_MAP={'test-app': 'mobile'},
EVENT_TRACKING_PROCESSORS=MOBILE_SHIM_PROCESSOR,
)
class SegmentIOTrackingTestCase(EventTrackingTestCase):
"""Test processing of Segment events"""
def setUp(self):
super(SegmentIOTrackingTestCase, self).setUp()
self.maxDiff = None # pylint: disable=invalid-name
self.request_factory = RequestFactory()
def test_get_request(self):
request = self.request_factory.get(ENDPOINT)
response = segmentio.segmentio_event(request)
self.assertEquals(response.status_code, 405)
self.assert_no_events_emitted()
@override_settings(
TRACKING_SEGMENTIO_WEBHOOK_SECRET=None
)
def test_no_secret_config(self):
request = self.request_factory.post(ENDPOINT)
response = segmentio.segmentio_event(request)
self.assertEquals(response.status_code, 401)
self.assert_no_events_emitted()
def test_no_secret_provided(self):
request = self.request_factory.post(ENDPOINT)
response = segmentio.segmentio_event(request)
self.assertEquals(response.status_code, 401)
self.assert_no_events_emitted()
def test_secret_mismatch(self):
request = self.create_request(key='y')
response = segmentio.segmentio_event(request)
self.assertEquals(response.status_code, 401)
self.assert_no_events_emitted()
def create_request(self, key=None, **kwargs):
"""Create a fake request that emulates a request from the Segment servers to ours"""
if key is None:
key = SECRET
request = self.request_factory.post(ENDPOINT + "?key=" + key, **kwargs)
if 'data' in kwargs:
request.json = json.loads(kwargs['data'])
return request
@data('identify', 'Group', 'Alias', 'Page', 'identify', 'screen')
def test_segmentio_ignore_actions(self, action):
self.post_segmentio_event(action=action)
self.assert_no_events_emitted()
@data('edx.bi.some_name', 'EDX.BI.CAPITAL_NAME')
def test_segmentio_ignore_names(self, name):
self.post_segmentio_event(name=name)
self.assert_no_events_emitted()
def post_segmentio_event(self, **kwargs):
"""Post a fake Segment event to the view that processes it"""
request = self.create_request(
data=self.create_segmentio_event_json(**kwargs),
content_type='application/json'
)
segmentio.track_segmentio_event(request)
def create_segmentio_event(self, **kwargs):
"""Populate a fake Segment event with data of interest"""
action = kwargs.get('action', 'Track')
sample_event = {
"userId": kwargs.get('user_id', USER_ID),
"event": "Did something",
"properties": {
'name': kwargs.get('name', str(sentinel.name)),
'data': kwargs.get('data', {}),
'context': {
'course_id': kwargs.get('course_id') or '',
'app_name': 'edx.mobile.android',
}
},
"channel": 'server',
"context": {
"library": {
"name": kwargs.get('library_name', 'test-app'),
"version": "unknown"
},
"app": {
"version": "1.0.1",
},
'userAgent': str(sentinel.user_agent),
},
"receivedAt": "2014-08-27T16:33:39.100Z",
"timestamp": "2014-08-27T16:33:39.215Z",
"type": action.lower(),
"projectId": "u0j33yjkr8",
"messageId": "qy52hwp4",
"version": 2,
"integrations": {},
"options": {
"library": "unknown",
"providers": {}
},
"action": action
}
if 'context' in kwargs:
sample_event['properties']['context'].update(kwargs['context'])
return sample_event
def create_segmentio_event_json(self, **kwargs):
"""Return a json string containing a fake Segment event"""
return json.dumps(self.create_segmentio_event(**kwargs))
def test_segmentio_ignore_unknown_libraries(self):
self.post_segmentio_event(library_name='foo')
self.assert_no_events_emitted()
@expect_failure_with_message(segmentio.ERROR_USER_NOT_EXIST)
def test_no_user_for_user_id(self):
self.post_segmentio_event(user_id=40)
@expect_failure_with_message(segmentio.ERROR_INVALID_USER_ID)
def test_invalid_user_id(self):
self.post_segmentio_event(user_id='foobar')
@data('foo/bar/baz', 'course-v1:foo+bar+baz')
def test_success(self, course_id):
middleware = TrackMiddleware()
request = self.create_request(
data=self.create_segmentio_event_json(data={'foo': 'bar'}, course_id=course_id),
content_type='application/json'
)
User.objects.create(pk=USER_ID, username=str(sentinel.username))
middleware.process_request(request)
# The middleware normally emits an event, make sure it doesn't in this case.
self.assert_no_events_emitted()
try:
response = segmentio.segmentio_event(request)
self.assertEquals(response.status_code, 200)
expected_event = {
'accept_language': '',
'referer': '',
'username': str(sentinel.username),
'ip': '',
'session': '',
'event_source': 'mobile',
'event_type': str(sentinel.name),
'name': str(sentinel.name),
'event': {'foo': 'bar'},
'agent': str(sentinel.user_agent),
'page': None,
'time': datetime.strptime("2014-08-27T16:33:39.215Z", "%Y-%m-%dT%H:%M:%S.%fZ"),
'host': 'testserver',
'context': {
'application': {
'name': 'edx.mobile.android',
'version': '1.0.1',
},
'user_id': USER_ID,
'course_id': course_id,
'org_id': u'foo',
'path': ENDPOINT,
'client': {
'library': {
'name': 'test-app',
'version': 'unknown'
},
'app': {
'version': '1.0.1',
},
},
'received_at': datetime.strptime("2014-08-27T16:33:39.100Z", "%Y-%m-%dT%H:%M:%S.%fZ"),
},
}
finally:
middleware.process_response(request, None)
assert_event_matches(expected_event, self.get_event())
def test_invalid_course_id(self):
request = self.create_request(
data=self.create_segmentio_event_json(course_id='invalid'),
content_type='application/json'
)
User.objects.create(pk=USER_ID, username=str(sentinel.username))
segmentio.track_segmentio_event(request)
self.assert_events_emitted()
@expect_failure_with_message(segmentio.ERROR_MISSING_NAME)
def test_missing_name(self):
sample_event_raw = self.create_segmentio_event()
del sample_event_raw['properties']['name']
request = self.create_request(
data=json.dumps(sample_event_raw),
content_type='application/json'
)
User.objects.create(pk=USER_ID, username=str(sentinel.username))
segmentio.track_segmentio_event(request)
@expect_failure_with_message(segmentio.ERROR_MISSING_DATA)
def test_missing_data(self):
sample_event_raw = self.create_segmentio_event()
del sample_event_raw['properties']['data']
request = self.create_request(
data=json.dumps(sample_event_raw),
content_type='application/json'
)
User.objects.create(pk=USER_ID, username=str(sentinel.username))
segmentio.track_segmentio_event(request)
@expect_failure_with_message(segmentio.ERROR_MISSING_TIMESTAMP)
def test_missing_timestamp(self):
sample_event_raw = self.create_event_without_fields('timestamp')
request = self.create_request(
data=json.dumps(sample_event_raw),
content_type='application/json'
)
User.objects.create(pk=USER_ID, username=str(sentinel.username))
segmentio.track_segmentio_event(request)
@expect_failure_with_message(segmentio.ERROR_MISSING_RECEIVED_AT)
def test_missing_received_at(self):
sample_event_raw = self.create_event_without_fields('receivedAt')
request = self.create_request(
data=json.dumps(sample_event_raw),
content_type='application/json'
)
User.objects.create(pk=USER_ID, username=str(sentinel.username))
segmentio.track_segmentio_event(request)
def create_event_without_fields(self, *fields):
"""Create a fake event and remove some fields from it"""
event = self.create_segmentio_event()
for field in fields:
if field in event:
del event[field]
return event
def test_string_user_id(self):
User.objects.create(pk=USER_ID, username=str(sentinel.username))
self.post_segmentio_event(user_id=str(USER_ID))
self.assert_events_emitted()
def test_hiding_failure(self):
sample_event_raw = self.create_event_without_fields('timestamp')
request = self.create_request(
data=json.dumps(sample_event_raw),
content_type='application/json'
)
User.objects.create(pk=USER_ID, username=str(sentinel.username))
response = segmentio.segmentio_event(request)
self.assertEquals(response.status_code, 200)
self.assert_no_events_emitted()
@data(
('edx.video.played', 'play_video'),
('edx.video.paused', 'pause_video'),
('edx.video.stopped', 'stop_video'),
('edx.video.loaded', 'load_video'),
('edx.video.position.changed', 'seek_video'),
('edx.video.transcript.shown', 'show_transcript'),
('edx.video.transcript.hidden', 'hide_transcript'),
)
@unpack
def test_video_event(self, name, event_type):
course_id = 'foo/bar/baz'
middleware = TrackMiddleware()
input_payload = {
'current_time': 132.134456,
'module_id': 'i4x://foo/bar/baz/some_module',
'code': 'mobile'
}
if name == 'edx.video.loaded':
# We use the same input payload for all of these types of events, but the load video event is the only
# one that is not actually expected to contain a "current time" field. So we remove it from the input
# payload here.
del input_payload['current_time']
request = self.create_request(
data=self.create_segmentio_event_json(
name=name,
data=input_payload,
context={
'open_in_browser_url': 'https://testserver/courses/foo/bar/baz/courseware/Week_1/Activity/2',
'course_id': course_id,
'application': {
'name': 'edx.mobileapp.android',
'version': '29',
'component': 'videoplayer'
}
}),
content_type='application/json'
)
User.objects.create(pk=USER_ID, username=str(sentinel.username))
middleware.process_request(request)
try:
response = segmentio.segmentio_event(request)
self.assertEquals(response.status_code, 200)
expected_event = {
'accept_language': '',
'referer': '',
'username': str(sentinel.username),
'ip': '',
'session': '',
'event_source': 'mobile',
'event_type': event_type,
'name': name,
'agent': str(sentinel.user_agent),
'page': 'https://testserver/courses/foo/bar/baz/courseware/Week_1/Activity',
'time': datetime.strptime("2014-08-27T16:33:39.215Z", "%Y-%m-%dT%H:%M:%S.%fZ"),
'host': 'testserver',
'context': {
'user_id': USER_ID,
'course_id': course_id,
'org_id': 'foo',
'path': ENDPOINT,
'client': {
'library': {
'name': 'test-app',
'version': 'unknown'
},
'app': {
'version': '1.0.1',
},
},
'application': {
'name': 'edx.mobileapp.android',
'version': '29',
'component': 'videoplayer'
},
'received_at': datetime.strptime("2014-08-27T16:33:39.100Z", "%Y-%m-%dT%H:%M:%S.%fZ"),
},
'event': {
'currentTime': 132.134456,
'id': 'i4x-foo-bar-baz-some_module',
'code': 'mobile'
}
}
if name == 'edx.video.loaded':
# We use the same expected payload for all of these types of events, but the load video event is the
# only one that is not actually expected to contain a "current time" field. So we remove it from the
# expected event here.
del expected_event['event']['currentTime']
finally:
middleware.process_response(request, None)
actual_event = self.get_event()
assert_event_matches(expected_event, actual_event)
@data(
# Verify positive slide case. Verify slide to onSlideSeek. Verify
# edx.video.seeked emitted from iOS v1.0.02 is changed to
# edx.video.position.changed.
(1, 1, "seek_type", "slide", "onSlideSeek", "edx.video.seeked", "edx.video.position.changed", 'edx.mobileapp.iOS', '1.0.02'),
# Verify negative slide case. Verify slide to onSlideSeek. Verify
# edx.video.seeked to edx.video.position.changed.
(-2, -2, "seek_type", "slide", "onSlideSeek", "edx.video.seeked", "edx.video.position.changed", 'edx.mobileapp.iOS', '1.0.02'),
# Verify +30 is changed to -30 which is incorrectly emitted in iOS
# v1.0.02. Verify skip to onSkipSeek
(30, -30, "seek_type", "skip", "onSkipSeek", "edx.video.position.changed", "edx.video.position.changed", 'edx.mobileapp.iOS', '1.0.02'),
# Verify the correct case of -30 is also handled as well. Verify skip
# to onSkipSeek
(-30, -30, "seek_type", "skip", "onSkipSeek", "edx.video.position.changed", "edx.video.position.changed", 'edx.mobileapp.iOS', '1.0.02'),
# Verify positive slide case where onSkipSeek is changed to
# onSlideSkip. Verify edx.video.seeked emitted from Android v1.0.02 is
# changed to edx.video.position.changed.
(1, 1, "type", "onSkipSeek", "onSlideSeek", "edx.video.seeked", "edx.video.position.changed", 'edx.mobileapp.android', '1.0.02'),
# Verify positive slide case where onSkipSeek is changed to
# onSlideSkip. Verify edx.video.seeked emitted from Android v1.0.02 is
# changed to edx.video.position.changed.
(-2, -2, "type", "onSkipSeek", "onSlideSeek", "edx.video.seeked", "edx.video.position.changed", 'edx.mobileapp.android', '1.0.02'),
# Verify positive skip case where onSkipSeek is not changed and does
# not become negative.
(30, 30, "type", "onSkipSeek", "onSkipSeek", "edx.video.position.changed", "edx.video.position.changed", 'edx.mobileapp.android', '1.0.02'),
# Verify positive skip case where onSkipSeek is not changed.
(-30, -30, "type", "onSkipSeek", "onSkipSeek", "edx.video.position.changed", "edx.video.position.changed", 'edx.mobileapp.android', '1.0.02')
)
@unpack
def test_previous_builds(self,
requested_skip_interval,
expected_skip_interval,
seek_type_key,
seek_type,
expected_seek_type,
name,
expected_name,
platform,
version,
):
"""
Test backwards compatibility of previous app builds
iOS version 1.0.02: Incorrectly emits the skip back 30 seconds as +30
instead of -30.
Android version 1.0.02: Skip and slide were both being returned as a
skip. Skip or slide is determined by checking if the skip time is == -30
Additionally, for both of the above mentioned versions, edx.video.seeked
was sent instead of edx.video.position.changed
"""
course_id = 'foo/bar/baz'
middleware = TrackMiddleware()
input_payload = {
"code": "mobile",
"new_time": 89.699177437,
"old_time": 119.699177437,
seek_type_key: seek_type,
"requested_skip_interval": requested_skip_interval,
'module_id': 'i4x://foo/bar/baz/some_module',
}
request = self.create_request(
data=self.create_segmentio_event_json(
name=name,
data=input_payload,
context={
'open_in_browser_url': 'https://testserver/courses/foo/bar/baz/courseware/Week_1/Activity/2',
'course_id': course_id,
'application': {
'name': platform,
'version': version,
'component': 'videoplayer'
}
},
),
content_type='application/json'
)
User.objects.create(pk=USER_ID, username=str(sentinel.username))
middleware.process_request(request)
try:
response = segmentio.segmentio_event(request)
self.assertEquals(response.status_code, 200)
expected_event = {
'accept_language': '',
'referer': '',
'username': str(sentinel.username),
'ip': '',
'session': '',
'event_source': 'mobile',
'event_type': "seek_video",
'name': expected_name,
'agent': str(sentinel.user_agent),
'page': 'https://testserver/courses/foo/bar/baz/courseware/Week_1/Activity',
'time': datetime.strptime("2014-08-27T16:33:39.215Z", "%Y-%m-%dT%H:%M:%S.%fZ"),
'host': 'testserver',
'context': {
'user_id': USER_ID,
'course_id': course_id,
'org_id': 'foo',
'path': ENDPOINT,
'client': {
'library': {
'name': 'test-app',
'version': 'unknown'
},
'app': {
'version': '1.0.1',
},
},
'application': {
'name': platform,
'version': version,
'component': 'videoplayer'
},
'received_at': datetime.strptime("2014-08-27T16:33:39.100Z", "%Y-%m-%dT%H:%M:%S.%fZ"),
},
'event': {
"code": "mobile",
"new_time": 89.699177437,
"old_time": 119.699177437,
"type": expected_seek_type,
"requested_skip_interval": expected_skip_interval,
'id': 'i4x-foo-bar-baz-some_module',
}
}
finally:
middleware.process_response(request, None)
actual_event = self.get_event()
assert_event_matches(expected_event, actual_event)
| agpl-3.0 |
mancoast/CPythonPyc_test | fail/314_test_normalization.py | 1 | 3162 | from test.support import run_unittest, open_urlresource
import unittest
import sys
import os
from unicodedata import normalize, unidata_version
TESTDATAFILE = "NormalizationTest.txt"
TESTDATAURL = "http://www.unicode.org/Public/" + unidata_version + "/ucd/" + TESTDATAFILE
if os.path.exists(TESTDATAFILE):
f = open(TESTDATAFILE, encoding='utf-8')
l = f.readline()
f.close()
if not unidata_version in l:
os.unlink(TESTDATAFILE)
class RangeError(Exception):
pass
def NFC(str):
return normalize("NFC", str)
def NFKC(str):
return normalize("NFKC", str)
def NFD(str):
return normalize("NFD", str)
def NFKD(str):
return normalize("NFKD", str)
def unistr(data):
data = [int(x, 16) for x in data.split(" ")]
for x in data:
if x > sys.maxunicode:
raise RangeError
return "".join([chr(x) for x in data])
class NormalizationTest(unittest.TestCase):
def test_main(self):
part1_data = {}
# Hit the exception early
try:
open_urlresource(TESTDATAURL, encoding="utf-8")
except IOError:
self.skipTest("Could not retrieve " + TESTDATAURL)
for line in open_urlresource(TESTDATAURL, encoding="utf-8"):
if '#' in line:
line = line.split('#')[0]
line = line.strip()
if not line:
continue
if line.startswith("@Part"):
part = line.split()[0]
continue
try:
c1,c2,c3,c4,c5 = [unistr(x) for x in line.split(';')[:-1]]
except RangeError:
# Skip unsupported characters;
# try at least adding c1 if we are in part1
if part == "@Part1":
try:
c1 = unistr(line.split(';')[0])
except RangeError:
pass
else:
part1_data[c1] = 1
continue
# Perform tests
self.assertTrue(c2 == NFC(c1) == NFC(c2) == NFC(c3), line)
self.assertTrue(c4 == NFC(c4) == NFC(c5), line)
self.assertTrue(c3 == NFD(c1) == NFD(c2) == NFD(c3), line)
self.assertTrue(c5 == NFD(c4) == NFD(c5), line)
self.assertTrue(c4 == NFKC(c1) == NFKC(c2) == \
NFKC(c3) == NFKC(c4) == NFKC(c5),
line)
self.assertTrue(c5 == NFKD(c1) == NFKD(c2) == \
NFKD(c3) == NFKD(c4) == NFKD(c5),
line)
# Record part 1 data
if part == "@Part1":
part1_data[c1] = 1
# Perform tests for all other data
for c in range(sys.maxunicode+1):
X = chr(c)
if X in part1_data:
continue
self.assertTrue(X == NFC(X) == NFD(X) == NFKC(X) == NFKD(X), c)
def test_bug_834676(self):
# Check for bug 834676
normalize('NFC', '\ud55c\uae00')
def test_main():
run_unittest(NormalizationTest)
if __name__ == "__main__":
test_main()
| gpl-3.0 |
myerpengine/odoo | addons/account/account_bank_statement.py | 20 | 29774 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
class account_bank_statement(osv.osv):
def create(self, cr, uid, vals, context=None):
if 'line_ids' in vals:
for idx, line in enumerate(vals['line_ids']):
line[2]['sequence'] = idx + 1
return super(account_bank_statement, self).create(cr, uid, vals, context=context)
def write(self, cr, uid, ids, vals, context=None):
res = super(account_bank_statement, self).write(cr, uid, ids, vals, context=context)
account_bank_statement_line_obj = self.pool.get('account.bank.statement.line')
for statement in self.browse(cr, uid, ids, context):
for idx, line in enumerate(statement.line_ids):
account_bank_statement_line_obj.write(cr, uid, [line.id], {'sequence': idx + 1}, context=context)
return res
def _default_journal_id(self, cr, uid, context=None):
if context is None:
context = {}
journal_pool = self.pool.get('account.journal')
journal_type = context.get('journal_type', False)
company_id = self.pool.get('res.company')._company_default_get(cr, uid, 'account.bank.statement',context=context)
if journal_type:
ids = journal_pool.search(cr, uid, [('type', '=', journal_type),('company_id','=',company_id)])
if ids:
return ids[0]
return False
def _end_balance(self, cursor, user, ids, name, attr, context=None):
res = {}
for statement in self.browse(cursor, user, ids, context=context):
res[statement.id] = statement.balance_start
for line in statement.line_ids:
res[statement.id] += line.amount
return res
def _get_period(self, cr, uid, context=None):
periods = self.pool.get('account.period').find(cr, uid, context=context)
if periods:
return periods[0]
return False
def _currency(self, cursor, user, ids, name, args, context=None):
res = {}
res_currency_obj = self.pool.get('res.currency')
res_users_obj = self.pool.get('res.users')
default_currency = res_users_obj.browse(cursor, user,
user, context=context).company_id.currency_id
for statement in self.browse(cursor, user, ids, context=context):
currency = statement.journal_id.currency
if not currency:
currency = default_currency
res[statement.id] = currency.id
currency_names = {}
for currency_id, currency_name in res_currency_obj.name_get(cursor,
user, [x for x in res.values()], context=context):
currency_names[currency_id] = currency_name
for statement_id in res.keys():
currency_id = res[statement_id]
res[statement_id] = (currency_id, currency_names[currency_id])
return res
def _get_statement(self, cr, uid, ids, context=None):
result = {}
for line in self.pool.get('account.bank.statement.line').browse(cr, uid, ids, context=context):
result[line.statement_id.id] = True
return result.keys()
_order = "date desc, id desc"
_name = "account.bank.statement"
_description = "Bank Statement"
_inherit = ['mail.thread']
_columns = {
'name': fields.char('Reference', size=64, required=True, states={'draft': [('readonly', False)]}, readonly=True, help='If you give a name other than /, the created accounting entries will carry the same name as the statement. This allows the statement entries to share the same reference as the statement itself.'), # readonly for account_cash_statement
'date': fields.date('Date', required=True, states={'confirm': [('readonly', True)]}, select=True),
'journal_id': fields.many2one('account.journal', 'Journal', required=True,
readonly=True, states={'draft':[('readonly',False)]}),
'period_id': fields.many2one('account.period', 'Period', required=True,
states={'confirm':[('readonly', True)]}),
'balance_start': fields.float('Starting Balance', digits_compute=dp.get_precision('Account'),
states={'confirm':[('readonly',True)]}),
'balance_end_real': fields.float('Ending Balance', digits_compute=dp.get_precision('Account'),
states={'confirm': [('readonly', True)]}, help="Computed using the cash control lines"),
'balance_end': fields.function(_end_balance,
store = {
'account.bank.statement': (lambda self, cr, uid, ids, c={}: ids, ['line_ids','move_line_ids','balance_start'], 10),
'account.bank.statement.line': (_get_statement, ['amount'], 10),
},
string="Computed Balance", help='Balance as calculated based on Opening Balance and transaction lines'),
'company_id': fields.related('journal_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True),
'line_ids': fields.one2many('account.bank.statement.line',
'statement_id', 'Statement lines',
states={'confirm':[('readonly', True)]}),
'move_line_ids': fields.one2many('account.move.line', 'statement_id',
'Entry lines', states={'confirm':[('readonly',True)]}),
'state': fields.selection([('draft', 'New'),
('open','Open'), # used by cash statements
('confirm', 'Closed')],
'Status', required=True, readonly="1",
help='When new statement is created the status will be \'Draft\'.\n'
'And after getting confirmation from the bank it will be in \'Confirmed\' status.'),
'currency': fields.function(_currency, string='Currency',
type='many2one', relation='res.currency'),
'account_id': fields.related('journal_id', 'default_debit_account_id', type='many2one', relation='account.account', string='Account used in this journal', readonly=True, help='used in statement reconciliation domain, but shouldn\'t be used elsewhere.'),
'cash_control': fields.related('journal_id', 'cash_control' , type='boolean', relation='account.journal',string='Cash control'),
}
_defaults = {
'name': "/",
'date': fields.date.context_today,
'state': 'draft',
'journal_id': _default_journal_id,
'period_id': _get_period,
'company_id': lambda self,cr,uid,c: self.pool.get('res.company')._company_default_get(cr, uid, 'account.bank.statement',context=c),
}
def _check_company_id(self, cr, uid, ids, context=None):
for statement in self.browse(cr, uid, ids, context=context):
if statement.company_id.id != statement.period_id.company_id.id:
return False
return True
_constraints = [
(_check_company_id, 'The journal and period chosen have to belong to the same company.', ['journal_id','period_id']),
]
def onchange_date(self, cr, uid, ids, date, company_id, context=None):
"""
Find the correct period to use for the given date and company_id, return it and set it in the context
"""
res = {}
period_pool = self.pool.get('account.period')
if context is None:
context = {}
ctx = context.copy()
ctx.update({'company_id': company_id})
pids = period_pool.find(cr, uid, dt=date, context=ctx)
if pids:
res.update({'period_id': pids[0]})
context.update({'period_id': pids[0]})
return {
'value':res,
'context':context,
}
def button_dummy(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {}, context=context)
def _prepare_move(self, cr, uid, st_line, st_line_number, context=None):
"""Prepare the dict of values to create the move from a
statement line. This method may be overridden to implement custom
move generation (making sure to call super() to establish
a clean extension chain).
:param browse_record st_line: account.bank.statement.line record to
create the move from.
:param char st_line_number: will be used as the name of the generated account move
:return: dict of value to create() the account.move
"""
return {
'journal_id': st_line.statement_id.journal_id.id,
'period_id': st_line.statement_id.period_id.id,
'date': st_line.date,
'name': st_line_number,
'ref': st_line.ref,
}
def _prepare_bank_move_line(self, cr, uid, st_line, move_id, amount, company_currency_id,
context=None):
"""Compute the args to build the dict of values to create the bank move line from a
statement line by calling the _prepare_move_line_vals. This method may be
overridden to implement custom move generation (making sure to call super() to
establish a clean extension chain).
:param browse_record st_line: account.bank.statement.line record to
create the move from.
:param int/long move_id: ID of the account.move to link the move line
:param float amount: amount of the move line
:param int/long company_currency_id: ID of currency of the concerned company
:return: dict of value to create() the bank account.move.line
"""
anl_id = st_line.analytic_account_id and st_line.analytic_account_id.id or False
debit = ((amount<0) and -amount) or 0.0
credit = ((amount>0) and amount) or 0.0
cur_id = False
amt_cur = False
if st_line.statement_id.currency.id <> company_currency_id:
cur_id = st_line.statement_id.currency.id
if st_line.account_id and st_line.account_id.currency_id and st_line.account_id.currency_id.id <> company_currency_id:
cur_id = st_line.account_id.currency_id.id
if cur_id:
res_currency_obj = self.pool.get('res.currency')
amt_cur = -res_currency_obj.compute(cr, uid, company_currency_id, cur_id, amount, context=context)
res = self._prepare_move_line_vals(cr, uid, st_line, move_id, debit, credit,
amount_currency=amt_cur, currency_id=cur_id, analytic_id=anl_id, context=context)
return res
def _get_counter_part_account(self, cr, uid, st_line, context=None):
"""Retrieve the account to use in the counterpart move.
This method may be overridden to implement custom move generation (making sure to
call super() to establish a clean extension chain).
:param browse_record st_line: account.bank.statement.line record to
create the move from.
:return: int/long of the account.account to use as counterpart
"""
if st_line.amount >= 0:
return st_line.statement_id.journal_id.default_credit_account_id.id
return st_line.statement_id.journal_id.default_debit_account_id.id
def _get_counter_part_partner(self, cr, uid, st_line, context=None):
"""Retrieve the partner to use in the counterpart move.
This method may be overridden to implement custom move generation (making sure to
call super() to establish a clean extension chain).
:param browse_record st_line: account.bank.statement.line record to
create the move from.
:return: int/long of the res.partner to use as counterpart
"""
return st_line.partner_id and st_line.partner_id.id or False
def _prepare_counterpart_move_line(self, cr, uid, st_line, move_id, amount, company_currency_id,
context=None):
"""Compute the args to build the dict of values to create the counter part move line from a
statement line by calling the _prepare_move_line_vals. This method may be
overridden to implement custom move generation (making sure to call super() to
establish a clean extension chain).
:param browse_record st_line: account.bank.statement.line record to
create the move from.
:param int/long move_id: ID of the account.move to link the move line
:param float amount: amount of the move line
:param int/long account_id: ID of account to use as counter part
:param int/long company_currency_id: ID of currency of the concerned company
:return: dict of value to create() the bank account.move.line
"""
account_id = self._get_counter_part_account(cr, uid, st_line, context=context)
partner_id = self._get_counter_part_partner(cr, uid, st_line, context=context)
debit = ((amount > 0) and amount) or 0.0
credit = ((amount < 0) and -amount) or 0.0
cur_id = False
amt_cur = False
if st_line.statement_id.currency.id <> company_currency_id:
amt_cur = st_line.amount
cur_id = st_line.statement_id.currency.id
return self._prepare_move_line_vals(cr, uid, st_line, move_id, debit, credit,
amount_currency = amt_cur, currency_id = cur_id, account_id = account_id,
partner_id = partner_id, context=context)
def _prepare_move_line_vals(self, cr, uid, st_line, move_id, debit, credit, currency_id = False,
amount_currency= False, account_id = False, analytic_id = False,
partner_id = False, context=None):
"""Prepare the dict of values to create the move line from a
statement line. All non-mandatory args will replace the default computed one.
This method may be overridden to implement custom move generation (making sure to
call super() to establish a clean extension chain).
:param browse_record st_line: account.bank.statement.line record to
create the move from.
:param int/long move_id: ID of the account.move to link the move line
:param float debit: debit amount of the move line
:param float credit: credit amount of the move line
:param int/long currency_id: ID of currency of the move line to create
:param float amount_currency: amount of the debit/credit expressed in the currency_id
:param int/long account_id: ID of the account to use in the move line if different
from the statement line account ID
:param int/long analytic_id: ID of analytic account to put on the move line
:param int/long partner_id: ID of the partner to put on the move line
:return: dict of value to create() the account.move.line
"""
acc_id = account_id or st_line.account_id.id
cur_id = currency_id or st_line.statement_id.currency.id
par_id = partner_id or (((st_line.partner_id) and st_line.partner_id.id) or False)
return {
'name': st_line.name,
'date': st_line.date,
'ref': st_line.ref,
'move_id': move_id,
'partner_id': par_id,
'account_id': acc_id,
'credit': credit,
'debit': debit,
'statement_id': st_line.statement_id.id,
'journal_id': st_line.statement_id.journal_id.id,
'period_id': st_line.statement_id.period_id.id,
'currency_id': amount_currency and cur_id,
'amount_currency': amount_currency,
'analytic_account_id': analytic_id,
}
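# Illustrative override sketch (assumption, not upstream code): a module that
# wants every generated line tagged with the statement line's analytic account
# by default could extend the hook above rather than rewrite it.
#
#   def _prepare_move_line_vals(self, cr, uid, st_line, move_id, debit, credit,
#                               **kwargs):
#       vals = super(account_bank_statement, self)._prepare_move_line_vals(
#           cr, uid, st_line, move_id, debit, credit, **kwargs)
#       if not vals.get('analytic_account_id') and st_line.analytic_account_id:
#           vals['analytic_account_id'] = st_line.analytic_account_id.id
#       return vals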
def create_move_from_st_line(self, cr, uid, st_line_id, company_currency_id, st_line_number, context=None):
"""Create the account move from the statement line.
:param int/long st_line_id: ID of the account.bank.statement.line to create the move from.
:param int/long company_currency_id: ID of the res.currency of the company
:param char st_line_number: will be used as the name of the generated account move
:return: ID of the account.move created
"""
if context is None:
context = {}
res_currency_obj = self.pool.get('res.currency')
account_move_obj = self.pool.get('account.move')
account_move_line_obj = self.pool.get('account.move.line')
account_bank_statement_line_obj = self.pool.get('account.bank.statement.line')
st_line = account_bank_statement_line_obj.browse(cr, uid, st_line_id, context=context)
st = st_line.statement_id
context.update({'date': st_line.date})
move_vals = self._prepare_move(cr, uid, st_line, st_line_number, context=context)
move_id = account_move_obj.create(cr, uid, move_vals, context=context)
account_bank_statement_line_obj.write(cr, uid, [st_line.id], {
'move_ids': [(4, move_id, False)]
})
torec = []
acc_cur = (st_line.amount <= 0 and st.journal_id.default_debit_account_id) or st_line.account_id
context.update({
'res.currency.compute.account': acc_cur,
})
amount = res_currency_obj.compute(cr, uid, st.currency.id,
company_currency_id, st_line.amount, context=context)
bank_move_vals = self._prepare_bank_move_line(cr, uid, st_line, move_id, amount,
company_currency_id, context=context)
move_line_id = account_move_line_obj.create(cr, uid, bank_move_vals, context=context)
torec.append(move_line_id)
counterpart_move_vals = self._prepare_counterpart_move_line(cr, uid, st_line, move_id,
amount, company_currency_id, context=context)
account_move_line_obj.create(cr, uid, counterpart_move_vals, context=context)
for line in account_move_line_obj.browse(cr, uid, [x.id for x in
account_move_obj.browse(cr, uid, move_id,
context=context).line_id],
context=context):
if line.state != 'valid':
raise osv.except_osv(_('Error!'),
_('Journal item "%s" is not valid.') % line.name)
# Bank statement moves are always posted, regardless of the journal's entry_posted flag
account_move_obj.post(cr, uid, [move_id], context=context)
return move_id
def get_next_st_line_number(self, cr, uid, st_number, st_line, context=None):
return st_number + '/' + str(st_line.sequence)
def balance_check(self, cr, uid, st_id, journal_type='bank', context=None):
st = self.browse(cr, uid, st_id, context=context)
if not abs((st.balance_end or 0.0) - st.balance_end_real) < 0.0001:
raise osv.except_osv(_('Error!'),
_('The statement balance is incorrect!\nThe expected balance (%.2f) is different from the computed one (%.2f).') % (st.balance_end_real, st.balance_end))
return True
def statement_close(self, cr, uid, ids, journal_type='bank', context=None):
return self.write(cr, uid, ids, {'state':'confirm'}, context=context)
def check_status_condition(self, cr, uid, state, journal_type='bank'):
return state in ('draft','open')
def button_confirm_bank(self, cr, uid, ids, context=None):
obj_seq = self.pool.get('ir.sequence')
if context is None:
context = {}
for st in self.browse(cr, uid, ids, context=context):
j_type = st.journal_id.type
company_currency_id = st.journal_id.company_id.currency_id.id
if not self.check_status_condition(cr, uid, st.state, journal_type=j_type):
continue
self.balance_check(cr, uid, st.id, journal_type=j_type, context=context)
if (not st.journal_id.default_credit_account_id) \
or (not st.journal_id.default_debit_account_id):
raise osv.except_osv(_('Configuration Error!'),
_('Please verify that an account is defined in the journal.'))
if st.name != '/':
st_number = st.name
else:
c = {'fiscalyear_id': st.period_id.fiscalyear_id.id}
if st.journal_id.sequence_id:
st_number = obj_seq.next_by_id(cr, uid, st.journal_id.sequence_id.id, context=c)
else:
st_number = obj_seq.next_by_code(cr, uid, 'account.bank.statement', context=c)
for line in st.move_line_ids:
if line.state != 'valid':
raise osv.except_osv(_('Error!'),
_('The account entries lines are not in valid state.'))
for st_line in st.line_ids:
if st_line.analytic_account_id:
if not st.journal_id.analytic_journal_id:
raise osv.except_osv(_('No Analytic Journal!'),_("You have to assign an analytic journal on the '%s' journal!") % (st.journal_id.name,))
if not st_line.amount:
continue
st_line_number = self.get_next_st_line_number(cr, uid, st_number, st_line, context)
self.create_move_from_st_line(cr, uid, st_line.id, company_currency_id, st_line_number, context)
self.write(cr, uid, [st.id], {
'name': st_number,
'balance_end_real': st.balance_end
}, context=context)
self.message_post(cr, uid, [st.id], body=_('Statement %s confirmed, journal items were created.') % (st_number,), context=context)
return self.write(cr, uid, ids, {'state':'confirm'}, context=context)
def button_cancel(self, cr, uid, ids, context=None):
done = []
account_move_obj = self.pool.get('account.move')
for st in self.browse(cr, uid, ids, context=context):
if st.state=='draft':
continue
move_ids = []
for line in st.line_ids:
move_ids += [x.id for x in line.move_ids]
account_move_obj.button_cancel(cr, uid, move_ids, context=context)
account_move_obj.unlink(cr, uid, move_ids, context)
done.append(st.id)
return self.write(cr, uid, done, {'state':'draft'}, context=context)
def _compute_balance_end_real(self, cr, uid, journal_id, context=None):
res = False
if journal_id:
journal = self.pool.get('account.journal').browse(cr, uid, journal_id, context=context)
if journal.with_last_closing_balance:
cr.execute('SELECT balance_end_real \
FROM account_bank_statement \
WHERE journal_id = %s AND NOT state = %s \
ORDER BY date DESC,id DESC LIMIT 1', (journal_id, 'draft'))
res = cr.fetchone()
return res and res[0] or 0.0
def onchange_journal_id(self, cr, uid, statement_id, journal_id, context=None):
if not journal_id:
return {}
balance_start = self._compute_balance_end_real(cr, uid, journal_id, context=context)
journal = self.pool.get('account.journal').browse(cr, uid, journal_id, context=context)
currency = journal.currency or journal.company_id.currency_id
res = {'balance_start': balance_start, 'company_id': journal.company_id.id, 'currency': currency.id}
if journal.type == 'cash':
res['cash_control'] = journal.cash_control
return {'value': res}
def unlink(self, cr, uid, ids, context=None):
stat = self.read(cr, uid, ids, ['state'], context=context)
unlink_ids = []
for t in stat:
if t['state'] == 'draft':
unlink_ids.append(t['id'])
else:
raise osv.except_osv(_('Invalid Action!'), _('In order to delete a bank statement, you must first cancel it to delete related journal items.'))
osv.osv.unlink(self, cr, uid, unlink_ids, context=context)
return True
def copy(self, cr, uid, id, default=None, context=None):
if default is None:
default = {}
if context is None:
context = {}
default = default.copy()
default['move_line_ids'] = []
return super(account_bank_statement, self).copy(cr, uid, id, default, context=context)
def button_journal_entries(self, cr, uid, ids, context=None):
ctx = (context or {}).copy()
ctx['journal_id'] = self.browse(cr, uid, ids[0], context=context).journal_id.id
return {
'name': _('Journal Items'),
'view_type':'form',
'view_mode':'tree',
'res_model':'account.move.line',
'view_id':False,
'type':'ir.actions.act_window',
'domain':[('statement_id','in',ids)],
'context':ctx,
}
class account_bank_statement_line(osv.osv):
def onchange_partner_id(self, cr, uid, ids, partner_id, context=None):
obj_partner = self.pool.get('res.partner')
if context is None:
context = {}
if not partner_id:
return {}
part = obj_partner.browse(cr, uid, partner_id, context=context)
if part.supplier and not part.customer:
type = 'supplier'
elif part.customer and not part.supplier:
type = 'customer'
else:
type = 'general'
res_type = self.onchange_type(cr, uid, ids, partner_id=partner_id, type=type, context=context)
if res_type['value'] and res_type['value'].get('account_id', False):
return {'value': {'type': type, 'account_id': res_type['value']['account_id']}}
return {'value': {'type': type}}
def onchange_type(self, cr, uid, line_id, partner_id, type, context=None):
res = {'value': {}}
obj_partner = self.pool.get('res.partner')
if context is None:
context = {}
if not partner_id:
return res
account_id = False
line = self.browse(cr, uid, line_id, context=context)
if not line or not line[0].account_id:
part = obj_partner.browse(cr, uid, partner_id, context=context)
if type == 'supplier':
account_id = part.property_account_payable.id
else:
account_id = part.property_account_receivable.id
res['value']['account_id'] = account_id
return res
_order = "statement_id desc, sequence"
_name = "account.bank.statement.line"
_description = "Bank Statement Line"
_columns = {
'name': fields.char('Description', required=True),
'date': fields.date('Date', required=True),
'amount': fields.float('Amount', digits_compute=dp.get_precision('Account')),
'type': fields.selection([
('supplier','Supplier'),
('customer','Customer'),
('general','General')
], 'Type', required=True),
'partner_id': fields.many2one('res.partner', 'Partner'),
'account_id': fields.many2one('account.account','Account',
required=True),
'statement_id': fields.many2one('account.bank.statement', 'Statement',
select=True, required=True, ondelete='cascade'),
'journal_id': fields.related('statement_id', 'journal_id', type='many2one', relation='account.journal', string='Journal', store=True, readonly=True),
'analytic_account_id': fields.many2one('account.analytic.account', 'Analytic Account'),
'move_ids': fields.many2many('account.move',
'account_bank_statement_line_move_rel', 'statement_line_id','move_id',
'Moves'),
'ref': fields.char('Reference', size=32),
'note': fields.text('Notes'),
'sequence': fields.integer('Sequence', select=True, help="Gives the sequence order when displaying a list of bank statement lines."),
'company_id': fields.related('statement_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True),
}
_defaults = {
'name': lambda self,cr,uid,context={}: self.pool.get('ir.sequence').get(cr, uid, 'account.bank.statement.line'),
'date': lambda self,cr,uid,context={}: context.get('date', fields.date.context_today(self,cr,uid,context=context)),
'type': 'general',
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
kaltura/server | alpha/scripts/utils/apiGrep.py | 1 | 4097 | #!/usr/bin/python
from optparse import OptionParser
import sys
import os
def isLineLogStart(curLine):
if len(curLine) < 20:
return False
if (curLine[4] == '-' and curLine[7] == '-' and curLine[10] == ' ' and
curLine[13] == ':' and curLine[16] == ':'):
return True
return False
def parseCmdLine():
parser = OptionParser(usage='%prog [OPTION]... PATTERN [FILE]...', add_help_option=False)
parser.add_option("--help", help="display this help and exit", action="help")
parser.add_option("-h", "--no-filename",
action="store_true", dest="noFilename", default=False,
help="suppress the file name prefix on output")
parser.add_option("-H", "--with-filename",
action="store_true", dest="withFilename", default=False,
help="print the file name for each match")
parser.add_option("--label", dest="stdinLabel", default="(standard input)", metavar="LABEL",
help="use LABEL as the standard input file name prefix")
parser.add_option("-i", "--ignore-case",
action="store_true", dest="ignoreCase", default=False,
help="ignore case distinctions")
parser.add_option("--match-any",
action="store_true", dest="matchAny", default=False,
help="match the pattern against any line (default is to match only starting log lines)")
parser.add_option("-v", "--invert-match",
action="store_true", dest="invertMatch", default=False,
help="select non-matching lines")
return parser.parse_args()
def shellQuote(s):
return "'" + s.replace("'", "'\\''") + "'"
def matchCaseSensitive(pattern, block):
return pattern in block
def matchCaseInsensitive(pattern, block):
return pattern in block.lower()
def processFileMatchStart(inputFile, pattern, prefix):
output = False
for curLine in inputFile:
logStart = isLineLogStart(curLine)
if output:
if not logStart:
print prefix + curLine.rstrip()
continue
output = False
if logStart and match(pattern, curLine):
print prefix + curLine.rstrip()
output = True
def processFileMatchAny(inputFile, pattern, prefix):
block = ''
for curLine in inputFile:
if isLineLogStart(curLine):
if match(pattern, block):
print prefix + block.rstrip().replace('\n', '\n' + prefix)
block = curLine
elif len(block) < 10485760: # 10MB
block += curLine
if match(pattern, block):
print prefix + block.rstrip().replace('\n', '\n' + prefix)
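# Illustrative invocations (file names are hypothetical):
#   python apiGrep.py 'session-abc123' /var/log/api.log
#   python apiGrep.py -i --match-any 'timeout' api.log api.log.1.gz
# Gzipped inputs are re-dispatched below through `gzip -cd | python apiGrep.py`,
# which the script prefers over Python's gzip module for speed.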
# parse the command line
(options, args) = parseCmdLine()
if len(args) < 1:
baseName = os.path.basename(__file__)
print 'Usage: python %s [OPTION]... PATTERN [FILE]...' % baseName
print 'Try `python %s --help` for more information.' % baseName
sys.exit(1)
pattern = args[0]
fileNames = args[1:]
if len(fileNames) == 0:
fileNames = ['-']
if options.withFilename:
outputFileName = True
elif options.noFilename:
outputFileName = False
else:
outputFileName = len(fileNames) > 1
if options.matchAny:
processFile = processFileMatchAny
else:
processFile = processFileMatchStart
if options.ignoreCase:
match = matchCaseInsensitive
pattern = pattern.lower()
else:
match = matchCaseSensitive
if options.invertMatch:
originalMatch = match
match = lambda p, b: not originalMatch(p, b)
prefix = ''
for fileName in fileNames:
if fileName.endswith('.gz'):
# piping through 'gzip -cd' (i.e. zcat) into python is faster than python's gzip module
params = [__file__, '--label=' + fileName]
if outputFileName:
params.append('-H')
if options.matchAny:
params.append('--match-any')
if options.ignoreCase:
params.append('-i')
if options.invertMatch:
params.append('-v')
params.append(pattern)
params = ' '.join(map(shellQuote, params))
cmdLine = "gzip -cd %s | python %s" % (shellQuote(fileName), params)
if os.system(cmdLine) != 0:
break
continue
if fileName == '-':
inputFile = sys.stdin
else:
inputFile = open(fileName, 'r')
# get the prefix
if outputFileName:
if fileName == '-':
prefix = options.stdinLabel + ':'
else:
prefix = '%s:' % fileName
try:
processFile(inputFile, pattern, prefix)
except IOError: # broken pipe
sys.exit(1)
| agpl-3.0 |
dulaccc/django-accounting | accounting/apps/books/migrations/0004_auto_20141104_1026.py | 3 | 1288 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import accounting.apps.books.utils
def next_invoice_number():
return 100
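# Note: migrations reference a module-level callable so the field default can
# be serialized; the fixed value above presumably stands in for the live
# numbering logic (the accounting.apps.books.utils import suggests where that
# resides), keeping the historical migration state self-contained.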
class Migration(migrations.Migration):
dependencies = [
('books', '0003_auto_20141029_1606'),
]
operations = [
migrations.AlterModelOptions(
name='bill',
options={'ordering': ('-number',)},
),
migrations.AlterModelOptions(
name='estimate',
options={'ordering': ('-number',)},
),
migrations.AlterModelOptions(
name='invoice',
options={'ordering': ('-number',)},
),
migrations.AlterField(
model_name='bill',
name='number',
field=models.CharField(max_length=6, db_index=True, default=next_invoice_number),
),
migrations.AlterField(
model_name='estimate',
name='number',
field=models.CharField(max_length=6, db_index=True, default=next_invoice_number),
),
migrations.AlterField(
model_name='invoice',
name='number',
field=models.CharField(max_length=6, db_index=True, default=next_invoice_number),
),
]
| mit |
clearcare/twilio-python | tests/test_members.py | 3 | 1452 | from mock import patch
from tools import create_mock_json
from twilio.rest.resources import Members
QUEUE_SID = "QU1b9faddec3d54ec18488f86c83019bf0"
ACCOUNT_SID = "AC123"
AUTH = (ACCOUNT_SID, "token")
CALL_SID = "CAaaf2e9ded94aba3e57c42a3d55be6ff2"
BASE_URI = "https://api.twilio.com/2010-04-01/Accounts/AC123/Queues/%s" % (
QUEUE_SID)
TWIML_URL = "example_twiml_url"
list_resource = Members(BASE_URI, AUTH)
@patch("twilio.rest.resources.base.make_twilio_request")
def test_members_list(mock):
resp = create_mock_json("tests/resources/members_list.json")
mock.return_value = resp
uri = "%s/Members" % (BASE_URI)
list_resource.list()
mock.assert_called_with("GET", uri, params={}, auth=AUTH)
@patch("twilio.rest.resources.base.make_twilio_request")
def test_members_dequeue_front(mock):
resp = create_mock_json("tests/resources/members_instance.json")
mock.return_value = resp
uri = "%s/Members/Front" % (BASE_URI)
list_resource.dequeue(TWIML_URL)
mock.assert_called_with("POST", uri, data={"Url": TWIML_URL}, auth=AUTH)
@patch("twilio.rest.resources.base.make_twilio_request")
def test_members_dequeue_call(mock):
resp = create_mock_json("tests/resources/members_instance.json")
mock.return_value = resp
uri = "%s/Members/%s" % (BASE_URI, CALL_SID)
list_resource.dequeue(TWIML_URL, call_sid=CALL_SID)
mock.assert_called_with("POST", uri, data={"Url": TWIML_URL}, auth=AUTH)
| mit |
flgiordano/netcash | +/google-cloud-sdk/lib/surface/sql/instances/patch.py | 1 | 13800 | # Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Updates the settings of a Cloud SQL instance."""
from googlecloudsdk.api_lib.sql import errors
from googlecloudsdk.api_lib.sql import instances
from googlecloudsdk.api_lib.sql import operations
from googlecloudsdk.api_lib.sql import validate
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.core import log
from googlecloudsdk.core import resource_printer
from googlecloudsdk.core.console import console_io
from googlecloudsdk.third_party.apitools.base.py import encoding
class _BasePatch(object):
"""Updates the settings of a Cloud SQL instance."""
@classmethod
def Args(cls, parser):
"""Args is called by calliope to gather arguments for this command.
Please add arguments in alphabetical order, except for a no-* or clear-*
counterpart of an argument, which may directly follow the argument it negates.
Args:
parser: An argparse parser that you can use to add arguments that go
on the command line after this command. Positional arguments are
allowed.
"""
parser.add_argument(
'--activation-policy',
required=False,
choices=['ALWAYS', 'NEVER', 'ON_DEMAND'],
help='The activation policy for this instance. This specifies when the '
'instance should be activated and is applicable only when the '
'instance state is RUNNABLE.')
parser.add_argument(
'--assign-ip',
action='store_true',
default=None, # Tri-valued: None => don't change the setting.
help='The instance must be assigned an IP address.')
gae_apps_group = parser.add_mutually_exclusive_group()
gae_apps_group.add_argument(
'--authorized-gae-apps',
type=arg_parsers.ArgList(min_length=1),
metavar='APP',
required=False,
action=arg_parsers.FloatingListValuesCatcher(),
help='A list of App Engine app IDs that can access this instance.')
gae_apps_group.add_argument(
'--clear-gae-apps',
required=False,
action='store_true',
help=('Specified to clear the list of App Engine apps that can access '
'this instance.'))
networks_group = parser.add_mutually_exclusive_group()
networks_group.add_argument(
'--authorized-networks',
type=arg_parsers.ArgList(min_length=1),
metavar='NETWORK',
required=False,
action=arg_parsers.FloatingListValuesCatcher(),
help='The list of external networks that are allowed to connect to the '
'instance. Specified in CIDR notation, also known as \'slash\' '
'notation (e.g. 192.168.100.0/24).')
networks_group.add_argument(
'--clear-authorized-networks',
required=False,
action='store_true',
help='Clear the list of external networks that are allowed to connect '
'to the instance.')
backups_group = parser.add_mutually_exclusive_group()
backups_group.add_argument(
'--backup-start-time',
required=False,
help='The start time of daily backups, specified in the 24 hour format '
'- HH:MM, in the UTC timezone.')
backups_group.add_argument(
'--no-backup',
required=False,
action='store_true',
help='Specified if daily backup should be disabled.')
database_flags_group = parser.add_mutually_exclusive_group()
database_flags_group.add_argument(
'--database-flags',
type=arg_parsers.ArgDict(min_length=1),
metavar='FLAG=VALUE',
required=False,
action=arg_parsers.FloatingListValuesCatcher(),
help='A comma-separated list of database flags to set on the instance. '
'Use an equals sign to separate flag name and value. Flags without '
'values, like skip_grant_tables, can be written out without a value '
'after, e.g., `skip_grant_tables=`. Use on/off for '
'booleans. View the Instance Resource API for allowed flags. '
'(e.g., `--database-flags max_allowed_packet=55555,skip_grant_tables=,'
'log_output=1`)')
database_flags_group.add_argument(
'--clear-database-flags',
required=False,
action='store_true',
help='Clear the database flags set on the instance. '
'WARNING: Instance will be restarted.')
parser.add_argument(
'--enable-bin-log',
action='store_true',
default=None, # Tri-valued: None => don't change the setting.
help='Enable binary log. If backup configuration is disabled, binary '
'log should be disabled as well.')
parser.add_argument(
'--follow-gae-app',
required=False,
help='The App Engine app this instance should follow. It must be in '
'the same region as the instance. '
'WARNING: Instance may be restarted.')
parser.add_argument(
'--gce-zone',
required=False,
help='The preferred Compute Engine zone (e.g. us-central1-a, '
'us-central1-b, etc.). '
'WARNING: Instance may be restarted.')
parser.add_argument(
'instance',
completion_resource='sql.instances',
help='Cloud SQL instance ID.')
parser.add_argument(
'--pricing-plan',
'-p',
required=False,
choices=['PER_USE', 'PACKAGE'],
help='The pricing plan for this instance.')
parser.add_argument(
'--replication',
required=False,
choices=['SYNCHRONOUS', 'ASYNCHRONOUS'],
help='The type of replication this instance uses.')
parser.add_argument(
'--require-ssl',
action='store_true',
default=None, # Tri-valued: None => don't change the setting.
help='mysqld should default to \'REQUIRE X509\' for users connecting '
'over IP.')
parser.add_argument(
'--tier',
'-t',
required=False,
help='The tier of service for this instance, for example D0, D1. '
'WARNING: Instance will be restarted.')
parser.add_argument(
'--enable-database-replication',
action='store_true',
default=None, # Tri-valued: None => don't change the setting.
help='Enable database replication. Applicable only '
'for read replica instance(s). WARNING: Instance will be restarted.')
parser.add_argument(
'--async',
action='store_true',
help='Do not wait for the operation to complete.')
parser.add_argument(
'--diff',
action='store_true',
help='Show what changed as a result of the update.')
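# Illustrative command lines this parser accepts (instance name and values
# are hypothetical):
#   gcloud sql instances patch my-instance --tier D1 --backup-start-time 23:00
#   gcloud sql instances patch my-instance --clear-database-flags --diff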
def Display(self, args, result):
"""Display prints information about what just happened to stdout.
Args:
args: The same as the args in Run.
result: A dict object representing the operations resource describing the
patch operation if the patch was successful.
"""
if args.diff:
resource_printer.Print(result, 'text')
def _PrintAndConfirmWarningMessage(self, args):
"""Print and confirm warning indicating the effect of applying the patch."""
continue_msg = None
if any([args.tier, args.database_flags, args.clear_database_flags,
args.enable_database_replication is not None]):
continue_msg = ('WARNING: This patch modifies a value that requires '
'your instance to be restarted. Submitting this patch '
'will immediately restart your instance if it\'s running.'
)
else:
if any([args.follow_gae_app, args.gce_zone]):
continue_msg = ('WARNING: This patch modifies the zone your instance '
'is set to run in, which may require it to be moved. '
'Submitting this patch will restart your instance '
'if it is running in a different zone.')
if continue_msg and not console_io.PromptContinue(continue_msg):
raise exceptions.ToolException('canceled by the user.')
def _GetConfirmedClearedFields(self, args, patch_instance):
"""Clear fields according to args and confirm with user."""
cleared_fields = []
if args.clear_gae_apps:
cleared_fields.append('settings.authorizedGaeApplications')
if args.clear_authorized_networks:
cleared_fields.append('settings.ipConfiguration.authorizedNetworks')
if args.clear_database_flags:
cleared_fields.append('settings.databaseFlags')
log.status.write(
'The following message will be used for the patch API method.\n')
log.status.write(
encoding.MessageToJson(
patch_instance, include_fields=cleared_fields)+'\n')
self._PrintAndConfirmWarningMessage(args)
return cleared_fields
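# Contract assumed here: fields listed in cleared_fields are forced into the
# serialized Patch request by sql_client.IncludeFields() even when empty,
# which is how the API distinguishes "clear this list" from "leave unchanged".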
@base.ReleaseTracks(base.ReleaseTrack.GA)
class Patch(_BasePatch, base.Command):
"""Updates the settings of a Cloud SQL instance."""
@errors.ReraiseHttpException
def Run(self, args):
"""Updates settings of a Cloud SQL instance using the patch api method.
Args:
args: argparse.Namespace, The arguments that this command was invoked
with.
Returns:
A dict object representing the operations resource describing the patch
operation if the patch was successful.
Raises:
HttpException: An HTTP error response was received while executing the api
request.
ToolException: An error other than an HTTP error occurred while executing
the command.
"""
sql_client = self.context['sql_client']
sql_messages = self.context['sql_messages']
resources = self.context['registry']
validate.ValidateInstanceName(args.instance)
instance_ref = resources.Parse(args.instance, collection='sql.instances')
original_instance_resource = sql_client.instances.Get(
instance_ref.Request())
patch_instance = instances.InstancesV1Beta3.ConstructInstanceFromArgs(
sql_messages, args, original=original_instance_resource)
patch_instance.project = instance_ref.project
patch_instance.instance = instance_ref.instance
cleared_fields = self._GetConfirmedClearedFields(args, patch_instance)
with sql_client.IncludeFields(cleared_fields):
result = sql_client.instances.Patch(patch_instance)
operation_ref = resources.Create(
'sql.operations',
operation=result.operation,
project=instance_ref.project,
instance=instance_ref.instance,
)
if args.async:
return sql_client.operations.Get(operation_ref.Request())
operations.OperationsV1Beta3.WaitForOperation(
sql_client, operation_ref, 'Patching Cloud SQL instance')
log.UpdatedResource(instance_ref)
if args.diff:
changed_instance_resource = sql_client.instances.Get(
instance_ref.Request())
return resource_printer.ResourceDiff(
original_instance_resource, changed_instance_resource)
return sql_client.instances.Get(instance_ref.Request())
@base.ReleaseTracks(base.ReleaseTrack.BETA)
class PatchBeta(_BasePatch, base.Command):
"""Updates the settings of a Cloud SQL instance."""
@errors.ReraiseHttpException
def Run(self, args):
"""Updates settings of a Cloud SQL instance using the patch api method.
Args:
args: argparse.Namespace, The arguments that this command was invoked
with.
Returns:
A dict object representing the operations resource describing the patch
operation if the patch was successful.
Raises:
HttpException: An HTTP error response was received while executing the api
request.
ToolException: An error other than an HTTP error occurred while executing
the command.
"""
sql_client = self.context['sql_client']
sql_messages = self.context['sql_messages']
resources = self.context['registry']
validate.ValidateInstanceName(args.instance)
instance_ref = resources.Parse(args.instance, collection='sql.instances')
original_instance_resource = sql_client.instances.Get(
instance_ref.Request())
patch_instance = instances.InstancesV1Beta3.ConstructInstanceFromArgs(
sql_messages, args, original=original_instance_resource)
patch_instance.project = instance_ref.project
patch_instance.name = instance_ref.instance
cleared_fields = self._GetConfirmedClearedFields(args, patch_instance)
with sql_client.IncludeFields(cleared_fields):
result_operation = sql_client.instances.Patch(patch_instance)
operation_ref = resources.Create(
'sql.operations',
operation=result_operation.name,
project=instance_ref.project,
instance=instance_ref.instance,
)
if args.async:
return sql_client.operations.Get(operation_ref.Request())
operations.OperationsV1Beta4.WaitForOperation(
sql_client, operation_ref, 'Patching Cloud SQL instance')
log.UpdatedResource(instance_ref)
if args.diff:
changed_instance_resource = sql_client.instances.Get(
instance_ref.Request())
return resource_printer.ResourceDiff(
original_instance_resource, changed_instance_resource)
return sql_client.instances.Get(instance_ref.Request())
| bsd-3-clause |
eric-stanley/freeciv-android | lib/freeciv/gamescreen.py | 2 | 16986 | # Copyright (C) 2011 Michal Zielinski (michal@zielinscy.org.pl)
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
from __future__ import division
import ui
import uidialog
import client
import functools
from client import freeciv
import graphics
import citydlg
import gamemenu
import icons
import features
import diplodialog
import empiredlg
import help
import dropbox
import mapdrawer
SELECT_POPUP = 0
features.add_feature('app.full_label_toggle_button', type=bool)
features.add_feature('app.map_tiles', type=bool, default=False)
class ScreenClient(client.Client):
def __init__(self, **kwargs):
client.Client.__init__(self, **kwargs)
self.turn_loading_dialog = None
self.init_ui()
def init_ui(self):
self.ui = ScreenWidget(self)
#def console_line(self, line):
# self.ui.console.line(line)
def end_turn(self):
self.update_meswin([('Ending turn...', None, None)])
super(ScreenClient, self).end_turn()
def update_meswin(self, lines):
self.ui.console.clear()
for line, attrs, tile in lines:
self.ui.console.line(line)
def popup_city_dialog(self, city):
dialog = citydlg.Dialog(self, city)
ui.set(dialog.ui)
def overview_size_changed(self, w, h):
self.ui.resize_left_pane(self.ui.overview.size[0])
def update_menus(self, unit):
self.ui.menu.update(unit)
def disable_menus(self):
self.ui.menu.update(None)
def popup_caravan_dialog(self, unit, home, dest):
can_establish, can_trade, can_wonder = self.get_caravan_options(unit, home, dest)
def establish_trade_route():
unit.perform_activity(client.actions.ACTIVITY_ESTABLISH_TRADE_ROUTE)
def help_wonder():
unit.perform_activity(client.actions.ACTIVITY_HELP_BUILD_WONDER)
items = []
if not can_establish and not can_wonder:
return
if can_establish:
items.append(('Establish trade route', establish_trade_route))
if can_wonder:
items.append(('Help building wonder', help_wonder))
items.append(('Do nothing', lambda: None))
ui.show_list_dialog(items, title='Your %s from %s has arrived at city %s'
% (unit.get_name(), home.get_name(), dest.get_name()),
titlefont=ui.consolefont)
def popup_diplomat_dialog(self, diplomat_action):
if diplomat_action.city:
self.popup_city_diplomat_dialog(diplomat_action)
else:
self.popup_unit_diplomat_dialog(diplomat_action)
# SPY_SABOTAGE_UNIT: "",
# DIPLOMAT_BRIBE: "",
# DIPLOMAT_INCITE: "",
def popup_city_diplomat_dialog(self, diplomat_action):
if diplomat_action.spy:
message = 'Choose Your spy\'s strategy'
else:
message = 'Choose Your diplomat\'s strategy'
action_titles = {
freeciv.const.DIPLOMAT_MOVE: "Keep moving",
freeciv.const.DIPLOMAT_EMBASSY: "Establish Embassy",
freeciv.const.DIPLOMAT_INVESTIGATE: "Investigate City",
freeciv.const.DIPLOMAT_SABOTAGE: "Sabotage City",
freeciv.const.DIPLOMAT_INCITE: "Incite Revolt",
freeciv.const.DIPLOMAT_STEAL: "Steal Technology",
freeciv.const.SPY_POISON: "Poison City",
}
simple_actions = [freeciv.const.DIPLOMAT_EMBASSY,
freeciv.const.DIPLOMAT_INVESTIGATE,
freeciv.const.DIPLOMAT_MOVE,
freeciv.const.SPY_POISON]
def do_action(action):
if action in simple_actions:
diplomat_action.perform_simple_action(action)
elif action == freeciv.const.DIPLOMAT_SABOTAGE:
# TODO: spy can choose building
diplomat_action.perform_simple_action(action,
value=freeciv.const.B_LAST+1)
elif action == freeciv.const.DIPLOMAT_STEAL:
# TODO: spy can choose technology
diplomat_action.perform_simple_action(action, value=freeciv.const.A_UNSET)
elif action == freeciv.const.DIPLOMAT_INCITE:
diplomat_action.request_answer(action)
else:
ui.not_implemented()
items = []
for action in diplomat_action.get_actions():
if action in action_titles:
title = action_titles[action]
items.append((title, functools.partial(do_action, action)))
ui.show_list_dialog(items, title=message, titlefont=ui.consolefont)
def popup_incite_dialog(self, diplomat_action, cost, possible):
if not possible:
ui.message('Revolt not possible')
return
if cost > self.get_gold():
ui.message('You don\'t have enough money (needed %d)' % cost)
return
def yes():
diplomat_action.perform_simple_action(freeciv.const.DIPLOMAT_INCITE)
ui.ask('Incite revolt? (costs %d)' % cost, yes)
def popup_unit_diplomat_dialog(self, diplomat_action):
message = 'Subvert enemy unit'
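# Note: beyond composing this message, unit-targeted diplomat actions
# (bribe / sabotage unit, per the commented constants above) are not
# implemented in this dialog yet.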
def popup_unit_select_dialog(self, units):
def focus(unit):
unit.focus()
ui.back()
panel = ui.LinearLayoutWidget()
for unit in units:
name = unit.get_name()
callback = functools.partial(focus, unit)
p = ui.HorizontalLayoutWidget()
p.add(ui.Image(unit.get_image(), callback)) # Label(' ' * 10, callback, image=
p.add(ui.Button(name, callback))
panel.add(p)
ui.set_dialog(panel, scroll=True)
def quit(self):
def quit():
self.disconnect()
ui.back(anim=False) # close dialog
ui.back(allow_override=False) # close game
def save():
self.chat('/save')
ui.back()
def save_db():
ui.back()
self.save_and_get_name(lambda path: dropbox.save(path))
def show_help():
help.show()
menu = ui.Menu(for_dialog=True)
menu.add('Quit', quit)
menu.add('Save', save)
menu.add('Save to Dropbox', save_db, button_class=dropbox.DBButton)
menu.add('Help', show_help)
ui.set_dialog(menu, scroll=True)
def city_dialog_is_open(self, city):
return bool(self.get_city_dialog(city))
def refresh_city_dialog(self, city):
if self.city_dialog_is_open(city):
self.get_city_dialog(city).refresh()
def get_city_dialog(self, city):
if isinstance(ui.get_screen(), ui.ScrollWrapper):
item = ui.get_screen().item
else:
item = ui.get_screen()
if isinstance(item, citydlg.Dialog) and item.city == city:
return item
else:
return None
def update_taxes(self):
return self.ui.taxes_panel.update()
def create_meeting(self, counterpart):
return diplodialog.Meeting(self, counterpart)
def set_turn_button_enable(self, enabled):
dialog_state = self.turn_loading_dialog.is_opened() if self.turn_loading_dialog else False
if not enabled and not dialog_state:
self.turn_loading_dialog = ui.set_dialog(ui.Label('ending turn...'))
elif dialog_state and enabled:
self.turn_loading_dialog.close()
def handle_authentication_req(self, prompt):
password = uidialog.inputbox(prompt)
if password:
self.authenticate(password)
def popup_notify(self, text):
panel = ui.LinearLayoutWidget()
panel.add(ui.Label(text, font=ui.consolefont))
panel.add(ui.Button('Okay', ui.back))
ui.set_dialog(panel)
class ScreenWidget(ui.AbsoluteLayoutWidget):
def __init__(self, client):
super(ScreenWidget, self).__init__()
width = 150
def make_button(name, func):
return ui.Button(name, func,
force_width=width - 15,
padding=1)
self.client = client
if features.get('app.map_tiles'):
import maptiles
self.map = maptiles.MapWidget(client)
else:
self.map = mapdrawer.MapWidget(client)
self.overview = OverviewWidget(client)
self.console = ConsoleWidget(client)
self.menu = gamemenu.Menu(client, ui.screen_width - width)
self.end_turn_button = make_button('End turn', self.client.end_turn)
self.empire_button = make_button('Empire', self.empire_dialog)
self.taxes_panel = TaxesPanel(client)
self.left_panel = ui.LinearLayoutWidget(spacing=0, center=True, marginleft=0)
self.left_panel.widget_background = (190, 160, 110, 170)
self.add(self.map, (0, 0))
self.add(self.menu, (width, 0), align=ui.BOTTOM)
self.add(self.left_panel, (0, 0), align=ui.LEFT)
self.update_layout()
self.left_panel.add(self.overview)
self.left_panel.add(self.console.scroll)
self.left_panel.add(self.end_turn_button)
self.left_panel.add(self.taxes_panel)
self.left_panel.add(ui.Spacing(0, 10))
self.left_panel.add(self.empire_button)
self.left_panel.add(self.menu.left_widget)
if features.get('app.full_label_toggle_button'):
full_label_toggle_button = ui.Button('city labels', client.toggle_full_labels, font=ui.consolefont)
self.left_panel.add(full_label_toggle_button)
# key_end_turn()
self.focus = self.map
def resize_left_pane(self, width):
self.console.width = width
self.console.scroll.width = width
self.console.scroll.height = 100
def empire_dialog(self):
empiredlg.EmpireDialog(self.client).show()
def tick(self):
self.map.size = ui.screen_width, ui.screen_height
self.client.tick()
super(ScreenWidget, self).tick()
def event(self, ev):
if ev.type == graphics.const.KEYDOWN and ev.key == graphics.const.K_F1:
self.client.quit()
else:
return super(ScreenWidget, self).event(ev)
def back(self):
self.map.back()
class TaxesPanel(ui.LinearLayoutWidget):
def __init__(self, client):
ui.LinearLayoutWidget.__init__(self, spacing=10)
self.client = client
self.update()
def update(self):
self.items = []
self.update_year()
self.update_gold_label()
self.update_tax()
self.update_layout()
def update_gold_label(self):
plus = self.client.get_gold_income()
if plus >= 0:
plus = '+%d' % plus
else:
plus = '%s' % plus
self.add(ui.Label('Gold: %d (%s)' % (self.client.get_gold(), plus), font=ui.consolefont))
def update_tax(self):
panel = ui.HorizontalLayoutWidget()
tax, lux, science = self.client.get_tax_values()
science_img = icons.get_small_image('scientist')
tax_img = icons.get_small_image('taxman')
lux_img = icons.get_small_image('elvis')
def add(value, img):
for i in xrange(int(value/10)):
panel.add(ui.Image(img))
add(tax, tax_img)
add(lux, lux_img)
add(science, science_img)
self.add(panel)
def update_year(self):
year = self.client.get_current_year_name()
self.add(ui.Label('Year: ' + year, font=ui.consolefont))
def event(self, ev):
if ev.type == graphics.const.MOUSEBUTTONDOWN:
self.callback()
def callback(self):
ui.set_dialog(TaxesDialog(self.client))
class TaxesDialog(ui.LinearLayoutWidget):
def __init__(self, client):
ui.LinearLayoutWidget.__init__(self)
self.client = client
self.update()
def update(self):
self.items = []
self.update_tax()
self.update_buttons()
self.update_layout()
def update_tax(self):
panel = ui.LinearLayoutWidget()
tpl = list(self.client.get_tax_values())
science_img = icons.get_small_image('scientist')
tax_img = icons.get_small_image('taxman')
lux_img = icons.get_small_image('elvis')
font = ui.bigfont
def change(type, val):
tpl[type] += val * 10
tpl[0] -= val * 10
a, b, c = map(lambda x: max(0, min(100, x)), tpl)
self.client.set_tax_values(a, b, c)
ui.get_screen().tick()
self.update()
ui.execute_later(self.update)
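# Raising luxury or science is paid for out of the tax share (tpl[0]); the
# clamp to [0, 100] keeps each rate valid before the new split is pushed to
# the server via set_tax_values().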
def add(type, img):
# spacings here are hard-coded, so the layout breaks when the font is changed
img = img.scale((30, 45))
line = ui.HorizontalLayoutWidget()
img_l = ui.LinearLayoutWidget()
img_l.add(ui.Image(img))
img_l.add(ui.Spacing(0, 6))
line.add(img_l)
if type != 0:
line.add(ui.Spacing(10, 0))
w = 40
line.add(ui.Button(' - ', functools.partial(change, type, -1), font=font,
force_width=w))
line.add(ui.Spacing(10, 0))
line.add(ui.Button(' + ', functools.partial(change, type, +1), font=font,
force_width=w))
else:
line.add(ui.Spacing(116, 0))
line.add(ui.Spacing(10, 0))
line.add(ui.Label('%d%%' % tpl[type], font=font))
panel.add(line)
add(0, tax_img)
add(1, lux_img)
add(2, science_img)
self.add(panel)
def update_buttons(self):
self.add(ui.Button('Change government', self.change_goverment))
def change_goverment(self):
def commit_government_change(gov):
gov.change_to()
ui.back()
panel = ui.LinearLayoutWidget()
for gov in self.client.get_governments():
panel.add(ui.Label(gov.name, functools.partial(commit_government_change, gov)))
ui.set_dialog(panel, scroll=True)
class OverviewWidget(ui.Widget):
def __init__(self, client):
self.client = client
self.scale_width = 150
@property
def size(self):
w, h = self.client.get_overview_size()
ratio = h / float(w)
return (self.scale_width, int(ratio * self.scale_width))
def tick(self):
pass
def event(self, ev):
if ev.type == graphics.const.MOUSEBUTTONDOWN:
w, h = self.client.get_overview_size()
scale = float(w) / self.scale_width
self.client.overview_click(int(ev.pos[0] * scale), int(ev.pos[1] * scale))
def draw(self, surf, pos):
self.client.draw_overview(surf, pos, scale=self.size)
class ConsoleWidget(ui.LinearLayoutWidget):
def __init__(self, client):
super(ConsoleWidget, self).__init__(spacing=0)
self.client = client
self.width = 0
self.scroll = ConsoleScrollWrapper(self)
self.shown = False
@property
def size(self):
return (self.width, self._size[1])
def line(self, text):
self.add(ui.Label(text, font=ui.consolefont))
def clear(self):
self.items = []
def draw(self, surf, pos):
if self.shown:
surf.draw_rect((255, 255, 255, 170), pos + self._size, 0, blend=graphics.MODE_NONE)
super(ConsoleWidget, self).draw(surf, pos)
def draw_clipped(self, surf, pos, clip):
rect = graphics.Rect(clip)
cliptex = graphics.create_surface(rect[2], rect[3])
relpos = ui.layout._subpoints(pos, (rect[0], rect[1]))
self.draw(cliptex, relpos)
surf.blit(cliptex, (rect[0], rect[1]))
def event(self, ev):
if ev.type == graphics.const.MOUSEBUTTONDOWN:
myabspos = ui.layout._subpoints(ev.abs_pos, ev.pos)
self.shown = True
ui.add_overlay(self.scroll, myabspos)
elif ev.type == graphics.const.MOUSEBUTTONUP:
self.shown = False
if self.scroll in ui.overlays:
ui.overlays.remove(self.scroll)
class ConsoleScrollWrapper(ui.ScrollWrapper):
def get_clip(self):
return (self.item._size[0], self.size[1])
def canceled_event(self, event):
self.item.event(event)
def init():
gamemenu.init()
citydlg.init()
| gpl-2.0 |
tensorflow/estimator | tensorflow_estimator/python/estimator/canned/dnn_test_fc_v2.py | 1 | 19054 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for dnn.py with feature_column_v2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import tempfile
from unittest.mock import patch
from absl.testing import parameterized
import numpy as np
import six
import tensorflow as tf
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.feature_column import feature_column_v2
from tensorflow_estimator.python.estimator.canned import dnn
from tensorflow_estimator.python.estimator.canned import dnn_testing_utils
from tensorflow_estimator.python.estimator.canned import prediction_keys
from tensorflow_estimator.python.estimator.export import export
from tensorflow_estimator.python.estimator.inputs import numpy_io
from tensorflow_estimator.python.estimator.inputs import pandas_io
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
def _dnn_classifier_fn(*args, **kwargs):
return dnn.DNNClassifierV2(*args, **kwargs)
class DNNModelFnV2Test(dnn_testing_utils.BaseDNNModelFnTest, tf.test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
tf.test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNModelFnTest.__init__(
self, dnn.dnn_model_fn_v2, fc_impl=feature_column_v2)
class DNNLogitFnV2Test(dnn_testing_utils.BaseDNNLogitFnTest, tf.test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
tf.test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNLogitFnTest.__init__(
self, dnn.dnn_logit_fn_builder_v2, fc_impl=feature_column_v2)
class DNNWarmStartingV2Test(dnn_testing_utils.BaseDNNWarmStartingTest,
tf.test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
tf.test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNWarmStartingTest.__init__(
self, _dnn_classifier_fn, _dnn_regressor_fn, fc_impl=feature_column_v2)
class DNNClassifierEvaluateV2Test(
dnn_testing_utils.BaseDNNClassifierEvaluateTest, tf.test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
tf.test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNClassifierEvaluateTest.__init__(
self, _dnn_classifier_fn, fc_impl=feature_column_v2)
class DNNClassifierPredictV2Test(dnn_testing_utils.BaseDNNClassifierPredictTest,
tf.test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
tf.test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNClassifierPredictTest.__init__(
self, _dnn_classifier_fn, fc_impl=feature_column_v2)
class DNNClassifierTrainV2Test(dnn_testing_utils.BaseDNNClassifierTrainTest,
tf.test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
tf.test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNClassifierTrainTest.__init__(
self, _dnn_classifier_fn, fc_impl=feature_column_v2)
def _dnn_regressor_fn(*args, **kwargs):
return dnn.DNNRegressorV2(*args, **kwargs)
class DNNRegressorEvaluateV2Test(dnn_testing_utils.BaseDNNRegressorEvaluateTest,
tf.test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
tf.test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorEvaluateTest.__init__(
self, _dnn_regressor_fn, fc_impl=feature_column_v2)
class DNNRegressorPredictV2Test(dnn_testing_utils.BaseDNNRegressorPredictTest,
tf.test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
tf.test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorPredictTest.__init__(
self, _dnn_regressor_fn, fc_impl=feature_column_v2)
class DNNRegressorTrainV2Test(dnn_testing_utils.BaseDNNRegressorTrainTest,
tf.test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
tf.test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorTrainTest.__init__(
self, _dnn_regressor_fn, fc_impl=feature_column_v2)
def _queue_parsed_features(feature_map):
tensors_to_enqueue = []
keys = []
for key, tensor in six.iteritems(feature_map):
keys.append(key)
tensors_to_enqueue.append(tensor)
queue_dtypes = [x.dtype for x in tensors_to_enqueue]
input_queue = tf.queue.FIFOQueue(capacity=100, dtypes=queue_dtypes)
tf.compat.v1.train.queue_runner.add_queue_runner(
tf.compat.v1.train.queue_runner.QueueRunner(
input_queue, [input_queue.enqueue(tensors_to_enqueue)]))
dequeued_tensors = input_queue.dequeue()
return {keys[i]: dequeued_tensors[i] for i in range(len(dequeued_tensors))}
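# The helper above routes the parsed feature tensors through a FIFOQueue
# driven by a QueueRunner, so tf.io.parse_example runs in input-pipeline
# threads; dequeue() then re-exposes the tensors, keyed in a fixed order,
# for use by the estimator input_fns below.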
class DNNRegressorIntegrationTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
tf.compat.v1.summary.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _test_complete_flow(self, train_input_fn, eval_input_fn, predict_input_fn,
input_dimension, label_dimension, batch_size):
feature_columns = [
tf.feature_column.numeric_column('x', shape=(input_dimension,))
]
est = dnn.DNNRegressorV2(
hidden_units=(2, 2),
feature_columns=feature_columns,
label_dimension=label_dimension,
model_dir=self._model_dir)
# TRAIN
num_steps = 10
est.train(train_input_fn, steps=num_steps)
# EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(num_steps, scores[tf.compat.v1.GraphKeys.GLOBAL_STEP])
self.assertIn('loss', six.iterkeys(scores))
# PREDICT
predictions = np.array([
x[prediction_keys.PredictionKeys.PREDICTIONS]
for x in est.predict(predict_input_fn)
])
self.assertAllEqual((batch_size, label_dimension), predictions.shape)
# EXPORT
feature_spec = tf.feature_column.make_parse_example_spec(feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = est.export_saved_model(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(tf.compat.v1.gfile.Exists(export_dir))
def test_numpy_input_fn(self):
"""Tests complete flow with numpy_input_fn."""
label_dimension = 2
batch_size = 10
data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
data = data.reshape(batch_size, label_dimension)
# learn y = x
train_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': data}, y=data, batch_size=batch_size, shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': data}, batch_size=batch_size, shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=label_dimension,
label_dimension=label_dimension,
batch_size=batch_size)
def test_pandas_input_fn(self):
"""Tests complete flow with pandas_input_fn."""
if not HAS_PANDAS:
return
label_dimension = 1
batch_size = 10
data = np.linspace(0., 2., batch_size, dtype=np.float32)
x = pd.DataFrame({'x': data})
y = pd.Series(data)
train_input_fn = pandas_io.pandas_input_fn(
x=x, y=y, batch_size=batch_size, num_epochs=None, shuffle=True)
eval_input_fn = pandas_io.pandas_input_fn(
x=x, y=y, batch_size=batch_size, shuffle=False)
predict_input_fn = pandas_io.pandas_input_fn(
x=x, batch_size=batch_size, shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=label_dimension,
label_dimension=label_dimension,
batch_size=batch_size)
def test_input_fn_from_parse_example(self):
"""Tests complete flow with input_fn constructed from parse_example."""
label_dimension = 2
batch_size = 10
data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
data = data.reshape(batch_size, label_dimension)
serialized_examples = []
for datum in data:
example = example_pb2.Example(
features=feature_pb2.Features(
feature={
'x':
feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=datum)),
'y':
feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=datum)),
}))
serialized_examples.append(example.SerializeToString())
feature_spec = {
'x': tf.io.FixedLenFeature([label_dimension], tf.dtypes.float32),
'y': tf.io.FixedLenFeature([label_dimension], tf.dtypes.float32),
}
def _train_input_fn():
feature_map = tf.compat.v1.io.parse_example(serialized_examples,
feature_spec)
features = _queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _eval_input_fn():
feature_map = tf.compat.v1.io.parse_example(
tf.compat.v1.train.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = _queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _predict_input_fn():
feature_map = tf.compat.v1.io.parse_example(
tf.compat.v1.train.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = _queue_parsed_features(feature_map)
features.pop('y')
return features, None
self._test_complete_flow(
train_input_fn=_train_input_fn,
eval_input_fn=_eval_input_fn,
predict_input_fn=_predict_input_fn,
input_dimension=label_dimension,
label_dimension=label_dimension,
batch_size=batch_size)
class DNNClassifierIntegrationTest(tf.test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
tf.compat.v1.summary.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _as_label(self, data_in_float):
return np.rint(data_in_float).astype(np.int64)
def _test_complete_flow(self, train_input_fn, eval_input_fn, predict_input_fn,
input_dimension, n_classes, batch_size):
feature_columns = [
tf.feature_column.numeric_column('x', shape=(input_dimension,))
]
est = dnn.DNNClassifierV2(
hidden_units=(2, 2),
feature_columns=feature_columns,
n_classes=n_classes,
model_dir=self._model_dir)
# TRAIN
num_steps = 10
est.train(train_input_fn, steps=num_steps)
# EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(num_steps, scores[tf.compat.v1.GraphKeys.GLOBAL_STEP])
self.assertIn('loss', six.iterkeys(scores))
# PREDICT
predicted_proba = np.array([
x[prediction_keys.PredictionKeys.PROBABILITIES]
for x in est.predict(predict_input_fn)
])
self.assertAllEqual((batch_size, n_classes), predicted_proba.shape)
# EXPORT
feature_spec = tf.feature_column.make_parse_example_spec(feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = est.export_saved_model(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(tf.compat.v1.gfile.Exists(export_dir))
def test_numpy_input_fn(self):
"""Tests complete flow with numpy_input_fn."""
n_classes = 3
input_dimension = 2
batch_size = 10
data = np.linspace(
0., n_classes - 1., batch_size * input_dimension, dtype=np.float32)
x_data = data.reshape(batch_size, input_dimension)
y_data = np.reshape(self._as_label(data[:batch_size]), (batch_size, 1))
# learn y = x
train_input_fn = numpy_io.numpy_input_fn(
x={'x': x_data},
y=y_data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': x_data}, y=y_data, batch_size=batch_size, shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': x_data}, batch_size=batch_size, shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
n_classes=n_classes,
batch_size=batch_size)
def test_pandas_input_fn(self):
"""Tests complete flow with pandas_input_fn."""
if not HAS_PANDAS:
return
input_dimension = 1
n_classes = 3
batch_size = 10
data = np.linspace(0., n_classes - 1., batch_size, dtype=np.float32)
x = pd.DataFrame({'x': data})
y = pd.Series(self._as_label(data))
train_input_fn = pandas_io.pandas_input_fn(
x=x, y=y, batch_size=batch_size, num_epochs=None, shuffle=True)
eval_input_fn = pandas_io.pandas_input_fn(
x=x, y=y, batch_size=batch_size, shuffle=False)
predict_input_fn = pandas_io.pandas_input_fn(
x=x, batch_size=batch_size, shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
n_classes=n_classes,
batch_size=batch_size)
def test_input_fn_from_parse_example(self):
"""Tests complete flow with input_fn constructed from parse_example."""
input_dimension = 2
n_classes = 3
batch_size = 10
data = np.linspace(
0., n_classes - 1., batch_size * input_dimension, dtype=np.float32)
data = data.reshape(batch_size, input_dimension)
serialized_examples = []
for datum in data:
example = example_pb2.Example(
features=feature_pb2.Features(
feature={
'x':
feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=datum)),
'y':
feature_pb2.Feature(
int64_list=feature_pb2.Int64List(
value=self._as_label(datum[:1]))),
}))
serialized_examples.append(example.SerializeToString())
feature_spec = {
'x': tf.io.FixedLenFeature([input_dimension], tf.dtypes.float32),
'y': tf.io.FixedLenFeature([1], tf.dtypes.int64),
}
def _train_input_fn():
feature_map = tf.compat.v1.io.parse_example(serialized_examples,
feature_spec)
features = _queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _eval_input_fn():
feature_map = tf.compat.v1.io.parse_example(
tf.compat.v1.train.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = _queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _predict_input_fn():
feature_map = tf.compat.v1.io.parse_example(
tf.compat.v1.train.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = _queue_parsed_features(feature_map)
features.pop('y')
return features, None
self._test_complete_flow(
train_input_fn=_train_input_fn,
eval_input_fn=_eval_input_fn,
predict_input_fn=_predict_input_fn,
input_dimension=input_dimension,
n_classes=n_classes,
batch_size=batch_size)
class DNNTrainingMode(tf.test.TestCase):
"""Tests that training mode propagates to feature columns correctly."""
def setUp(self):
self._model_dir = tempfile.mkdtemp()
self._label_dimension = 1
self._batch_size = 10
def tearDown(self):
if self._model_dir:
tf.compat.v1.summary.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _create_data(self):
data = np.linspace(
0., 2., self._batch_size * self._label_dimension, dtype=np.float32)
return data.reshape(self._batch_size, self._label_dimension)
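# (A sketch of the fixture: the reshaped array is a 10x1 column of evenly
# spaced floats in [0, 2], so the regressor below effectively fits y = x.)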
def _get_estimator(self):
feature_columns = [
tf.feature_column.numeric_column('x', shape=(self._label_dimension,))
]
return dnn.DNNRegressorV2(
hidden_units=(2, 2),
feature_columns=feature_columns,
label_dimension=self._label_dimension,
model_dir=self._model_dir)
def test_train_vs_eval_mode(self):
data = self._create_data()
train_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=self._batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': data}, y=data, batch_size=self._batch_size, shuffle=False)
est = self._get_estimator()
with patch.object(
tf.compat.v2.keras.layers.DenseFeatures, 'call',
return_value=data) as mock_dense_features_call:
est.train(train_input_fn, steps=10)
est.evaluate(eval_input_fn)
train_args, eval_args = mock_dense_features_call.call_args_list
# DenseFeature should have been called with training = True in train.
_, train_training_kwarg = train_args
self.assertTrue(train_training_kwarg['training'])
# DenseFeature should have been called with training = False in eval.
_, eval_training_kwarg = eval_args
self.assertFalse(eval_training_kwarg['training'])
if __name__ == '__main__':
tf.test.main()
| apache-2.0 |
leotrubach/sourceforge-allura | Allura/allura/config/middleware.py | 3 | 7033 | # -*- coding: utf-8 -*-
"""WSGI middleware initialization for the allura application."""
import mimetypes
import pylons
import pylons.middleware
import tg
import tg.error
pylons.c = pylons.tmpl_context
pylons.g = pylons.app_globals
import pkg_resources
from tg import config
from paste.deploy.converters import asbool
from paste.registry import RegistryManager
from beaker.middleware import SessionMiddleware
from routes.middleware import RoutesMiddleware
from pylons.middleware import StatusCodeRedirect
import activitystream
import ew
import ming
from ming.orm.middleware import MingMiddleware
from allura.config.app_cfg import base_config
from allura.config.environment import load_environment
from allura.config.app_cfg import ForgeConfig
from allura.lib.custom_middleware import AlluraTimerMiddleware
from allura.lib.custom_middleware import SSLMiddleware
from allura.lib.custom_middleware import StaticFilesMiddleware
from allura.lib.custom_middleware import CSRFMiddleware
from allura.lib.custom_middleware import LoginRedirectMiddleware
from allura.lib import patches
from allura.lib import helpers as h
__all__ = ['make_app']
# Use base_config to setup the necessary PasteDeploy application factory.
# make_base_app will wrap the TG2 app with all the middleware it needs.
make_base_app = base_config.setup_tg_wsgi_app(load_environment)
def make_app(global_conf, full_stack=True, **app_conf):
root = app_conf.get('override_root', 'root')
return _make_core_app(root, global_conf, full_stack, **app_conf)
def _make_core_app(root, global_conf, full_stack=True, **app_conf):
"""
Set allura up with the settings found in the PasteDeploy configuration
file used.
:param root: The controller module containing the TG root
:param global_conf: The global settings for allura (those
defined under the ``[DEFAULT]`` section).
:type global_conf: dict
:param full_stack: Should the whole TG2 stack be set up?
:type full_stack: str or bool
:return: The allura application with all the relevant middleware
loaded.
This is the PasteDeploy factory for the allura application.
``app_conf`` contains all the application-specific settings (those defined
under ``[app:main]``.
"""
# Run all the initialization code here
mimetypes.init(
[pkg_resources.resource_filename('allura', 'etc/mime.types')]
+ mimetypes.knownfiles)
patches.apply()
# Configure MongoDB
ming.configure(**app_conf)
# Configure ActivityStream
activitystream.configure(**app_conf)
# Configure EW variable provider
ew.render.TemplateEngine.register_variable_provider(get_tg_vars)
# Create base app
base_config = ForgeConfig(root)
load_environment = base_config.make_load_environment()
# Code adapted from tg.configuration, replacing the following lines:
# make_base_app = base_config.setup_tg_wsgi_app(load_environment)
# app = make_base_app(global_conf, full_stack=True, **app_conf)
# Configure the Pylons environment
load_environment(global_conf, app_conf)
if config.get('zarkov.host'):
try:
import zmq
except ImportError:
raise ImportError, "Unable to import the zmq library. Please"\
" check that zeromq is installed or comment out"\
" the zarkov.host setting in your ini file."
app = tg.TGApp()
if asbool(config.get('auth.method', 'local')=='sfx'):
import sfx.middleware
d = h.config_with_prefix(config, 'auth.')
d.update(h.config_with_prefix(config, 'sfx.'))
app = sfx.middleware.SfxMiddleware(app, d)
# Required for pylons
app = RoutesMiddleware(app, config['routes.map'])
# Required for sessions
app = SessionMiddleware(app, config)
# Converts exceptions to HTTP errors, shows traceback in debug mode
app = tg.error.ErrorHandler(app, global_conf, **config['pylons.errorware'])
# Redirect some status codes to /error/document
if asbool(config['debug']):
app = StatusCodeRedirect(app, base_config.handle_status_codes)
else:
app = StatusCodeRedirect(app, base_config.handle_status_codes + [500])
# Redirect 401 to the login page
app = LoginRedirectMiddleware(app)
# Add instrumentation
app = AlluraTimerMiddleware(app, app_conf)
# Clear cookies when the CSRF field isn't posted
if not app_conf.get('disable_csrf_protection'):
app = CSRFMiddleware(app, '_session_id')
# Setup the allura SOPs
app = allura_globals_middleware(app)
# Ensure https for logged in users, http for anonymous ones
if asbool(app_conf.get('auth.method', 'local')=='sfx'):
app = SSLMiddleware(app, app_conf.get('no_redirect.pattern'))
# Setup resource manager, widget context SOP
app = ew.WidgetMiddleware(
app,
compress=not asbool(global_conf['debug']),
# compress=True,
script_name=app_conf.get('ew.script_name', '/_ew_resources/'),
url_base=app_conf.get('ew.url_base', '/_ew_resources/'),
extra_headers=eval(app_conf.get('ew.extra_headers', 'None')))
# Make sure that the wsgi.scheme is set appropriately when we
# have the funky HTTP_X_SFINC_SSL environ var
if asbool(app_conf.get('auth.method', 'local')=='sfx'):
app = set_scheme_middleware(app)
# Handle static files (by tool)
app = StaticFilesMiddleware(app, app_conf.get('static.script_name'))
# Handle setup and flushing of Ming ORM sessions
app = MingMiddleware(app)
# Set up the registry for stacked object proxies (SOPs).
# streaming=true ensures they won't be cleaned up till
# the WSGI application's iterator is exhausted
app = RegistryManager(app, streaming=True)
return app
def set_scheme_middleware(app):
def SchemeMiddleware(environ, start_response):
if asbool(environ.get('HTTP_X_SFINC_SSL', 'false')):
environ['wsgi.url_scheme'] = 'https'
return app(environ, start_response)
return SchemeMiddleware
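# Illustrative effect of the wrapper above: a request arriving with the
# HTTP_X_SFINC_SSL header set to a truthy string ('1', 'true', ...) is
# rewritten so downstream code sees environ['wsgi.url_scheme'] == 'https';
# every other request passes through unchanged.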
def allura_globals_middleware(app):
def AlluraGlobalsMiddleware(environ, start_response):
import allura.lib.security
import allura.lib.app_globals
registry = environ['paste.registry']
registry.register(allura.credentials, allura.lib.security.Credentials())
return app(environ, start_response)
return AlluraGlobalsMiddleware
def get_tg_vars(context):
import pylons, tg
from allura.lib import helpers as h
from urllib import quote, quote_plus
context.setdefault('g', pylons.g)
context.setdefault('c', pylons.c)
context.setdefault('h', h)
context.setdefault('request', pylons.request)
context.setdefault('response', pylons.response)
context.setdefault('url', pylons.url)
context.setdefault('tg', dict(
config=tg.config,
flash_obj=tg.flash,
quote=quote,
quote_plus=quote_plus,
url=tg.url))
| apache-2.0 |
nitzmahone/ansible | lib/ansible/utils/module_docs_fragments/openswitch.py | 58 | 3815 | #
# (c) 2015, Peter Sprygada <psprygada@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
# Standard files documentation fragment
DOCUMENTATION = """
options:
host:
description:
- Specifies the DNS host name or address for connecting to the remote
device over the specified transport. The value of host is used as
the destination address for the transport. Note this argument
does not affect the SSH transport.
required: true
port:
description:
- Specifies the port to use when building the connection to the remote
device. This value applies to either I(cli) or I(rest). The port
value will default to the appropriate transport common port if
none is provided in the task. (cli=22, http=80, https=443). Note
this argument does not affect the SSH transport.
default: 0 (use common port)
username:
description:
- Configures the username to use to authenticate the connection to
the remote device. This value is used to authenticate
either the CLI login or the eAPI authentication depending on which
transport is used. Note this argument does not affect the SSH
transport. If the value is not specified in the task, the value of
environment variable C(ANSIBLE_NET_USERNAME) will be used instead.
password:
description:
- Specifies the password to use to authenticate the connection to
the remote device. This is a common argument used for either I(cli)
or I(rest) transports. Note this argument does not affect the SSH
transport. If the value is not specified in the task, the value of
environment variable C(ANSIBLE_NET_PASSWORD) will be used instead.
timeout:
description:
- Specifies the timeout in seconds for communicating with the network device
for either connecting or sending commands. If the timeout is
exceeded before the operation is completed, the module will error.
default: 10
ssh_keyfile:
description:
- Specifies the SSH key to use to authenticate the connection to
the remote device. This argument is only used for the I(cli)
transports. If the value is not specified in the task, the value of
environment variable C(ANSIBLE_NET_SSH_KEYFILE) will be used instead.
transport:
description:
- Configures the transport connection to use when connecting to the
remote device. The transport argument supports connectivity to the
device over ssh, cli or REST.
required: true
default: ssh
choices: ['ssh', 'cli', 'rest']
use_ssl:
description:
- Configures the I(transport) to use SSL if set to true only when the
I(transport) argument is configured as rest. If the transport
argument is not I(rest), this value is ignored.
type: bool
default: 'yes'
provider:
description:
- Convenience method that allows all I(openswitch) arguments to be passed as
a dict object. All constraints (required, choices, etc) must be
met either by individual arguments or values in this dict.
"""
| gpl-3.0 |
SDSG-Invenio/invenio | invenio/legacy/refextract/xml.py | 15 | 35234 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from __future__ import absolute_import
import re
from xml.sax.saxutils import escape as encode_for_xml
from datetime import datetime
from invenio.legacy.refextract.regexs import re_num
from invenio.legacy.docextract.utils import write_message
from invenio.legacy.refextract.config import \
CFG_REFEXTRACT_TAG_ID_REFERENCE, \
CFG_REFEXTRACT_IND1_REFERENCE, \
CFG_REFEXTRACT_IND2_REFERENCE, \
CFG_REFEXTRACT_SUBFIELD_MARKER, \
CFG_REFEXTRACT_SUBFIELD_AUTH, \
CFG_REFEXTRACT_SUBFIELD_TITLE, \
CFG_REFEXTRACT_SUBFIELD_MISC, \
CGF_REFEXTRACT_SEMI_COLON_MISC_TEXT_SENSITIVITY, \
CFG_REFEXTRACT_SUBFIELD_REPORT_NUM, \
CFG_REFEXTRACT_XML_RECORD_OPEN, \
CFG_REFEXTRACT_CTRL_FIELD_RECID, \
CFG_REFEXTRACT_TAG_ID_EXTRACTION_STATS, \
CFG_REFEXTRACT_IND1_EXTRACTION_STATS, \
CFG_REFEXTRACT_IND2_EXTRACTION_STATS, \
CFG_REFEXTRACT_SUBFIELD_EXTRACTION_STATS, \
CFG_REFEXTRACT_SUBFIELD_EXTRACTION_TIME, \
CFG_REFEXTRACT_SUBFIELD_EXTRACTION_VERSION, \
CFG_REFEXTRACT_VERSION, \
CFG_REFEXTRACT_XML_RECORD_CLOSE, \
CFG_REFEXTRACT_SUBFIELD_URL_DESCR, \
CFG_REFEXTRACT_SUBFIELD_URL, \
CFG_REFEXTRACT_SUBFIELD_DOI, \
CGF_REFEXTRACT_ADJACENT_AUTH_MISC_SEPARATION, \
CFG_REFEXTRACT_SUBFIELD_QUOTED, \
CFG_REFEXTRACT_SUBFIELD_ISBN, \
CFG_REFEXTRACT_SUBFIELD_PUBLISHER, \
CFG_REFEXTRACT_SUBFIELD_YEAR, \
CFG_REFEXTRACT_SUBFIELD_BOOK
from invenio import config
CFG_INSPIRE_SITE = getattr(config, 'CFG_INSPIRE_SITE', False)
def format_marker(line_marker):
if line_marker:
num_match = re_num.search(line_marker)
if num_match:
line_marker = num_match.group(0)
return line_marker
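# Illustrative behaviour (a sketch, assuming re_num matches the first run of
# digits in the marker):
#   format_marker('[19].') -> '19'
#   format_marker('') -> '' (falsy markers are returned unchanged)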
def create_xml_record(counts, recid, xml_lines, status_code=0):
"""Given a series of MARC XML-ized reference lines and a record-id, write a
MARC XML record to the stdout stream. Include in the record some stats
for the extraction job.
The printed MARC XML record will essentially take the following
structure:
<record>
<controlfield tag="001">1</controlfield>
<datafield tag="999" ind1="C" ind2="5">
[...]
</datafield>
[...]
<datafield tag="999" ind1="C" ind2="6">
<subfield code="a">
Invenio/X.XX.X refextract/X.XX.X-timestamp-err-repnum-title-URL-misc
</subfield>
</datafield>
</record>
Timestamp, error(code), reportnum, title, URL, and misc will of course
take the relevant values.
@param status_code: (integer)the status of reference-extraction for the
given record: was there an error or not? 0 = no error; 1 = error.
@param count_reportnum: (integer) - the number of institutional
report-number citations found in the document's reference lines.
@param count_title: (integer) - the number of journal title citations
found in the document's reference lines.
@param count_url: (integer) - the number of URL citations found in the
document's reference lines.
@param count_misc: (integer) - the number of sections of miscellaneous
text (i.e. 999C5$m) from the document's reference lines.
@param count_auth_group: (integer) - the total number of author groups
identified ($h)
@param recid: (string) - the record-id of the given document. (put into
001 field.)
@param xml_lines: (list) of strings. Each string in the list contains a
group of MARC XML 999C5 datafields, making up a single reference line.
These reference lines will make up the document body.
@return: The entire MARC XML textual output, plus recognition statistics.
"""
out = []
## Start with the opening record tag:
out += u"%(record-open)s\n" \
% {'record-open': CFG_REFEXTRACT_XML_RECORD_OPEN, }
## Display the record-id controlfield:
out += \
u""" <controlfield tag="%(cf-tag-recid)s">%(recid)d</controlfield>\n""" \
% {'cf-tag-recid' : CFG_REFEXTRACT_CTRL_FIELD_RECID,
'recid' : recid,
}
## Loop through all xml lines and add them to the output string:
out.extend(xml_lines)
## add the 999C6 status subfields:
out += u""" <datafield tag="%(df-tag-ref-stats)s" ind1="%(df-ind1-ref-stats)s" ind2="%(df-ind2-ref-stats)s">
<subfield code="%(sf-code-ref-stats)s">%(status)s-%(reportnum)s-%(title)s-%(author)s-%(url)s-%(doi)s-%(misc)s</subfield>
<subfield code="%(sf-code-ref-time)s">%(timestamp)s</subfield>
<subfield code="%(sf-code-ref-version)s">%(version)s</subfield>
</datafield>\n""" \
% {'df-tag-ref-stats' : CFG_REFEXTRACT_TAG_ID_EXTRACTION_STATS,
'df-ind1-ref-stats' : CFG_REFEXTRACT_IND1_EXTRACTION_STATS,
'df-ind2-ref-stats' : CFG_REFEXTRACT_IND2_EXTRACTION_STATS,
'sf-code-ref-stats' : CFG_REFEXTRACT_SUBFIELD_EXTRACTION_STATS,
'sf-code-ref-time' : CFG_REFEXTRACT_SUBFIELD_EXTRACTION_TIME,
'sf-code-ref-version': CFG_REFEXTRACT_SUBFIELD_EXTRACTION_VERSION,
'version' : CFG_REFEXTRACT_VERSION,
'timestamp' : datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
'status' : status_code,
'reportnum' : counts['reportnum'],
'title' : counts['title'],
'author' : counts['auth_group'],
'url' : counts['url'],
'doi' : counts['doi'],
'misc' : counts['misc'],
}
## Now add the closing tag to the record:
out += u"%(record-close)s\n" \
% {'record-close' : CFG_REFEXTRACT_XML_RECORD_CLOSE, }
## Be sure to call this BEFORE compress_subfields
out = filter_processed_references(''.join(out))
## Compress multiple 'm' subfields in a datafield
out = compress_subfields(out, CFG_REFEXTRACT_SUBFIELD_MISC)
## Compress multiple 'h' subfields in a datafield
out = compress_subfields(out, CFG_REFEXTRACT_SUBFIELD_AUTH)
return out
def build_xml_citations(splitted_citations, line_marker):
return [build_xml_citation(citation_elements, line_marker) \
for citation_elements in splitted_citations]
def build_xml_citation(citation_elements, line_marker, inspire_format=None):
""" Create the MARC-XML string of the found reference information which was taken
from a tagged reference line.
@param citation_elements: (list) an ordered list of dictionary elements,
with each element corresponding to a found piece of information from a reference line.
@param line_marker: (string) The line marker for this single reference line (e.g. [19])
@return xml_line: (string) The MARC-XML representation of the list of reference elements
"""
if inspire_format is None:
inspire_format = CFG_INSPIRE_SITE
## Begin the datafield element
xml_line = start_datafield_element(line_marker)
## This will hold the ordering of tags which have been appended to the xml line
## This list will be used to control the desisions involving the creation of new citation lines
## (in the event of a new set of authors being recognised, or strange title ordering...)
line_elements = []
## This is a list which will hold the current 'over-view' of a single reference line,
## as a list of lists, where each list corresponds to the contents of a datafield element
## in the xml mark-up
citation_structure = []
auth_for_ibid = None
for element in citation_elements:
## Before going onto checking 'what' the next element is, handle misc text and semi-colons
## Multiple misc text subfields will be compressed later
## This will also be the only part of the code that deals with MISC tag_typed elements
if element['misc_txt'].strip(".,:;- []"):
xml_line = append_subfield_element(xml_line,
CFG_REFEXTRACT_SUBFIELD_MISC,
element['misc_txt'].strip(".,:;- []"))
# Now handle the type dependent actions
# TITLE
if element['type'] == "JOURNAL":
# Select the journal title output format
if inspire_format:
# ADD to current datafield
xml_line += """
<subfield code="%(sf-code-ref-title)s">%(title)s,%(volume)s,%(page)s</subfield>""" \
% {'sf-code-ref-title': CFG_REFEXTRACT_SUBFIELD_TITLE,
'title' : encode_for_xml(element['title']),
'volume' : encode_for_xml(element['volume']),
'page' : encode_for_xml(element['page']),
}
else:
# ADD to current datafield
xml_line += """
<subfield code="%(sf-code-ref-title)s">%(title)s %(volume)s (%(year)s) %(page)s</subfield>""" \
% {'sf-code-ref-title': CFG_REFEXTRACT_SUBFIELD_TITLE,
'title' : encode_for_xml(element['title']),
'volume' : encode_for_xml(element['volume']),
'year' : encode_for_xml(element['year']),
'page' : encode_for_xml(element['page']),
}
# Now, if there are any extra (numeration based) IBID's after this title
if len(element['extra_ibids']) > 0:
# At least one IBID is present, these are to be outputted each into their own datafield
for ibid in element['extra_ibids']:
# %%%%% Set as NEW citation line %%%%%
(xml_line, auth_for_ibid) = append_datafield_element(line_marker,
citation_structure,
line_elements,
auth_for_ibid,
xml_line)
if inspire_format:
xml_line += """
<subfield code="%(sf-code-ref-title)s">%(title)s,%(volume)s,%(page)s</subfield>""" \
% {'sf-code-ref-title': CFG_REFEXTRACT_SUBFIELD_TITLE,
'title' : encode_for_xml(ibid['title']),
'volume' : encode_for_xml(ibid['volume']),
'page' : encode_for_xml(ibid['page']),
}
else:
xml_line += """
<subfield code="%(sf-code-ref-title)s">%(title)s %(volume)s (%(year)s) %(page)s</subfield>""" \
% {'sf-code-ref-title': CFG_REFEXTRACT_SUBFIELD_TITLE,
'title' : encode_for_xml(ibid['title']),
'volume' : encode_for_xml(ibid['volume']),
'year' : encode_for_xml(ibid['year']),
'page' : encode_for_xml(ibid['page']),
}
# Add a Title element to the past elements list, since we last found an IBID
line_elements.append(element)
# REPORT NUMBER
elif element['type'] == "REPORTNUMBER":
# ADD to current datafield
xml_line = append_subfield_element(xml_line,
CFG_REFEXTRACT_SUBFIELD_REPORT_NUM,
element['report_num'])
line_elements.append(element)
# URL
elif element['type'] == "URL":
if element['url_string'] == element['url_desc']:
# Build the datafield for the URL segment of the reference line:
xml_line = append_subfield_element(xml_line,
CFG_REFEXTRACT_SUBFIELD_URL,
element['url_string'])
# Else, in the case that the url string and the description differ in some way, include them both
else:
# Build the datafield for the URL segment of the reference line:
xml_line += """
<subfield code="%(sf-code-ref-url)s">%(url)s</subfield>
<subfield code="%(sf-code-ref-url-desc)s">%(url-desc)s</subfield>""" \
% {'sf-code-ref-url' : CFG_REFEXTRACT_SUBFIELD_URL,
'sf-code-ref-url-desc': CFG_REFEXTRACT_SUBFIELD_URL_DESCR,
'url' : encode_for_xml(element['url_string']),
'url-desc' : encode_for_xml(element['url_desc'])
}
line_elements.append(element)
# DOI
elif element['type'] == "DOI":
## Split on hitting another DOI in the same line
if is_in_line_elements("DOI", line_elements):
## %%%%% Set as NEW citation line %%%%%
xml_line, auth_for_ibid = append_datafield_element(line_marker,
citation_structure,
line_elements,
auth_for_ibid,
xml_line)
xml_line = append_subfield_element(xml_line,
CFG_REFEXTRACT_SUBFIELD_DOI,
element['doi_string'])
line_elements.append(element)
# AUTHOR
elif element['type'] == "AUTH":
value = element['auth_txt']
if element['auth_type'] == 'incl':
value = "(%s)" % value
if is_in_line_elements("AUTH", line_elements) and line_elements[-1]['type'] != "AUTH":
xml_line = append_subfield_element(xml_line,
CFG_REFEXTRACT_SUBFIELD_MISC,
value)
else:
xml_line = append_subfield_element(xml_line,
CFG_REFEXTRACT_SUBFIELD_AUTH,
value)
line_elements.append(element)
elif element['type'] == "QUOTED":
xml_line = append_subfield_element(xml_line,
CFG_REFEXTRACT_SUBFIELD_QUOTED,
element['title'])
line_elements.append(element)
elif element['type'] == "ISBN":
xml_line = append_subfield_element(xml_line,
CFG_REFEXTRACT_SUBFIELD_ISBN,
element['ISBN'])
line_elements.append(element)
elif element['type'] == "BOOK":
xml_line = append_subfield_element(xml_line,
CFG_REFEXTRACT_SUBFIELD_QUOTED,
element['title'])
xml_line += '\n <subfield code="%s" />' % \
CFG_REFEXTRACT_SUBFIELD_BOOK
line_elements.append(element)
elif element['type'] == "PUBLISHER":
xml_line = append_subfield_element(xml_line,
CFG_REFEXTRACT_SUBFIELD_PUBLISHER,
element['publisher'])
line_elements.append(element)
elif element['type'] == "YEAR":
xml_line = append_subfield_element(xml_line,
CFG_REFEXTRACT_SUBFIELD_YEAR,
element['year'])
line_elements.append(element)
# Append the author, if needed for an ibid, for the last element
# in the entire line. Don't bother setting the author to be used
# for ibids, since the line is finished
xml_line += check_author_for_ibid(line_elements, auth_for_ibid)[0]
# Close the ending datafield element
xml_line += "\n </datafield>\n"
return xml_line
def append_subfield_element(xml_line, subfield_code, value):
xml_element = '\n <subfield code="' \
'%(sf-code-ref-auth)s">%(value)s</subfield>' % {
'value' : encode_for_xml(value),
'sf-code-ref-auth' : subfield_code,
}
return xml_line + xml_element
def start_datafield_element(line_marker):
""" Start a brand new datafield element with a marker subfield.
@param line_marker: (string) The line marker which will be the sole
content of the newly created marker subfield. This will always be the
first subfield to be created for a new datafield element.
@return: (string) The string holding the relevant datafield and
subfield tags.
"""
marker_subfield = """
<subfield code="%(sf-code-ref-marker)s">%(marker-val)s</subfield>""" \
% {'sf-code-ref-marker': CFG_REFEXTRACT_SUBFIELD_MARKER,
'marker-val' : encode_for_xml(format_marker(line_marker))}
new_datafield = """ <datafield tag="%(df-tag-ref)s" ind1="%(df-ind1-ref)s" ind2="%(df-ind2-ref)s">%(marker-subfield)s""" \
% {'df-tag-ref' : CFG_REFEXTRACT_TAG_ID_REFERENCE,
'df-ind1-ref' : CFG_REFEXTRACT_IND1_REFERENCE,
'df-ind2-ref' : CFG_REFEXTRACT_IND2_REFERENCE,
'marker-subfield': marker_subfield}
return new_datafield
def dump_or_split_author(misc_txt, line_elements):
"""
Given the list of current elements, and misc text, try to decide how to use this
author for splitting heuristics, and see if it is useful. Returning 'dump' indicates
put this author into misc text, since it had been identified as bad. 'split'
indicates split the line and place this author into the fresh datafield. The empty string
indicates add this author as normal to the current xml datafield.
A line will be split using author information in two situations:
1. When there already exists a previous author group in the same line
2. If the only item in the current line is a title, with no misc text
In both situations, the newly found author element is placed into the newly created
datafield.
This method heavily assumes that the first author group found in a single citation is the
most reliable (In accordance with the IEEE standard, which states that authors should
be written at the beginning of a citation, in the overwhelming majority of cases).
@param misc_txt: (string) The misc text for this reference line
@param line_elements: (list) The list of elements found for this current line
@return: (string) The action to take to deal with this author.
"""
## If an author has already been found in this reference line
if is_in_line_elements("AUTH", line_elements):
## If this author group is directly after another author group,
## with minimal misc text between, then this author group is very likely to be wrong.
if line_elements[-1]['type'] == "AUTH" \
and len(misc_txt) < CGF_REFEXTRACT_ADJACENT_AUTH_MISC_SEPARATION:
return "dump"
## Else, trigger a new reference line
return "split"
## In cases where an author is directly after a lone title (ibid or normal, with no misc),
## Trigger a new reference line
if is_in_line_elements("JOURNAL", line_elements) and len(line_elements) == 1 \
and len(misc_txt) == 0:
return "split"
return ""
def is_in_line_elements(element_type, line_elements):
""" Checks the list of current elements in the line for the given element type """
for i, element in enumerate(line_elements):
if element['type'] == element_type:
return (True, line_elements[i])
return False
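# Note the asymmetric return contract: a hit yields the truthy tuple
# (True, matching_element) -- callers subscript it with [1] to fetch the
# element -- while a miss yields the bare boolean False.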
def split_on_semi_colon(misc_txt, line_elements, elements_processed, total_elements):
""" Given some misc text, see if there are any semi-colons which may indiciate that
a reference line is in fact two separate citations.
@param misc_txt: (string) The misc_txt to look for semi-colons within.
@param line_elements: (list) The list of single upper-case chars which
represent an element of a reference which has been processed.
@param elements_processed: (integer) The number of elements which have been
*looked at* for this entire reference line, regardless of splits
@param total_elements: (integer) The total number of elements which
have been identified in the *entire* reference line
@return: (string) Depicting where the semi-colon was found in relation to the
rest of the misc_txt. An empty string if a semi-colon was not found, or one was
found relating to an escaped piece of text.
"""
## If there has already been meaningful information found in the reference
## and there are still elements to be processed beyond the element relating to
## this misc_txt
if (is_in_line_elements("JOURNAL", line_elements) \
or is_in_line_elements("REPORTNUMBER", line_elements) \
or len(misc_txt) >= CGF_REFEXTRACT_SEMI_COLON_MISC_TEXT_SENSITIVITY) \
and elements_processed < total_elements:
if len(misc_txt) >= 4 and \
(misc_txt[-5:] == '&amp;' or misc_txt[-4:] == '&lt;'):
## This is a semi-colon which does not indicate a new citation
return ""
else:
## If a semi-colon is at the end, make sure to append preceding misc_txt to
## the current datafield element
if misc_txt.strip(" .,")[-1] == ";":
return "after"
## Else, make sure to append the misc_txt to the *newly created datafield element*
elif misc_txt.strip(" .,")[0] == ";":
return "before"
return ""
def check_author_for_ibid(line_elements, author):
""" Given a list of elements for an *entire* reference line, and the current
author element to be used for ibids, check to see if that author element needs
to be inserted into this line, depending on the presence of ibids and whether
or not there is already an author paired with an ibid.
Also, if no ibids are present in the line, see if the author element needs
to be updated, depending on the presence of a normal title and a corresponding
author group.
@param line_elements: List of line elements for the entire processed reference
line
@param author: The current parent author element to be used with an ibid
@return: (tuple) - containing a possible new author subfield, and the parent
author element to be used for future ibids (if any)
"""
## Upon splitting, check for ibids in the previous line,
## If an appropriate author was found, pair it with this ibid.
## (i.e., an author has not been explicitly paired with this ibid already
## and an author exists with the parent title to which this ibid refers)
if is_in_line_elements("JOURNAL", line_elements):
## Get the title element for this line
title_element = is_in_line_elements("JOURNAL", line_elements)[1]
if author is not None and not is_in_line_elements("AUTH", line_elements) \
and title_element['is_ibid']:
## Return the author subfield which needs to be appended for an ibid in the line
## No need to reset the author to be used for ibids, since this line holds an ibid
return """
<subfield code="%(sf-code-ref-auth)s">%(authors)s</subfield>""" \
% {'authors' : encode_for_xml(author['auth_txt'].strip('()')),
'sf-code-ref-auth' : CFG_REFEXTRACT_SUBFIELD_AUTH,
}, author
## Set the author to be used for ibids, when a standard title is present in this line,
## as well as an author
if not title_element['is_ibid'] and is_in_line_elements("AUTH", line_elements):
## Set the author to be used for ibids, in the event that a subsequent ibid is found
## this author element will be repeated.
## This author is only used when an ibid is in a line
## and there is no other author found in the line.
author = is_in_line_elements("AUTH", line_elements)[1]
## If there is no author associated with this head title, clear the author to be used for ibids
elif not title_element['is_ibid']:
author = None
## If an author does not need to be replicated for an ibid, append nothing to the xml line
return "", author
def append_datafield_element(line_marker,
citation_structure,
line_elements,
author,
xml_line):
""" Finish the current datafield element and start a new one, with a new
marker subfield.
@param line_marker: (string) The line marker which will be the sole
content of the newly created marker subfield. This will always be the
first subfield to be created for a new datafield element.
@return new_datafield: (string) The string holding the relevant
datafield and subfield tags.
"""
## Add an author, if one must be added for ibid's, before splitting this line
## Also, if a standard title and an author are both present, save the author for future use
new_datafield, author = check_author_for_ibid(line_elements, author)
xml_line += new_datafield
## Start the new datafield
xml_line += """
</datafield>
<datafield tag="%(df-tag-ref)s" ind1="%(df-ind1-ref)s" ind2="%(df-ind2-ref)s">
<subfield code="%(sf-code-ref-marker)s">%(marker-val)s</subfield>""" \
% {'df-tag-ref' : CFG_REFEXTRACT_TAG_ID_REFERENCE,
'df-ind1-ref' : CFG_REFEXTRACT_IND1_REFERENCE,
'df-ind2-ref' : CFG_REFEXTRACT_IND2_REFERENCE,
'sf-code-ref-marker' : CFG_REFEXTRACT_SUBFIELD_MARKER,
'marker-val' : encode_for_xml(format_marker(line_marker))
}
## add the past elements for the previous citation to the citation_structure list
## (citation_structure is a reference to the initial citation_structure list found in the calling method)
citation_structure.append(line_elements)
## Clear the elements in the referenced list of elements
del line_elements[:]
return xml_line, author
def filter_processed_references(out):
""" apply filters to reference lines found - to remove junk"""
reference_lines = out.split('\n')
# Removes too long and too short m tags
m_restricted, ref_lines = restrict_m_subfields(reference_lines)
if m_restricted:
a_tag = re.compile('\<subfield code=\"a\"\>(.*?)\<\/subfield\>')
for i in range(len(ref_lines)):
# Checks to see that the datafield has the attribute ind2="6",
# before looking to see if the subfield code attribute is 'a'
if ref_lines[i].find('<datafield tag="999" ind1="C" ind2="6">') != -1 \
and (len(ref_lines) - 1) > i:
# For each line in this datafield element, try to find the subfield whose code attribute is 'a'
while ref_lines[i].find('</datafield>') != -1 and (len(ref_lines) - 1) > i:
i += 1
# <subfield code="a">Invenio/X.XX.X
# refextract/X.XX.X-timestamp-err-repnum-title-URL-misc
# remake the "a" tag for new numbe of "m" tags
if a_tag.search(ref_lines[i]):
data = a_tag.search(ref_lines[i]).group(1)
words1 = data.split()
words2 = words1[-1].split('-')
old_m = int(words2[-1])
words2[-1] = str(old_m - m_restricted)
data1 = '-'.join(words2)
words1[-1] = data1
new_data = ' '.join(words1)
ref_lines[i] = ' <subfield code="a">' + new_data + '</subfield>'
break
new_out = '\n'.join([l for l in [rec.rstrip() for rec in ref_lines] if l])
new_out_lines = new_out.split('\n')
if len(reference_lines) != len(new_out_lines):
write_message(" * filter results: unfiltered reference line count is %d and filtered count is %d" \
% (len(reference_lines), len(new_out_lines)), verbose=2)
return new_out
def restrict_m_subfields(reference_lines):
"""Remove complete datafields which hold ONLY a single 'm' subfield,
AND where the misc content is too short or too long to be of use.
Min and max lengths derived by inspection of actual data. """
min_length = 4
max_length = 1024
m_tag = re.compile('\<subfield code=\"m\"\>(.*?)\<\/subfield\>')
filter_list = []
m_restricted = 0
for i in range(len(reference_lines)): # set up initial filter
filter_list.append(1)
for i in range(len(reference_lines)):
if m_tag.search(reference_lines[i]):
if (i - 2) >= 0 and (i + 1) < len(reference_lines):
if reference_lines[i + 1].find('</datafield>') != -1 and \
reference_lines[i - 1].find('<subfield code="o">') != -1 and \
reference_lines[i - 2].find('<datafield') != -1:
## If both of these are true then it's a solitary "m" tag
mlength = len(m_tag.search(reference_lines[i]).group(1))
if mlength < min_length or mlength > max_length:
filter_list[i - 2] = filter_list[i - 1] = filter_list[i] = filter_list[i + 1] = 0
m_restricted += 1
new_reference_lines = []
for i in range(len(reference_lines)):
if filter_list[i]:
new_reference_lines.append(reference_lines[i])
return m_restricted, new_reference_lines
def get_subfield_content(line, subfield_code):
""" Given a line (subfield element) and a xml code attribute for a subfield element,
return the contents of the subfield element.
"""
content = line.split('<subfield code="' + subfield_code + '">')[1]
content = content.split('</subfield>')[0]
return content
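# For example (a sketch):
#   get_subfield_content(' <subfield code="m">misc text</subfield>', 'm')
#   returns 'misc text'; the line is assumed to hold exactly one such subfield.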
def compress_subfields(out, subfield_code):
"""
For each datafield, compress multiple subfields of type 'subfield_code' into a single one
e.g. for MISC text, change xml format from:
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">1.</subfield>
<subfield code="m">J. Dukelsky, S. Pittel and G. Sierra</subfield>
<subfield code="s">Rev. Mod. Phys. 76 (2004) 643</subfield>
<subfield code="m">and this is some more misc text</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">2.</subfield>
<subfield code="m">J. von Delft and D.C. Ralph,</subfield>
<subfield code="s">Phys. Rep. 345 (2001) 61</subfield>
</datafield>
to:
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">1.</subfield>
<subfield code="m">J. Dukelsky, S. Pittel and G. Sierra and this is some more misc text</subfield>
<subfield code="s">Rev. Mod. Phys. 76 (2004) 643</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">2.</subfield>
<subfield code="m">J. von Delft and D.C. Ralph,</subfield>
<subfield code="s">Phys. Rep. 345 (2001) 61</subfield>
</datafield>
"""
in_lines = out.split('\n')
# hold the subfield compressed version of the xml, line by line
new_rec_lines = []
# Used to indicate when the selected subfield has already been reached
# inside a particular datafield
position = 0
# Where the concatenated misc text is held before appended at the end
content_text = ""
# Components of the misc subfield elements
subfield_start = " <subfield code=\"%s\">" % subfield_code
subfield_end = "</subfield>"
for line in in_lines:
## If reached the end of the datafield
if line.find('</datafield>') != -1:
if len(content_text) > 0:
# Insert the concatenated misc contents back where it was first
# encountered (don't right-strip semi-colons, as these may be
# needed for &amp; or &lt;)
if subfield_code == 'm':
content_text = content_text.strip(" ,.").lstrip(" ;")
new_rec_lines[position] = new_rec_lines[position] + \
content_text + subfield_end
content_text = ""
position = 0
new_rec_lines.append(line)
# Found subfield in question, concatenate subfield contents
# for this single datafield
elif line.find(subfield_start.strip()) != -1:
if position == 0:
## Save the position of this found subfield
## for later insertion into the same place
new_rec_lines.append(subfield_start)
position = len(new_rec_lines) - 1
new_text = get_subfield_content(line, subfield_code)
if content_text and new_text:
## Append spaces between merged text, if needed
if (content_text[-1] + new_text[0]).find(" ") == -1:
new_text = " " + new_text
content_text += new_text
else:
new_rec_lines.append(line)
## Create the readable file from the list of lines.
new_out = [l.rstrip() for l in new_rec_lines]
return '\n'.join(filter(None, new_out))
| gpl-2.0 |
rouault/pycsw | etc/mappings.py | 6 | 4218 | # -*- coding: ISO-8859-15 -*-
# =================================================================
#
# Authors: Tom Kralidis <tomkralidis@gmail.com>
#
# Copyright (c) 2015 Tom Kralidis
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
# sample mappings.py
#
# use this file to bind to an existing alternate metadata database model
#
# steps:
# - update the 'mappings' dict to the column names of your existing database
# - set repository.mappings to the location of this file
MD_CORE_MODEL = {
'typename': 'pycsw:CoreMetadata',
'outputschema': 'http://pycsw.org/metadata',
'mappings': {
'pycsw:Identifier': 'identifier',
'pycsw:Typename': 'typename',
'pycsw:Schema': 'schema',
'pycsw:MdSource': 'mdsource',
'pycsw:InsertDate': 'insert_date',
'pycsw:XML': 'xml',
'pycsw:AnyText': 'anytext',
'pycsw:Language': 'language',
'pycsw:Title': 'title',
'pycsw:Abstract': 'abstract',
'pycsw:Keywords': 'keywords',
'pycsw:KeywordType': 'keywordstype',
'pycsw:Format': 'format',
'pycsw:Source': 'source',
'pycsw:Date': 'date',
'pycsw:Modified': 'date_modified',
'pycsw:Type': 'type',
'pycsw:BoundingBox': 'wkt_geometry',
'pycsw:CRS': 'crs',
'pycsw:AlternateTitle': 'title_alternate',
'pycsw:RevisionDate': 'date_revision',
'pycsw:CreationDate': 'date_creation',
'pycsw:PublicationDate': 'date_publication',
'pycsw:OrganizationName': 'organization',
'pycsw:SecurityConstraints': 'securityconstraints',
'pycsw:ParentIdentifier': 'parentidentifier',
'pycsw:TopicCategory': 'topicategory',
'pycsw:ResourceLanguage': 'resourcelanguage',
'pycsw:GeographicDescriptionCode': 'geodescode',
'pycsw:Denominator': 'denominator',
'pycsw:DistanceValue': 'distancevalue',
'pycsw:DistanceUOM': 'distanceuom',
'pycsw:TempExtent_begin': 'time_begin',
'pycsw:TempExtent_end': 'time_end',
'pycsw:ServiceType': 'servicetype',
'pycsw:ServiceTypeVersion': 'servicetypeversion',
'pycsw:Operation': 'operation',
'pycsw:CouplingType': 'couplingtype',
'pycsw:OperatesOn': 'operateson',
'pycsw:OperatesOnIdentifier': 'operatesonidentifier',
'pycsw:OperatesOnName': 'operatesoname',
'pycsw:Degree': 'degree',
'pycsw:AccessConstraints': 'accessconstraints',
'pycsw:OtherConstraints': 'otherconstraints',
'pycsw:Classification': 'classification',
'pycsw:ConditionApplyingToAccessAndUse': 'conditionapplyingtoaccessanduse',
'pycsw:Lineage': 'lineage',
'pycsw:ResponsiblePartyRole': 'responsiblepartyrole',
'pycsw:SpecificationTitle': 'specificationtitle',
'pycsw:SpecificationDate': 'specificationdate',
'pycsw:SpecificationDateType': 'specificationdatetype',
'pycsw:Creator': 'creator',
'pycsw:Publisher': 'publisher',
'pycsw:Contributor': 'contributor',
'pycsw:Relation': 'relation',
'pycsw:Links': 'links',
}
}
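# Example: point pycsw at this model from its configuration file (the path
# below is hypothetical):
#
# [repository]
# mappings=/path/to/etc/mappings.py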
| mit |
txemi/ansible | lib/ansible/modules/cloud/amazon/lightsail.py | 68 | 16237 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.0'}
DOCUMENTATION = '''
---
module: lightsail
short_description: Create or delete a virtual machine instance in AWS Lightsail
description:
- Creates or deletes instances in AWS Lightsail and optionally waits for the instance to be 'running'.
version_added: "2.4"
author: "Nick Ball (@nickball)"
options:
state:
description:
- Indicate desired state of the target.
default: present
choices: ['present', 'absent', 'running', 'restarted', 'stopped']
name:
description:
- Name of the instance
required: true
default : null
zone:
description:
- AWS availability zone in which to launch the instance. Required when state='present'
required: false
default: null
blueprint_id:
description:
- ID of the instance blueprint image. Required when state='present'
required: false
default: null
bundle_id:
description:
- Bundle of specification info for the instance. Required when state='present'
required: false
default: null
user_data:
description:
- Launch script that can configure the instance with additional data
required: false
default: null
key_pair_name:
description:
- Name of the key pair to use with the instance
required: false
default: null
wait:
description:
- Wait for the instance to be in state 'running' before returning. If wait is "no" an ip_address may not be returned
default: "yes"
choices: [ "yes", "no" ]
wait_timeout:
description:
- How long before wait gives up, in seconds.
default: 300
requirements:
- "python >= 2.6"
- boto3
extends_documentation_fragment: aws
'''
EXAMPLES = '''
# Create a new Lightsail instance, register the instance details
- lightsail:
state: present
name: myinstance
region: us-east-1
zone: us-east-1a
blueprint_id: ubuntu_16_04
bundle_id: nano_1_0
key_pair_name: id_rsa
user_data: " echo 'hello world' > /home/ubuntu/test.txt"
wait_timeout: 500
register: my_instance
- debug:
msg: "Name is {{ my_instance.instance.name }}"
- debug:
msg: "IP is {{ my_instance.instance.publicIpAddress }}"
# Delete an instance if present
- lightsail:
state: absent
region: us-east-1
name: myinstance
'''
RETURN = '''
changed:
description: if a snapshot has been modified/created
returned: always
type: bool
sample:
changed: true
instance:
description: instance data
returned: always
type: dict
sample:
arn: "arn:aws:lightsail:us-east-1:448830907657:Instance/1fef0175-d6c8-480e-84fa-214f969cda87"
blueprint_id: "ubuntu_16_04"
blueprint_name: "Ubuntu"
bundle_id: "nano_1_0"
created_at: "2017-03-27T08:38:59.714000-04:00"
hardware:
cpu_count: 1
ram_size_in_gb: 0.5
is_static_ip: false
location:
availability_zone: "us-east-1a"
region_name: "us-east-1"
name: "my_instance"
networking:
monthly_transfer:
gb_per_month_allocated: 1024
ports:
- access_direction: "inbound"
access_from: "Anywhere (0.0.0.0/0)"
access_type: "public"
common_name: ""
from_port: 80
protocol: tcp
to_port: 80
- access_direction: "inbound"
access_from: "Anywhere (0.0.0.0/0)"
access_type: "public"
common_name: ""
from_port: 22
protocol: tcp
to_port: 22
private_ip_address: "172.26.8.14"
public_ip_address: "34.207.152.202"
resource_type: "Instance"
ssh_key_name: "keypair"
state:
code: 16
name: running
support_code: "588307843083/i-0997c97831ee21e33"
username: "ubuntu"
'''
import os
import time
import traceback
try:
import botocore
HAS_BOTOCORE = True
except ImportError:
HAS_BOTOCORE = False
try:
import boto3
except ImportError:
# will be caught by imported HAS_BOTO3
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import ec2_argument_spec, get_aws_connection_info, boto3_conn, HAS_BOTO3, camel_dict_to_snake_dict
def create_instance(module, client, instance_name):
"""
Create an instance
module: Ansible module object
client: authenticated lightsail connection object
instance_name: name of instance to create
Returns a dictionary of instance information
about the new instance.
"""
changed = False
# Check if instance already exists
inst = None
try:
inst = _find_instance_info(client, instance_name)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] != 'NotFoundException':
module.fail_json(msg='Error finding instance {0}, error: {1}'.format(instance_name, e))
zone = module.params.get('zone')
blueprint_id = module.params.get('blueprint_id')
bundle_id = module.params.get('bundle_id')
key_pair_name = module.params.get('key_pair_name')
user_data = module.params.get('user_data')
user_data = '' if user_data is None else user_data
resp = None
if inst is None:
try:
resp = client.create_instances(
instanceNames=[
instance_name
],
availabilityZone=zone,
blueprintId=blueprint_id,
bundleId=bundle_id,
userData=user_data,
keyPairName=key_pair_name,
)
resp = resp['operations'][0]
except botocore.exceptions.ClientError as e:
module.fail_json(msg='Unable to create instance {0}, error: {1}'.format(instance_name, e))
changed = True
inst = _find_instance_info(client, instance_name)
return (changed, inst)
def delete_instance(module, client, instance_name):
"""
Terminates an instance
module: Ansible module object
client: authenticated lightsail connection object
instance_name: name of instance to delete
Returns a dictionary of instance information
about the instance deleted (pre-deletion).
If the instance to be deleted is running
"changed" will be set to False.
"""
# It looks like deleting removes the instance immediately, nothing to wait for
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
wait_max = time.time() + wait_timeout
changed = False
inst = None
try:
inst = _find_instance_info(client, instance_name)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] != 'NotFoundException':
module.fail_json(msg='Error finding instance {0}, error: {1}'.format(instance_name, e))
# Wait for instance to exit transition state before deleting
if wait:
while wait_max > time.time() and inst is not None and inst['state']['name'] in ('pending', 'stopping'):
try:
time.sleep(5)
inst = _find_instance_info(client, instance_name)
except botocore.exceptions.ClientError as e:
if e.response['ResponseMetadata']['HTTPStatusCode'] == 403:
module.fail_json(msg="Failed to delete instance {0}. Check that you have permissions to perform the operation.".format(instance_name),
exception=traceback.format_exc())
elif e.response['Error']['Code'] == "RequestExpired":
module.fail_json(msg="RequestExpired: Failed to delete instance {0}.".format(instance_name), exception=traceback.format_exc())
# sleep and retry
time.sleep(10)
# Attempt to delete
if inst is not None:
while not changed and ((wait and wait_max > time.time()) or (not wait)):
try:
client.delete_instance(instanceName=instance_name)
changed = True
except botocore.exceptions.ClientError as e:
module.fail_json(msg='Error deleting instance {0}, error: {1}'.format(instance_name, e))
# Timed out
if wait and not changed and wait_max <= time.time():
module.fail_json(msg="wait for instance delete timeout at %s" % time.asctime())
return (changed, inst)
def restart_instance(module, client, instance_name):
"""
Reboot an existing instance
module: Ansible module object
client: authenticated lightsail connection object
instance_name: name of instance to reboot
Returns a dictionary of instance information
about the restarted instance
If the instance was not able to reboot,
"changed" will be set to False.
Wait will not apply here as this is an OS-level operation
"""
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
wait_max = time.time() + wait_timeout
changed = False
inst = None
try:
inst = _find_instance_info(client, instance_name)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] != 'NotFoundException':
module.fail_json(msg='Error finding instance {0}, error: {1}'.format(instance_name, e))
# Wait for instance to exit transition state before state change
if wait:
while wait_max > time.time() and inst is not None and inst['state']['name'] in ('pending', 'stopping'):
try:
time.sleep(5)
inst = _find_instance_info(client, instance_name)
except botocore.exceptions.ClientError as e:
if e.response['ResponseMetadata']['HTTPStatusCode'] == 403:
module.fail_json(msg="Failed to restart instance {0}. Check that you have permissions to perform the operation.".format(instance_name),
exception=traceback.format_exc())
elif e.response['Error']['Code'] == "RequestExpired":
module.fail_json(msg="RequestExpired: Failed to restart instance {0}.".format(instance_name), exception=traceback.format_exc())
time.sleep(3)
# send reboot
if inst is not None:
try:
client.reboot_instance(instanceName=instance_name)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] != 'NotFoundException':
module.fail_json(msg='Unable to reboot instance {0}, error: {1}'.format(instance_name, e))
changed = True
return (changed, inst)
def startstop_instance(module, client, instance_name, state):
"""
Starts or stops an existing instance
module: Ansible module object
client: authenticated lightsail connection object
instance_name: name of instance to start/stop
state: Target state ("running" or "stopped")
Returns a dictionary of instance information
about the instance started/stopped
If the instance was not able to state change,
"changed" will be set to False.
"""
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
wait_max = time.time() + wait_timeout
changed = False
inst = None
try:
inst = _find_instance_info(client, instance_name)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] != 'NotFoundException':
module.fail_json(msg='Error finding instance {0}, error: {1}'.format(instance_name, e))
# Wait for instance to exit transition state before state change
if wait:
while wait_max > time.time() and inst is not None and inst['state']['name'] in ('pending', 'stopping'):
try:
time.sleep(5)
inst = _find_instance_info(client, instance_name)
except botocore.exceptions.ClientError as e:
if e.response['ResponseMetadata']['HTTPStatusCode'] == 403:
module.fail_json(msg="Failed to start/stop instance {0}. Check that you have permissions to perform the operation".format(instance_name),
exception=traceback.format_exc())
elif e.response['Error']['Code'] == "RequestExpired":
module.fail_json(msg="RequestExpired: Failed to start/stop instance {0}.".format(instance_name), exception=traceback.format_exc())
time.sleep(1)
# Try state change
if inst is not None and inst['state']['name'] != state:
try:
if state == 'running':
client.start_instance(instanceName=instance_name)
else:
client.stop_instance(instanceName=instance_name)
except botocore.exceptions.ClientError as e:
module.fail_json(msg='Unable to change state for instance {0}, error: {1}'.format(instance_name, e))
changed = True
# Grab current instance info
inst = _find_instance_info(client, instance_name)
return (changed, inst)
def core(module):
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
if not region:
module.fail_json(msg='region must be specified')
client = None
try:
client = boto3_conn(module, conn_type='client', resource='lightsail',
region=region, endpoint=ec2_url, **aws_connect_kwargs)
except (botocore.exceptions.ClientError, botocore.exceptions.ValidationError) as e:
module.fail_json('Failed while connecting to the lightsail service: %s' % e, exception=traceback.format_exc())
changed = False
state = module.params['state']
name = module.params['name']
if state == 'absent':
changed, instance_dict = delete_instance(module, client, name)
elif state in ('running', 'stopped'):
changed, instance_dict = startstop_instance(module, client, name, state)
elif state == 'restarted':
changed, instance_dict = restart_instance(module, client, name)
elif state == 'present':
changed, instance_dict = create_instance(module, client, name)
module.exit_json(changed=changed, instance=camel_dict_to_snake_dict(instance_dict))
def _find_instance_info(client, instance_name):
''' handle exceptions where this function is called '''
inst = None
try:
inst = client.get_instance(instanceName=instance_name)
except botocore.exceptions.ClientError as e:
raise
return inst['instance']
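# Note: per the boto3 Lightsail API, get_instance returns a dict shaped like
# {'instance': {...}}, which is why the helper unwraps the 'instance' key; a
# missing instance surfaces as a ClientError whose Error/Code is
# 'NotFoundException', which the callers above inspect.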
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
name=dict(type='str', required=True),
state=dict(type='str', default='present', choices=['present', 'absent', 'stopped', 'running', 'restarted']),
zone=dict(type='str'),
blueprint_id=dict(type='str'),
bundle_id=dict(type='str'),
key_pair_name=dict(type='str'),
user_data=dict(type='str'),
wait=dict(type='bool', default=True),
wait_timeout=dict(default=300),
))
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO3:
module.fail_json(msg='Python module "boto3" is missing, please install it')
if not HAS_BOTOCORE:
module.fail_json(msg='Python module "botocore" is missing, please install it')
try:
core(module)
except Exception as e:  # ClientError is an Exception subclass; one clause suffices
module.fail_json(msg=str(e), exception=traceback.format_exc())
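# Illustrative playbook snippet (assumed, not from this file) matching the
# argument_spec above; instance name and zone are hypothetical:
#   - name: Ensure a Lightsail instance is running
#     lightsail:
#       name: web-1
#       state: running
#       zone: us-east-1a
#       wait: yes
#       wait_timeout: 300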
if __name__ == '__main__':
main()
| gpl-3.0 |
rogerwang/chromium | third_party/tlslite/tlslite/utils/compat.py | 361 | 4060 | """Miscellaneous functions to mask Python version differences."""
import sys
import os
if sys.version_info < (2,2):
raise AssertionError("Python 2.2 or later required")
if sys.version_info < (2,3):
def enumerate(collection):
return zip(range(len(collection)), collection)
class Set:
def __init__(self, seq=None):
self.values = {}
if seq:
for e in seq:
self.values[e] = None
def add(self, e):
self.values[e] = None
def discard(self, e):
if e in self.values.keys():
del(self.values[e])
def union(self, s):
ret = Set()
for e in self.values.keys():
ret.values[e] = None
for e in s.values.keys():
ret.values[e] = None
return ret
def issubset(self, other):
for e in self.values.keys():
if e not in other.values.keys():
return False
return True
def __nonzero__( self):
return len(self.values.keys())
def __contains__(self, e):
return e in self.values.keys()
def __iter__(self):
return iter(self.values.keys())
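# Illustrative sketch of the dict-backed Set emulation above (values are
# hypothetical):
#   s = Set([1, 2]); s.add(3); s.discard(2)
#   s.union(Set([4])).issubset(Set([1, 3, 4]))  # -> True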
if os.name != "java":
import array
def createByteArraySequence(seq):
return array.array('B', seq)
def createByteArrayZeros(howMany):
return array.array('B', [0] * howMany)
def concatArrays(a1, a2):
return a1+a2
def bytesToString(bytes):
return bytes.tostring()
def stringToBytes(s):
bytes = createByteArrayZeros(0)
bytes.fromstring(s)
return bytes
def numBits(n):
if n==0:
return 0
s = "%x" % n
return ((len(s)-1)*4) + \
{'0':0, '1':1, '2':2, '3':2,
'4':3, '5':3, '6':3, '7':3,
'8':4, '9':4, 'a':4, 'b':4,
'c':4, 'd':4, 'e':4, 'f':4,
}[s[0]]
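# Worked example: numBits(31) formats to "1f", so the result is
# (2 - 1) * 4 + table['1'] = 4 + 1 = 5 bits; numBits(0) returns 0 early.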
BaseException = Exception
import sys
import traceback
def formatExceptionTrace(e):
newStr = "".join(traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback))
return newStr
else:
#Jython 2.1 is missing lots of python 2.3 stuff,
#which we have to emulate here:
#NOTE: JYTHON SUPPORT NO LONGER WORKS, DUE TO USE OF GENERATORS.
#THIS CODE IS LEFT IN SO THAT ONE JYTHON UPDATES TO 2.2, IT HAS A
#CHANCE OF WORKING AGAIN.
import java
import jarray
def createByteArraySequence(seq):
if isinstance(seq, type("")): #If it's a string, convert
seq = [ord(c) for c in seq]
return jarray.array(seq, 'h') #use short instead of bytes, cause bytes are signed
def createByteArrayZeros(howMany):
return jarray.zeros(howMany, 'h') #use short instead of bytes, cause bytes are signed
def concatArrays(a1, a2):
l = list(a1)+list(a2)
return createByteArraySequence(l)
#WAY TOO SLOW - MUST BE REPLACED------------
def bytesToString(bytes):
return "".join([chr(b) for b in bytes])
def stringToBytes(s):
bytes = createByteArrayZeros(len(s))
for count, c in enumerate(s):
bytes[count] = ord(c)
return bytes
#WAY TOO SLOW - MUST BE REPLACED------------
def numBits(n):
if n==0:
return 0
n = 1L * n  # convert to long, if it isn't already
return n.__tojava__(java.math.BigInteger).bitLength()
#Adjust the string to an array of bytes
def stringToJavaByteArray(s):
bytes = jarray.zeros(len(s), 'b')
for count, c in enumerate(s):
x = ord(c)
if x >= 128: x -= 256
bytes[count] = x
return bytes
BaseException = java.lang.Exception
import sys
import traceback
def formatExceptionTrace(e):
newStr = "".join(traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback))
return newStr | bsd-3-clause |
CVL-dev/cvl-fabric-launcher | pyinstaller-2.1/PyInstaller/lib/unittest2/test/test_loader.py | 11 | 49503 | import sys
import types
import unittest2
if sys.version_info[:2] == (2,3):
from sets import Set as set
from sets import ImmutableSet as frozenset
class Test_TestLoader(unittest2.TestCase):
### Tests for TestLoader.loadTestsFromTestCase
################################################################
# "Return a suite of all tests cases contained in the TestCase-derived
# class testCaseClass"
def test_loadTestsFromTestCase(self):
class Foo(unittest2.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
tests = unittest2.TestSuite([Foo('test_1'), Foo('test_2')])
loader = unittest2.TestLoader()
self.assertEqual(loader.loadTestsFromTestCase(Foo), tests)
# "Return a suite of all tests cases contained in the TestCase-derived
# class testCaseClass"
#
# Make sure it does the right thing even if no tests were found
def test_loadTestsFromTestCase__no_matches(self):
class Foo(unittest2.TestCase):
def foo_bar(self): pass
empty_suite = unittest2.TestSuite()
loader = unittest2.TestLoader()
self.assertEqual(loader.loadTestsFromTestCase(Foo), empty_suite)
# "Return a suite of all tests cases contained in the TestCase-derived
# class testCaseClass"
#
# What happens if loadTestsFromTestCase() is given an object
# that isn't a subclass of TestCase? Specifically, what happens
# if testCaseClass is a subclass of TestSuite?
#
# This is checked for specifically in the code, so we better add a
# test for it.
def test_loadTestsFromTestCase__TestSuite_subclass(self):
class NotATestCase(unittest2.TestSuite):
pass
loader = unittest2.TestLoader()
try:
loader.loadTestsFromTestCase(NotATestCase)
except TypeError:
pass
else:
self.fail('Should raise TypeError')
# "Return a suite of all tests cases contained in the TestCase-derived
# class testCaseClass"
#
# Make sure loadTestsFromTestCase() picks up the default test method
# name (as specified by TestCase), even though the method name does
# not match the default TestLoader.testMethodPrefix string
def test_loadTestsFromTestCase__default_method_name(self):
class Foo(unittest2.TestCase):
def runTest(self):
pass
loader = unittest2.TestLoader()
# This has to be false for the test to succeed
self.assertFalse('runTest'.startswith(loader.testMethodPrefix))
suite = loader.loadTestsFromTestCase(Foo)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [Foo('runTest')])
################################################################
### /Tests for TestLoader.loadTestsFromTestCase
### Tests for TestLoader.loadTestsFromModule
################################################################
# "This method searches `module` for classes derived from TestCase"
def test_loadTestsFromModule__TestCase_subclass(self):
m = types.ModuleType('m')
class MyTestCase(unittest2.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest2.TestLoader()
suite = loader.loadTestsFromModule(m)
self.assertIsInstance(suite, loader.suiteClass)
expected = [loader.suiteClass([MyTestCase('test')])]
self.assertEqual(list(suite), expected)
# "This method searches `module` for classes derived from TestCase"
#
# What happens if no tests are found (no TestCase instances)?
def test_loadTestsFromModule__no_TestCase_instances(self):
m = types.ModuleType('m')
loader = unittest2.TestLoader()
suite = loader.loadTestsFromModule(m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [])
# "This method searches `module` for classes derived from TestCase"
#
# What happens if no tests are found (TestCases instances, but no tests)?
def test_loadTestsFromModule__no_TestCase_tests(self):
m = types.ModuleType('m')
class MyTestCase(unittest2.TestCase):
pass
m.testcase_1 = MyTestCase
loader = unittest2.TestLoader()
suite = loader.loadTestsFromModule(m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [loader.suiteClass()])
# "This method searches `module` for classes derived from TestCase"s
#
# What happens if loadTestsFromModule() is given something other
# than a module?
#
# XXX Currently, it succeeds anyway. This flexibility
# should either be documented or loadTestsFromModule() should
# raise a TypeError
#
# XXX Certain people are using this behaviour. We'll add a test for it
def test_loadTestsFromModule__not_a_module(self):
class MyTestCase(unittest2.TestCase):
def test(self):
pass
class NotAModule(object):
test_2 = MyTestCase
loader = unittest2.TestLoader()
suite = loader.loadTestsFromModule(NotAModule)
reference = [unittest2.TestSuite([MyTestCase('test')])]
self.assertEqual(list(suite), reference)
# Check that loadTestsFromModule honors (or not) a module
# with a load_tests function.
def test_loadTestsFromModule__load_tests(self):
m = types.ModuleType('m')
class MyTestCase(unittest2.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
load_tests_args = []
def load_tests(loader, tests, pattern):
self.assertIsInstance(tests, unittest2.TestSuite)
load_tests_args.extend((loader, tests, pattern))
return tests
m.load_tests = load_tests
loader = unittest2.TestLoader()
suite = loader.loadTestsFromModule(m)
self.assertIsInstance(suite, unittest2.TestSuite)
self.assertEquals(load_tests_args, [loader, suite, None])
load_tests_args = []
suite = loader.loadTestsFromModule(m, use_load_tests=False)
self.assertEquals(load_tests_args, [])
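# Illustrative sketch of the load_tests protocol exercised above: a module
# opts in by defining (module and test case names are hypothetical):
#   def load_tests(loader, standard_tests, pattern):
#       standard_tests.addTests(loader.loadTestsFromTestCase(MyTestCase))
#       return standard_tests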
def test_loadTestsFromModule__faulty_load_tests(self):
m = types.ModuleType('m')
def load_tests(loader, tests, pattern):
raise TypeError('some failure')
m.load_tests = load_tests
loader = unittest2.TestLoader()
suite = loader.loadTestsFromModule(m)
self.assertIsInstance(suite, unittest2.TestSuite)
self.assertEqual(suite.countTestCases(), 1)
test = list(suite)[0]
self.assertRaisesRegexp(TypeError, "some failure", test.m)
################################################################
### /Tests for TestLoader.loadTestsFromModule()
### Tests for TestLoader.loadTestsFromName()
################################################################
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Is ValueError raised in response to an empty name?
def test_loadTestsFromName__empty_name(self):
loader = unittest2.TestLoader()
try:
loader.loadTestsFromName('')
except ValueError, e:
self.assertEqual(str(e), "Empty module name")
else:
self.fail("TestLoader.loadTestsFromName failed to raise ValueError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when the name contains invalid characters?
def test_loadTestsFromName__malformed_name(self):
loader = unittest2.TestLoader()
# XXX Should this raise ValueError or ImportError?
try:
loader.loadTestsFromName('abc () //')
except ValueError:
pass
except ImportError:
pass
else:
self.fail("TestLoader.loadTestsFromName failed to raise ValueError")
# "The specifier name is a ``dotted name'' that may resolve ... to a
# module"
#
# What happens when a module by that name can't be found?
def test_loadTestsFromName__unknown_module_name(self):
loader = unittest2.TestLoader()
try:
loader.loadTestsFromName('sdasfasfasdf')
except ImportError, e:
self.assertEqual(str(e), "No module named sdasfasfasdf")
else:
self.fail("TestLoader.loadTestsFromName failed to raise ImportError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when the module is found, but the attribute can't?
def test_loadTestsFromName__unknown_attr_name(self):
loader = unittest2.TestLoader()
try:
loader.loadTestsFromName('unittest2.sdasfasfasdf')
except AttributeError, e:
self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
else:
self.fail("TestLoader.loadTestsFromName failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when we provide the module, but the attribute can't be
# found?
def test_loadTestsFromName__relative_unknown_name(self):
loader = unittest2.TestLoader()
try:
loader.loadTestsFromName('sdasfasfasdf', unittest2)
except AttributeError, e:
self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
else:
self.fail("TestLoader.loadTestsFromName failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# Does loadTestsFromName raise ValueError when passed an empty
# name relative to a provided module?
#
# XXX Should probably raise a ValueError instead of an AttributeError
def test_loadTestsFromName__relative_empty_name(self):
loader = unittest2.TestLoader()
try:
loader.loadTestsFromName('', unittest2)
except AttributeError:
pass
else:
self.fail("Failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens when an impossible name is given, relative to the provided
# `module`?
def test_loadTestsFromName__relative_malformed_name(self):
loader = unittest2.TestLoader()
# XXX Should this raise AttributeError or ValueError?
try:
loader.loadTestsFromName('abc () //', unittest2)
except ValueError:
pass
except AttributeError:
pass
else:
self.fail("TestLoader.loadTestsFromName failed to raise ValueError")
# "The method optionally resolves name relative to the given module"
#
# Does loadTestsFromName raise TypeError when the `module` argument
# isn't a module object?
#
# XXX Accepts the not-a-module object, ignoring the object's type
# This should raise an exception or the method name should be changed
#
# XXX Some people are relying on this, so keep it for now
def test_loadTestsFromName__relative_not_a_module(self):
class MyTestCase(unittest2.TestCase):
def test(self):
pass
class NotAModule(object):
test_2 = MyTestCase
loader = unittest2.TestLoader()
suite = loader.loadTestsFromName('test_2', NotAModule)
reference = [MyTestCase('test')]
self.assertEqual(list(suite), reference)
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Does it raise an exception if the name resolves to an invalid
# object?
def test_loadTestsFromName__relative_bad_object(self):
m = types.ModuleType('m')
m.testcase_1 = object()
loader = unittest2.TestLoader()
try:
loader.loadTestsFromName('testcase_1', m)
except TypeError:
pass
else:
self.fail("Should have raised TypeError")
# "The specifier name is a ``dotted name'' that may
# resolve either to ... a test case class"
def test_loadTestsFromName__relative_TestCase_subclass(self):
m = types.ModuleType('m')
class MyTestCase(unittest2.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest2.TestLoader()
suite = loader.loadTestsFromName('testcase_1', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [MyTestCase('test')])
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
def test_loadTestsFromName__relative_TestSuite(self):
m = types.ModuleType('m')
class MyTestCase(unittest2.TestCase):
def test(self):
pass
m.testsuite = unittest2.TestSuite([MyTestCase('test')])
loader = unittest2.TestLoader()
suite = loader.loadTestsFromName('testsuite', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [MyTestCase('test')])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a test method within a test case class"
def test_loadTestsFromName__relative_testmethod(self):
m = types.ModuleType('m')
class MyTestCase(unittest2.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest2.TestLoader()
suite = loader.loadTestsFromName('testcase_1.test', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [MyTestCase('test')])
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Does loadTestsFromName() raise the proper exception when trying to
# resolve "a test method within a test case class" that doesn't exist
# for the given name (relative to a provided module)?
def test_loadTestsFromName__relative_invalid_testmethod(self):
m = types.ModuleType('m')
class MyTestCase(unittest2.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest2.TestLoader()
try:
loader.loadTestsFromName('testcase_1.testfoo', m)
except AttributeError, e:
self.assertEqual(str(e), "type object 'MyTestCase' has no attribute 'testfoo'")
else:
self.fail("Failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a ... TestSuite instance"
def test_loadTestsFromName__callable__TestSuite(self):
m = types.ModuleType('m')
testcase_1 = unittest2.FunctionTestCase(lambda: None)
testcase_2 = unittest2.FunctionTestCase(lambda: None)
def return_TestSuite():
return unittest2.TestSuite([testcase_1, testcase_2])
m.return_TestSuite = return_TestSuite
loader = unittest2.TestLoader()
suite = loader.loadTestsFromName('return_TestSuite', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [testcase_1, testcase_2])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase ... instance"
def test_loadTestsFromName__callable__TestCase_instance(self):
m = types.ModuleType('m')
testcase_1 = unittest2.FunctionTestCase(lambda: None)
def return_TestCase():
return testcase_1
m.return_TestCase = return_TestCase
loader = unittest2.TestLoader()
suite = loader.loadTestsFromName('return_TestCase', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [testcase_1])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase ... instance"
#*****************************************************************
#Override the suiteClass attribute to ensure that loadTestsFromName()
#builds the returned suite with it
def test_loadTestsFromName__callable__TestCase_instance_ProperSuiteClass(self):
class SubTestSuite(unittest2.TestSuite):
pass
m = types.ModuleType('m')
testcase_1 = unittest2.FunctionTestCase(lambda: None)
def return_TestCase():
return testcase_1
m.return_TestCase = return_TestCase
loader = unittest2.TestLoader()
loader.suiteClass = SubTestSuite
suite = loader.loadTestsFromName('return_TestCase', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [testcase_1])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a test method within a test case class"
#*****************************************************************
#Override the suiteClass attribute to ensure that loadTestsFromName()
#builds the returned suite with it
def test_loadTestsFromName__relative_testmethod_ProperSuiteClass(self):
class SubTestSuite(unittest2.TestSuite):
pass
m = types.ModuleType('m')
class MyTestCase(unittest2.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest2.TestLoader()
loader.suiteClass=SubTestSuite
suite = loader.loadTestsFromName('testcase_1.test', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [MyTestCase('test')])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase or TestSuite instance"
#
# What happens if the callable returns something else?
def test_loadTestsFromName__callable__wrong_type(self):
m = types.ModuleType('m')
def return_wrong():
return 6
m.return_wrong = return_wrong
loader = unittest2.TestLoader()
try:
loader.loadTestsFromName('return_wrong', m)
except TypeError:
pass
else:
self.fail("TestLoader.loadTestsFromName failed to raise TypeError")
# "The specifier can refer to modules and packages which have not been
# imported; they will be imported as a side-effect"
def test_loadTestsFromName__module_not_loaded(self):
# We're going to try to load this module as a side-effect, so it
# better not be loaded before we try.
#
module_name = 'unittest2.test.dummy'
sys.modules.pop(module_name, None)
loader = unittest2.TestLoader()
try:
suite = loader.loadTestsFromName(module_name)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [])
# module should now be loaded, thanks to loadTestsFromName()
self.assertIn(module_name, sys.modules)
finally:
if module_name in sys.modules:
del sys.modules[module_name]
################################################################
### /Tests for TestLoader.loadTestsFromName()
### Tests for TestLoader.loadTestsFromNames()
################################################################
# "Similar to loadTestsFromName(), but takes a sequence of names rather
# than a single name."
#
# What happens if that sequence of names is empty?
def test_loadTestsFromNames__empty_name_list(self):
loader = unittest2.TestLoader()
suite = loader.loadTestsFromNames([])
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [])
# "Similar to loadTestsFromName(), but takes a sequence of names rather
# than a single name."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens if that sequence of names is empty?
#
# XXX Should this raise a ValueError or just return an empty TestSuite?
def test_loadTestsFromNames__relative_empty_name_list(self):
loader = unittest2.TestLoader()
suite = loader.loadTestsFromNames([], unittest2)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [])
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Is ValueError raised in response to an empty name?
def test_loadTestsFromNames__empty_name(self):
loader = unittest2.TestLoader()
try:
loader.loadTestsFromNames([''])
except ValueError, e:
self.assertEqual(str(e), "Empty module name")
else:
self.fail("TestLoader.loadTestsFromNames failed to raise ValueError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when presented with an impossible module name?
def test_loadTestsFromNames__malformed_name(self):
loader = unittest2.TestLoader()
# XXX Should this raise ValueError or ImportError?
try:
loader.loadTestsFromNames(['abc () //'])
except ValueError:
pass
except ImportError:
pass
else:
self.fail("TestLoader.loadTestsFromNames failed to raise ValueError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when no module can be found for the given name?
def test_loadTestsFromNames__unknown_module_name(self):
loader = unittest2.TestLoader()
try:
loader.loadTestsFromNames(['sdasfasfasdf'])
except ImportError, e:
self.assertEqual(str(e), "No module named sdasfasfasdf")
else:
self.fail("TestLoader.loadTestsFromNames failed to raise ImportError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when the module can be found, but not the attribute?
def test_loadTestsFromNames__unknown_attr_name(self):
loader = unittest2.TestLoader()
try:
loader.loadTestsFromNames(['unittest2.sdasfasfasdf', 'unittest2'])
except AttributeError, e:
self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
else:
self.fail("TestLoader.loadTestsFromNames failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens when given an unknown attribute on a specified `module`
# argument?
def test_loadTestsFromNames__unknown_name_relative_1(self):
loader = unittest2.TestLoader()
try:
loader.loadTestsFromNames(['sdasfasfasdf'], unittest2)
except AttributeError, e:
self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
else:
self.fail("TestLoader.loadTestsFromName failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# Do unknown attributes (relative to a provided module) still raise an
# exception even in the presence of valid attribute names?
def test_loadTestsFromNames__unknown_name_relative_2(self):
loader = unittest2.TestLoader()
try:
loader.loadTestsFromNames(['TestCase', 'sdasfasfasdf'], unittest2)
except AttributeError, e:
self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
else:
self.fail("TestLoader.loadTestsFromName failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens when faced with the empty string?
#
# XXX This currently raises AttributeError, though ValueError is probably
# more appropriate
def test_loadTestsFromNames__relative_empty_name(self):
loader = unittest2.TestLoader()
try:
loader.loadTestsFromNames([''], unittest2)
except AttributeError:
pass
else:
self.fail("Failed to raise ValueError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens when presented with an impossible attribute name?
def test_loadTestsFromNames__relative_malformed_name(self):
loader = unittest2.TestLoader()
# XXX Should this raise AttributeError or ValueError?
try:
loader.loadTestsFromNames(['abc () //'], unittest2)
except AttributeError:
pass
except ValueError:
pass
else:
self.fail("TestLoader.loadTestsFromNames failed to raise ValueError")
# "The method optionally resolves name relative to the given module"
#
# Does loadTestsFromNames() make sure the provided `module` is in fact
# a module?
#
# XXX This validation is currently not done. This flexibility should
# either be documented or a TypeError should be raised.
def test_loadTestsFromNames__relative_not_a_module(self):
class MyTestCase(unittest2.TestCase):
def test(self):
pass
class NotAModule(object):
test_2 = MyTestCase
loader = unittest2.TestLoader()
suite = loader.loadTestsFromNames(['test_2'], NotAModule)
reference = [unittest2.TestSuite([MyTestCase('test')])]
self.assertEqual(list(suite), reference)
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Does it raise an exception if the name resolves to an invalid
# object?
def test_loadTestsFromNames__relative_bad_object(self):
m = types.ModuleType('m')
m.testcase_1 = object()
loader = unittest2.TestLoader()
try:
loader.loadTestsFromNames(['testcase_1'], m)
except TypeError:
pass
else:
self.fail("Should have raised TypeError")
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a test case class"
def test_loadTestsFromNames__relative_TestCase_subclass(self):
m = types.ModuleType('m')
class MyTestCase(unittest2.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest2.TestLoader()
suite = loader.loadTestsFromNames(['testcase_1'], m)
self.assertIsInstance(suite, loader.suiteClass)
expected = loader.suiteClass([MyTestCase('test')])
self.assertEqual(list(suite), [expected])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a TestSuite instance"
def test_loadTestsFromNames__relative_TestSuite(self):
m = types.ModuleType('m')
class MyTestCase(unittest2.TestCase):
def test(self):
pass
m.testsuite = unittest2.TestSuite([MyTestCase('test')])
loader = unittest2.TestLoader()
suite = loader.loadTestsFromNames(['testsuite'], m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [m.testsuite])
# "The specifier name is a ``dotted name'' that may resolve ... to ... a
# test method within a test case class"
def test_loadTestsFromNames__relative_testmethod(self):
m = types.ModuleType('m')
class MyTestCase(unittest2.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest2.TestLoader()
suite = loader.loadTestsFromNames(['testcase_1.test'], m)
self.assertIsInstance(suite, loader.suiteClass)
ref_suite = unittest2.TestSuite([MyTestCase('test')])
self.assertEqual(list(suite), [ref_suite])
# "The specifier name is a ``dotted name'' that may resolve ... to ... a
# test method within a test case class"
#
# Does the method gracefully handle names that initially look like they
# resolve to "a test method within a test case class" but don't?
def test_loadTestsFromNames__relative_invalid_testmethod(self):
m = types.ModuleType('m')
class MyTestCase(unittest2.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest2.TestLoader()
try:
loader.loadTestsFromNames(['testcase_1.testfoo'], m)
except AttributeError, e:
self.assertEqual(str(e), "type object 'MyTestCase' has no attribute 'testfoo'")
else:
self.fail("Failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a ... TestSuite instance"
def test_loadTestsFromNames__callable__TestSuite(self):
m = types.ModuleType('m')
testcase_1 = unittest2.FunctionTestCase(lambda: None)
testcase_2 = unittest2.FunctionTestCase(lambda: None)
def return_TestSuite():
return unittest2.TestSuite([testcase_1, testcase_2])
m.return_TestSuite = return_TestSuite
loader = unittest2.TestLoader()
suite = loader.loadTestsFromNames(['return_TestSuite'], m)
self.assertIsInstance(suite, loader.suiteClass)
expected = unittest2.TestSuite([testcase_1, testcase_2])
self.assertEqual(list(suite), [expected])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase ... instance"
def test_loadTestsFromNames__callable__TestCase_instance(self):
m = types.ModuleType('m')
testcase_1 = unittest2.FunctionTestCase(lambda: None)
def return_TestCase():
return testcase_1
m.return_TestCase = return_TestCase
loader = unittest2.TestLoader()
suite = loader.loadTestsFromNames(['return_TestCase'], m)
self.assertIsInstance(suite, loader.suiteClass)
ref_suite = unittest2.TestSuite([testcase_1])
self.assertEqual(list(suite), [ref_suite])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase or TestSuite instance"
#
# Are staticmethods handled correctly?
def test_loadTestsFromNames__callable__call_staticmethod(self):
m = types.ModuleType('m')
class Test1(unittest2.TestCase):
def test(self):
pass
testcase_1 = Test1('test')
class Foo(unittest2.TestCase):
def foo():
return testcase_1
foo = staticmethod(foo)
m.Foo = Foo
loader = unittest2.TestLoader()
suite = loader.loadTestsFromNames(['Foo.foo'], m)
self.assertIsInstance(suite, loader.suiteClass)
ref_suite = unittest2.TestSuite([testcase_1])
self.assertEqual(list(suite), [ref_suite])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase or TestSuite instance"
#
# What happens when the callable returns something else?
def test_loadTestsFromNames__callable__wrong_type(self):
m = types.ModuleType('m')
def return_wrong():
return 6
m.return_wrong = return_wrong
loader = unittest2.TestLoader()
try:
loader.loadTestsFromNames(['return_wrong'], m)
except TypeError:
pass
else:
self.fail("TestLoader.loadTestsFromNames failed to raise TypeError")
# "The specifier can refer to modules and packages which have not been
# imported; they will be imported as a side-effect"
def test_loadTestsFromNames__module_not_loaded(self):
# We're going to try to load this module as a side-effect, so it
# better not be loaded before we try.
#
module_name = 'unittest2.test.dummy'
sys.modules.pop(module_name, None)
loader = unittest2.TestLoader()
try:
suite = loader.loadTestsFromNames([module_name])
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [unittest2.TestSuite()])
# module should now be loaded, thanks to loadTestsFromNames()
self.assertIn(module_name, sys.modules)
finally:
if module_name in sys.modules:
del sys.modules[module_name]
################################################################
### /Tests for TestLoader.loadTestsFromNames()
### Tests for TestLoader.getTestCaseNames()
################################################################
# "Return a sorted sequence of method names found within testCaseClass"
#
# Test.foobar is defined to make sure getTestCaseNames() respects
# loader.testMethodPrefix
def test_getTestCaseNames(self):
class Test(unittest2.TestCase):
def test_1(self): pass
def test_2(self): pass
def foobar(self): pass
loader = unittest2.TestLoader()
self.assertEqual(loader.getTestCaseNames(Test), ['test_1', 'test_2'])
# "Return a sorted sequence of method names found within testCaseClass"
#
# Does getTestCaseNames() behave appropriately if no tests are found?
def test_getTestCaseNames__no_tests(self):
class Test(unittest2.TestCase):
def foobar(self): pass
loader = unittest2.TestLoader()
self.assertEqual(loader.getTestCaseNames(Test), [])
# "Return a sorted sequence of method names found within testCaseClass"
#
# Are not-TestCases handled gracefully?
#
# XXX This should raise a TypeError, not return a list
#
# XXX It's too late in the 2.5 release cycle to fix this, but it should
# probably be revisited for 2.6
def test_getTestCaseNames__not_a_TestCase(self):
class BadCase(int):
def test_foo(self):
pass
loader = unittest2.TestLoader()
names = loader.getTestCaseNames(BadCase)
self.assertEqual(names, ['test_foo'])
# "Return a sorted sequence of method names found within testCaseClass"
#
# Make sure inherited names are handled.
#
# TestP.foobar is defined to make sure getTestCaseNames() respects
# loader.testMethodPrefix
def test_getTestCaseNames__inheritance(self):
class TestP(unittest2.TestCase):
def test_1(self): pass
def test_2(self): pass
def foobar(self): pass
class TestC(TestP):
def test_1(self): pass
def test_3(self): pass
loader = unittest2.TestLoader()
names = ['test_1', 'test_2', 'test_3']
self.assertEqual(loader.getTestCaseNames(TestC), names)
################################################################
### /Tests for TestLoader.getTestCaseNames()
### Tests for TestLoader.testMethodPrefix
################################################################
# "String giving the prefix of method names which will be interpreted as
# test methods"
#
# Implicit in the documentation is that testMethodPrefix is respected by
# all loadTestsFrom* methods.
def test_testMethodPrefix__loadTestsFromTestCase(self):
class Foo(unittest2.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
tests_1 = unittest2.TestSuite([Foo('foo_bar')])
tests_2 = unittest2.TestSuite([Foo('test_1'), Foo('test_2')])
loader = unittest2.TestLoader()
loader.testMethodPrefix = 'foo'
self.assertEqual(loader.loadTestsFromTestCase(Foo), tests_1)
loader.testMethodPrefix = 'test'
self.assertEqual(loader.loadTestsFromTestCase(Foo), tests_2)
# "String giving the prefix of method names which will be interpreted as
# test methods"
#
# Implicit in the documentation is that testMethodPrefix is respected by
# all loadTestsFrom* methods.
def test_testMethodPrefix__loadTestsFromModule(self):
m = types.ModuleType('m')
class Foo(unittest2.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
m.Foo = Foo
tests_1 = [unittest2.TestSuite([Foo('foo_bar')])]
tests_2 = [unittest2.TestSuite([Foo('test_1'), Foo('test_2')])]
loader = unittest2.TestLoader()
loader.testMethodPrefix = 'foo'
self.assertEqual(list(loader.loadTestsFromModule(m)), tests_1)
loader.testMethodPrefix = 'test'
self.assertEqual(list(loader.loadTestsFromModule(m)), tests_2)
# "String giving the prefix of method names which will be interpreted as
# test methods"
#
# Implicit in the documentation is that testMethodPrefix is respected by
# all loadTestsFrom* methods.
def test_testMethodPrefix__loadTestsFromName(self):
m = types.ModuleType('m')
class Foo(unittest2.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
m.Foo = Foo
tests_1 = unittest2.TestSuite([Foo('foo_bar')])
tests_2 = unittest2.TestSuite([Foo('test_1'), Foo('test_2')])
loader = unittest2.TestLoader()
loader.testMethodPrefix = 'foo'
self.assertEqual(loader.loadTestsFromName('Foo', m), tests_1)
loader.testMethodPrefix = 'test'
self.assertEqual(loader.loadTestsFromName('Foo', m), tests_2)
# "String giving the prefix of method names which will be interpreted as
# test methods"
#
# Implicit in the documentation is that testMethodPrefix is respected by
# all loadTestsFrom* methods.
def test_testMethodPrefix__loadTestsFromNames(self):
m = types.ModuleType('m')
class Foo(unittest2.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
m.Foo = Foo
tests_1 = unittest2.TestSuite([unittest2.TestSuite([Foo('foo_bar')])])
tests_2 = unittest2.TestSuite([Foo('test_1'), Foo('test_2')])
tests_2 = unittest2.TestSuite([tests_2])
loader = unittest2.TestLoader()
loader.testMethodPrefix = 'foo'
self.assertEqual(loader.loadTestsFromNames(['Foo'], m), tests_1)
loader.testMethodPrefix = 'test'
self.assertEqual(loader.loadTestsFromNames(['Foo'], m), tests_2)
# "The default value is 'test'"
def test_testMethodPrefix__default_value(self):
loader = unittest2.TestLoader()
self.assertTrue(loader.testMethodPrefix == 'test')
################################################################
### /Tests for TestLoader.testMethodPrefix
### Tests for TestLoader.sortTestMethodsUsing
################################################################
# "Function to be used to compare method names when sorting them in
# getTestCaseNames() and all the loadTestsFromX() methods"
def test_sortTestMethodsUsing__loadTestsFromTestCase(self):
def reversed_cmp(x, y):
return -cmp(x, y)
class Foo(unittest2.TestCase):
def test_1(self): pass
def test_2(self): pass
loader = unittest2.TestLoader()
loader.sortTestMethodsUsing = reversed_cmp
tests = loader.suiteClass([Foo('test_2'), Foo('test_1')])
self.assertEqual(loader.loadTestsFromTestCase(Foo), tests)
# "Function to be used to compare method names when sorting them in
# getTestCaseNames() and all the loadTestsFromX() methods"
def test_sortTestMethodsUsing__loadTestsFromModule(self):
def reversed_cmp(x, y):
return -cmp(x, y)
m = types.ModuleType('m')
class Foo(unittest2.TestCase):
def test_1(self): pass
def test_2(self): pass
m.Foo = Foo
loader = unittest2.TestLoader()
loader.sortTestMethodsUsing = reversed_cmp
tests = [loader.suiteClass([Foo('test_2'), Foo('test_1')])]
self.assertEqual(list(loader.loadTestsFromModule(m)), tests)
# "Function to be used to compare method names when sorting them in
# getTestCaseNames() and all the loadTestsFromX() methods"
def test_sortTestMethodsUsing__loadTestsFromName(self):
def reversed_cmp(x, y):
return -cmp(x, y)
m = types.ModuleType('m')
class Foo(unittest2.TestCase):
def test_1(self): pass
def test_2(self): pass
m.Foo = Foo
loader = unittest2.TestLoader()
loader.sortTestMethodsUsing = reversed_cmp
tests = loader.suiteClass([Foo('test_2'), Foo('test_1')])
self.assertEqual(loader.loadTestsFromName('Foo', m), tests)
# "Function to be used to compare method names when sorting them in
# getTestCaseNames() and all the loadTestsFromX() methods"
def test_sortTestMethodsUsing__loadTestsFromNames(self):
def reversed_cmp(x, y):
return -cmp(x, y)
m = types.ModuleType('m')
class Foo(unittest2.TestCase):
def test_1(self): pass
def test_2(self): pass
m.Foo = Foo
loader = unittest2.TestLoader()
loader.sortTestMethodsUsing = reversed_cmp
tests = [loader.suiteClass([Foo('test_2'), Foo('test_1')])]
self.assertEqual(list(loader.loadTestsFromNames(['Foo'], m)), tests)
# "Function to be used to compare method names when sorting them in
# getTestCaseNames()"
#
# Does it actually affect getTestCaseNames()?
def test_sortTestMethodsUsing__getTestCaseNames(self):
def reversed_cmp(x, y):
return -cmp(x, y)
class Foo(unittest2.TestCase):
def test_1(self): pass
def test_2(self): pass
loader = unittest2.TestLoader()
loader.sortTestMethodsUsing = reversed_cmp
test_names = ['test_2', 'test_1']
self.assertEqual(loader.getTestCaseNames(Foo), test_names)
# "The default value is the built-in cmp() function"
def test_sortTestMethodsUsing__default_value(self):
loader = unittest2.TestLoader()
self.assertTrue(loader.sortTestMethodsUsing is cmp)
# "it can be set to None to disable the sort."
#
# XXX How is this different from reassigning cmp? Are the tests returned
# in a random order or something? This behaviour should die
def test_sortTestMethodsUsing__None(self):
class Foo(unittest2.TestCase):
def test_1(self): pass
def test_2(self): pass
loader = unittest2.TestLoader()
loader.sortTestMethodsUsing = None
test_names = ['test_2', 'test_1']
self.assertEqual(set(loader.getTestCaseNames(Foo)), set(test_names))
################################################################
### /Tests for TestLoader.sortTestMethodsUsing
### Tests for TestLoader.suiteClass
################################################################
# "Callable object that constructs a test suite from a list of tests."
def test_suiteClass__loadTestsFromTestCase(self):
class Foo(unittest2.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
tests = [Foo('test_1'), Foo('test_2')]
loader = unittest2.TestLoader()
loader.suiteClass = list
self.assertEqual(loader.loadTestsFromTestCase(Foo), tests)
# It is implicit in the documentation for TestLoader.suiteClass that
# all TestLoader.loadTestsFrom* methods respect it. Let's make sure
def test_suiteClass__loadTestsFromModule(self):
m = types.ModuleType('m')
class Foo(unittest2.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
m.Foo = Foo
tests = [[Foo('test_1'), Foo('test_2')]]
loader = unittest2.TestLoader()
loader.suiteClass = list
self.assertEqual(loader.loadTestsFromModule(m), tests)
# It is implicit in the documentation for TestLoader.suiteClass that
# all TestLoader.loadTestsFrom* methods respect it. Let's make sure
def test_suiteClass__loadTestsFromName(self):
m = types.ModuleType('m')
class Foo(unittest2.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
m.Foo = Foo
tests = [Foo('test_1'), Foo('test_2')]
loader = unittest2.TestLoader()
loader.suiteClass = list
self.assertEqual(loader.loadTestsFromName('Foo', m), tests)
# It is implicit in the documentation for TestLoader.suiteClass that
# all TestLoader.loadTestsFrom* methods respect it. Let's make sure
def test_suiteClass__loadTestsFromNames(self):
m = types.ModuleType('m')
class Foo(unittest2.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
m.Foo = Foo
tests = [[Foo('test_1'), Foo('test_2')]]
loader = unittest2.TestLoader()
loader.suiteClass = list
self.assertEqual(loader.loadTestsFromNames(['Foo'], m), tests)
# "The default value is the TestSuite class"
def test_suiteClass__default_value(self):
loader = unittest2.TestLoader()
self.assertTrue(loader.suiteClass is unittest2.TestSuite)
if __name__ == '__main__':
unittest2.main()
| gpl-3.0 |
altairpearl/scikit-learn | sklearn/ensemble/base.py | 40 | 3621 | """
Base class for ensemble-based estimators.
"""
# Authors: Gilles Louppe
# License: BSD 3 clause
import numpy as np
from ..base import clone
from ..base import BaseEstimator
from ..base import MetaEstimatorMixin
from ..utils import _get_n_jobs
class BaseEnsemble(BaseEstimator, MetaEstimatorMixin):
"""Base class for all ensemble classes.
Warning: This class should not be used directly. Use derived classes
instead.
Parameters
----------
base_estimator : object, optional (default=None)
The base estimator from which the ensemble is built.
n_estimators : integer
The number of estimators in the ensemble.
estimator_params : list of strings
The list of attributes to use as parameters when instantiating a
new base estimator. If none are given, default parameters are used.
Attributes
----------
base_estimator_ : estimator
The base estimator from which the ensemble is grown.
estimators_ : list of estimators
The collection of fitted base estimators.
"""
def __init__(self, base_estimator, n_estimators=10,
estimator_params=tuple()):
# Set parameters
self.base_estimator = base_estimator
self.n_estimators = n_estimators
self.estimator_params = estimator_params
# Don't instantiate estimators now! Parameters of base_estimator might
# still change. Eg., when grid-searching with the nested object syntax.
# This needs to be filled by the derived classes.
self.estimators_ = []
def _validate_estimator(self, default=None):
"""Check the estimator and the n_estimator attribute, set the
`base_estimator_` attribute."""
if self.n_estimators <= 0:
raise ValueError("n_estimators must be greater than zero, "
"got {0}.".format(self.n_estimators))
if self.base_estimator is not None:
self.base_estimator_ = self.base_estimator
else:
self.base_estimator_ = default
if self.base_estimator_ is None:
raise ValueError("base_estimator cannot be None")
def _make_estimator(self, append=True):
"""Make and configure a copy of the `base_estimator_` attribute.
Warning: This method should be used to properly instantiate new
sub-estimators.
"""
estimator = clone(self.base_estimator_)
estimator.set_params(**dict((p, getattr(self, p))
for p in self.estimator_params))
if append:
self.estimators_.append(estimator)
return estimator
def __len__(self):
"""Returns the number of estimators in the ensemble."""
return len(self.estimators_)
def __getitem__(self, index):
"""Returns the index'th estimator in the ensemble."""
return self.estimators_[index]
def __iter__(self):
"""Returns iterator over estimators in the ensemble."""
return iter(self.estimators_)
def _partition_estimators(n_estimators, n_jobs):
"""Private function used to partition estimators between jobs."""
# Compute the number of jobs
n_jobs = min(_get_n_jobs(n_jobs), n_estimators)
# Partition estimators between jobs
n_estimators_per_job = (n_estimators // n_jobs) * np.ones(n_jobs,
dtype=np.int)
n_estimators_per_job[:n_estimators % n_jobs] += 1
starts = np.cumsum(n_estimators_per_job)
return n_jobs, n_estimators_per_job.tolist(), [0] + starts.tolist()
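# Worked example (illustrative): _partition_estimators(10, 3) returns
# n_jobs=3, counts [4, 3, 3] and starts [0, 4, 7, 10], so job k handles
# estimators starts[k]:starts[k + 1].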
| bsd-3-clause |
delete/estofadora | estofadora/bills/views.py | 1 | 1602 | from django.shortcuts import render, redirect, get_object_or_404
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.core.urlresolvers import reverse
from .forms import BillForm
from .models import Bill
@login_required
def new(request):
context = {}
if request.method == 'POST':
form = BillForm(request.POST)
if form.is_valid():
form.save()
messages.success(request, 'Cadastrada com sucesso!')
return redirect(reverse('bills:new'))
else:
form = BillForm()
context['form'] = form
context['section'] = 'bill_new'
return render(request, 'bills/new.html', context)
@login_required
def list(request):
context = {}
if request.method == 'POST':
bill_name = request.POST.get('name')
bills = Bill.objects.filter(
name__icontains=bill_name
).order_by('-date_to_pay')
else:
bills = Bill.objects.all().order_by('-date_to_pay')
context['bills'] = bills
context['section'] = 'bills'
return render(request, 'bills/list.html', context)
@login_required
def delete(request, pk):
bill = get_object_or_404(Bill, pk=pk)
bill.delete()
messages.success(request, 'Conta removida com sucesso!')
return redirect(reverse('bills:list'))
@login_required
def mark_as_paid(request, pk):
bill = get_object_or_404(Bill, pk=pk)
bill.is_paid = True
bill.save()
messages.success(request, 'Conta marcada como paga!')
return redirect(reverse('bills:list'))
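# Illustrative sketch (assumed URLconf, not part of this file): the
# reverse() calls above imply bills/urls.py entries along these lines:
#   url(r'^new/$', views.new, name='new'),
#   url(r'^$', views.list, name='list'),
#   url(r'^delete/(?P<pk>\d+)/$', views.delete, name='delete'),
#   url(r'^paid/(?P<pk>\d+)/$', views.mark_as_paid, name='mark_as_paid'),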
| mit |
infoxchange/lettuce | tests/integration/lib/Django-1.3/django/core/management/commands/shell.py | 230 | 3263 | import os
from django.core.management.base import NoArgsCommand
from optparse import make_option
class Command(NoArgsCommand):
option_list = NoArgsCommand.option_list + (
make_option('--plain', action='store_true', dest='plain',
help='Tells Django to use plain Python, not IPython.'),
)
help = "Runs a Python interactive interpreter. Tries to use IPython, if it's available."
shells = ['ipython', 'bpython']
requires_model_validation = False
def ipython(self):
try:
from IPython.frontend.terminal.embed import TerminalInteractiveShell
shell = TerminalInteractiveShell()
shell.mainloop()
except ImportError:
# IPython < 0.11
# Explicitly pass an empty list as arguments, because otherwise
# IPython would use sys.argv from this script.
try:
from IPython.Shell import IPShell
shell = IPShell(argv=[])
shell.mainloop()
except ImportError:
# IPython not found at all, raise ImportError
raise
def bpython(self):
import bpython
bpython.embed()
def run_shell(self):
for shell in self.shells:
try:
return getattr(self, shell)()
except ImportError:
pass
raise ImportError
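# Illustrative note: run_shell() tries each name in self.shells in order,
# treating ImportError as "try the next one"; a subclass could prepend its
# own shell, e.g. (hypothetical module and method):
#   class MyCommand(Command):
#       shells = ['myshell'] + Command.shells
#       def myshell(self):
#           import myshell
#           myshell.embed()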
def handle_noargs(self, **options):
# XXX: (Temporary) workaround for ticket #1796: force early loading of all
# models from installed apps.
from django.db.models.loading import get_models
loaded_models = get_models()
use_plain = options.get('plain', False)
try:
if use_plain:
# Don't bother loading IPython, because the user wants plain Python.
raise ImportError
self.run_shell()
except ImportError:
import code
# Set up a dictionary to serve as the environment for the shell, so
# that tab completion works on objects that are imported at runtime.
# See ticket 5082.
imported_objects = {}
try: # Try activating rlcompleter, because it's handy.
import readline
except ImportError:
pass
else:
# We don't have to wrap the following import in a 'try', because
# we already know 'readline' was imported successfully.
import rlcompleter
readline.set_completer(rlcompleter.Completer(imported_objects).complete)
readline.parse_and_bind("tab:complete")
# We want to honor both $PYTHONSTARTUP and .pythonrc.py, so follow system
# conventions and get $PYTHONSTARTUP first then import user.
if not use_plain:
pythonrc = os.environ.get("PYTHONSTARTUP")
if pythonrc and os.path.isfile(pythonrc):
try:
execfile(pythonrc)
except NameError:
pass
# This will import .pythonrc.py as a side-effect
import user
code.interact(local=imported_objects)
| gpl-3.0 |
Keeperv85/Blade_G_kernel | tools/perf/scripts/python/syscall-counts.py | 11181 | 1522 | # system call counts
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts.py [comm]\n";
for_comm = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
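# Note on the idiom above: 'syscalls' is an autovivifying autodict, so the
# first increment of an unseen id raises TypeError, which the except clause
# turns into initialization to 1.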
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"-----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
print "%-40s %10d\n" % (syscall_name(id), val),
| gpl-2.0 |
grlee77/scipy | scipy/stats/mstats_extras.py | 12 | 14809 | """
Additional statistics functions with support for masked arrays.
"""
# Original author (2007): Pierre GF Gerard-Marchant
__all__ = ['compare_medians_ms',
'hdquantiles', 'hdmedian', 'hdquantiles_sd',
'idealfourths',
'median_cihs', 'mjci', 'mquantiles_cimj',
'rsh',
'trimmed_mean_ci']
import numpy as np
from numpy import float_, int_, ndarray
import numpy.ma as ma
from numpy.ma import MaskedArray
from . import mstats_basic as mstats
from scipy.stats.distributions import norm, beta, t, binom
def hdquantiles(data, prob=[.25, .5, .75], axis=None, var=False):
"""
Computes quantile estimates with the Harrell-Davis method.
The quantile estimates are calculated as a weighted linear combination
of order statistics.
Parameters
----------
data : array_like
Data array.
prob : sequence, optional
Sequence of quantiles to compute.
axis : int or None, optional
Axis along which to compute the quantiles. If None, use a flattened
array.
var : bool, optional
Whether to return the variance of the estimate.
Returns
-------
hdquantiles : MaskedArray
A (p,) array of quantiles (if `var` is False), or a (2,p) array of
quantiles and variances (if `var` is True), where ``p`` is the
number of quantiles.
See Also
--------
hdquantiles_sd
"""
def _hd_1D(data,prob,var):
"Computes the HD quantiles for a 1D array. Returns nan for invalid data."
xsorted = np.squeeze(np.sort(data.compressed().view(ndarray)))
# Don't use length here, in case we have a numpy scalar
n = xsorted.size
hd = np.empty((2,len(prob)), float_)
if n < 2:
hd.flat = np.nan
if var:
return hd
return hd[0]
v = np.arange(n+1) / float(n)
betacdf = beta.cdf
for (i,p) in enumerate(prob):
_w = betacdf(v, (n+1)*p, (n+1)*(1-p))
w = _w[1:] - _w[:-1]
hd_mean = np.dot(w, xsorted)
hd[0,i] = hd_mean
#
hd[1,i] = np.dot(w, (xsorted-hd_mean)**2)
#
hd[0, prob == 0] = xsorted[0]
hd[0, prob == 1] = xsorted[-1]
if var:
hd[1, prob == 0] = hd[1, prob == 1] = np.nan
return hd
return hd[0]
# Initialization & checks
data = ma.array(data, copy=False, dtype=float_)
p = np.array(prob, copy=False, ndmin=1)
# Computes quantiles along axis (or globally)
if (axis is None) or (data.ndim == 1):
result = _hd_1D(data, p, var)
else:
if data.ndim > 2:
raise ValueError("Array 'data' must be at most two dimensional, "
"but got data.ndim = %d" % data.ndim)
result = ma.apply_along_axis(_hd_1D, axis, data, p, var)
return ma.fix_invalid(result, copy=False)
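# Illustrative usage sketch (data is hypothetical; values not asserted):
#   >>> import numpy as np
#   >>> x = np.arange(1., 11.)
#   >>> hdquantiles(x, prob=[.25, .5, .75])    # (3,) quantile estimates
#   >>> hdquantiles(x, prob=[.5], var=True)    # (2, 1): estimate and variance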
def hdmedian(data, axis=-1, var=False):
"""
Returns the Harrell-Davis estimate of the median along the given axis.
Parameters
----------
data : ndarray
Data array.
axis : int, optional
Axis along which to compute the quantiles. If None, use a flattened
array.
var : bool, optional
Whether to return the variance of the estimate.
Returns
-------
hdmedian : MaskedArray
The median values. If ``var=True``, the variance is returned inside
        the masked array. E.g. for a 1-D array the shape changes from (1,) to
(2,).
"""
result = hdquantiles(data,[0.5], axis=axis, var=var)
return result.squeeze()
def hdquantiles_sd(data, prob=list([.25,.5,.75]), axis=None):
"""
The standard error of the Harrell-Davis quantile estimates by jackknife.
Parameters
----------
data : array_like
Data array.
prob : sequence, optional
Sequence of quantiles to compute.
axis : int, optional
Axis along which to compute the quantiles. If None, use a flattened
array.
Returns
-------
hdquantiles_sd : MaskedArray
Standard error of the Harrell-Davis quantile estimates.
See Also
--------
hdquantiles
"""
def _hdsd_1D(data, prob):
"Computes the std error for 1D arrays."
xsorted = np.sort(data.compressed())
n = len(xsorted)
hdsd = np.empty(len(prob), float_)
if n < 2:
hdsd.flat = np.nan
vv = np.arange(n) / float(n-1)
betacdf = beta.cdf
for (i,p) in enumerate(prob):
_w = betacdf(vv, (n+1)*p, (n+1)*(1-p))
w = _w[1:] - _w[:-1]
mx_ = np.fromiter([w[:k] @ xsorted[:k] + w[k:] @ xsorted[k+1:]
for k in range(n)], dtype=float_)
mx_var = np.array(mx_.var(), copy=False, ndmin=1) * n / float(n-1)
hdsd[i] = float(n-1) * np.sqrt(np.diag(mx_var).diagonal() / float(n))
return hdsd
# Initialization & checks
data = ma.array(data, copy=False, dtype=float_)
p = np.array(prob, copy=False, ndmin=1)
# Computes quantiles along axis (or globally)
if (axis is None):
result = _hdsd_1D(data, p)
else:
if data.ndim > 2:
raise ValueError("Array 'data' must be at most two dimensional, "
"but got data.ndim = %d" % data.ndim)
result = ma.apply_along_axis(_hdsd_1D, axis, data, p)
return ma.fix_invalid(result, copy=False).ravel()
def trimmed_mean_ci(data, limits=(0.2,0.2), inclusive=(True,True),
alpha=0.05, axis=None):
"""
Selected confidence interval of the trimmed mean along the given axis.
Parameters
----------
data : array_like
Input data.
limits : {None, tuple}, optional
None or a two item tuple.
Tuple of the percentages to cut on each side of the array, with respect
to the number of unmasked data, as floats between 0. and 1. If ``n``
is the number of unmasked data before trimming, then
(``n * limits[0]``)th smallest data and (``n * limits[1]``)th
largest data are masked. The total number of unmasked data after
trimming is ``n * (1. - sum(limits))``.
The value of one limit can be set to None to indicate an open interval.
Defaults to (0.2, 0.2).
inclusive : (2,) tuple of boolean, optional
If relative==False, tuple indicating whether values exactly equal to
the absolute limits are allowed.
If relative==True, tuple indicating whether the number of data being
masked on each side should be rounded (True) or truncated (False).
Defaults to (True, True).
alpha : float, optional
Confidence level of the intervals.
Defaults to 0.05.
axis : int, optional
Axis along which to cut. If None, uses a flattened version of `data`.
Defaults to None.
Returns
-------
trimmed_mean_ci : (2,) ndarray
The lower and upper confidence intervals of the trimmed data.
"""
data = ma.array(data, copy=False)
trimmed = mstats.trimr(data, limits=limits, inclusive=inclusive, axis=axis)
tmean = trimmed.mean(axis)
tstde = mstats.trimmed_stde(data,limits=limits,inclusive=inclusive,axis=axis)
df = trimmed.count(axis) - 1
tppf = t.ppf(1-alpha/2.,df)
return np.array((tmean - tppf*tstde, tmean+tppf*tstde))
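# Illustrative usage sketch (invented data; not part of the original module):
# a 95% confidence interval for the 20%-trimmed mean, i.e. alpha=0.05 with the
# default limits.
#
# >>> import numpy.ma as ma
# >>> x = ma.array([1., 2., 2., 3., 3., 3., 4., 4., 5., 40.])   # one outlier
# >>> lo, hi = trimmed_mean_ci(x, limits=(0.2, 0.2), alpha=0.05)
#
# The trimming discards the outlier, so the interval stays near the bulk of
# the data rather than being dragged toward 40.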
def mjci(data, prob=[0.25,0.5,0.75], axis=None):
"""
Returns the Maritz-Jarrett estimators of the standard error of selected
experimental quantiles of the data.
Parameters
----------
data : ndarray
Data array.
prob : sequence, optional
Sequence of quantiles to compute.
axis : int or None, optional
Axis along which to compute the quantiles. If None, use a flattened
array.
"""
def _mjci_1D(data, p):
data = np.sort(data.compressed())
n = data.size
prob = (np.array(p) * n + 0.5).astype(int_)
betacdf = beta.cdf
mj = np.empty(len(prob), float_)
x = np.arange(1,n+1, dtype=float_) / n
y = x - 1./n
for (i,m) in enumerate(prob):
W = betacdf(x,m-1,n-m) - betacdf(y,m-1,n-m)
C1 = np.dot(W,data)
C2 = np.dot(W,data**2)
mj[i] = np.sqrt(C2 - C1**2)
return mj
data = ma.array(data, copy=False)
if data.ndim > 2:
raise ValueError("Array 'data' must be at most two dimensional, "
"but got data.ndim = %d" % data.ndim)
p = np.array(prob, copy=False, ndmin=1)
# Computes quantiles along axis (or globally)
if (axis is None):
return _mjci_1D(data, p)
else:
return ma.apply_along_axis(_mjci_1D, axis, data, p)
def mquantiles_cimj(data, prob=[0.25,0.50,0.75], alpha=0.05, axis=None):
"""
Computes the alpha confidence interval for the selected quantiles of the
data, with Maritz-Jarrett estimators.
Parameters
----------
data : ndarray
Data array.
prob : sequence, optional
Sequence of quantiles to compute.
alpha : float, optional
Confidence level of the intervals.
axis : int or None, optional
Axis along which to compute the quantiles.
If None, use a flattened array.
Returns
-------
ci_lower : ndarray
The lower boundaries of the confidence interval. Of the same length as
`prob`.
ci_upper : ndarray
The upper boundaries of the confidence interval. Of the same length as
`prob`.
"""
alpha = min(alpha, 1 - alpha)
z = norm.ppf(1 - alpha/2.)
xq = mstats.mquantiles(data, prob, alphap=0, betap=0, axis=axis)
smj = mjci(data, prob, axis=axis)
return (xq - z * smj, xq + z * smj)
def median_cihs(data, alpha=0.05, axis=None):
"""
Computes the alpha-level confidence interval for the median of the data.
    Uses the Hettmansperger-Sheather method.
Parameters
----------
data : array_like
Input data. Masked values are discarded. The input should be 1D only,
or `axis` should be set to None.
alpha : float, optional
Confidence level of the intervals.
axis : int or None, optional
Axis along which to compute the quantiles. If None, use a flattened
array.
Returns
-------
median_cihs
Alpha level confidence interval.
"""
def _cihs_1D(data, alpha):
data = np.sort(data.compressed())
n = len(data)
alpha = min(alpha, 1-alpha)
k = int(binom._ppf(alpha/2., n, 0.5))
gk = binom.cdf(n-k,n,0.5) - binom.cdf(k-1,n,0.5)
if gk < 1-alpha:
k -= 1
gk = binom.cdf(n-k,n,0.5) - binom.cdf(k-1,n,0.5)
gkk = binom.cdf(n-k-1,n,0.5) - binom.cdf(k,n,0.5)
I = (gk - 1 + alpha)/(gk - gkk)
lambd = (n-k) * I / float(k + (n-2*k)*I)
lims = (lambd*data[k] + (1-lambd)*data[k-1],
lambd*data[n-k-1] + (1-lambd)*data[n-k])
return lims
data = ma.array(data, copy=False)
# Computes quantiles along axis (or globally)
if (axis is None):
result = _cihs_1D(data, alpha)
else:
if data.ndim > 2:
raise ValueError("Array 'data' must be at most two dimensional, "
"but got data.ndim = %d" % data.ndim)
result = ma.apply_along_axis(_cihs_1D, axis, data, alpha)
return result
def compare_medians_ms(group_1, group_2, axis=None):
"""
Compares the medians from two independent groups along the given axis.
The comparison is performed using the McKean-Schrader estimate of the
standard error of the medians.
Parameters
----------
group_1 : array_like
First dataset. Has to be of size >=7.
group_2 : array_like
Second dataset. Has to be of size >=7.
axis : int, optional
Axis along which the medians are estimated. If None, the arrays are
flattened. If `axis` is not None, then `group_1` and `group_2`
should have the same shape.
Returns
-------
compare_medians_ms : {float, ndarray}
If `axis` is None, then returns a float, otherwise returns a 1-D
ndarray of floats with a length equal to the length of `group_1`
along `axis`.
"""
(med_1, med_2) = (ma.median(group_1,axis=axis), ma.median(group_2,axis=axis))
(std_1, std_2) = (mstats.stde_median(group_1, axis=axis),
mstats.stde_median(group_2, axis=axis))
W = np.abs(med_1 - med_2) / ma.sqrt(std_1**2 + std_2**2)
return 1 - norm.cdf(W)
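# Illustrative usage sketch (invented data; per the docstring, each group
# needs at least 7 observations):
#
# >>> import numpy.ma as ma
# >>> g1 = ma.array([10., 11., 12., 13., 14., 15., 16.])
# >>> g2 = ma.array([12., 13., 14., 15., 16., 17., 18.])
# >>> p = compare_medians_ms(g1, g2)   # small p suggests different medians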
def idealfourths(data, axis=None):
"""
Returns an estimate of the lower and upper quartiles.
Uses the ideal fourths algorithm.
Parameters
----------
data : array_like
Input array.
axis : int, optional
Axis along which the quartiles are estimated. If None, the arrays are
flattened.
Returns
-------
idealfourths : {list of floats, masked array}
Returns the two internal values that divide `data` into four parts
using the ideal fourths algorithm either along the flattened array
(if `axis` is None) or along `axis` of `data`.
"""
def _idf(data):
x = data.compressed()
n = len(x)
if n < 3:
return [np.nan,np.nan]
(j,h) = divmod(n/4. + 5/12.,1)
j = int(j)
qlo = (1-h)*x[j-1] + h*x[j]
k = n - j
qup = (1-h)*x[k] + h*x[k-1]
return [qlo, qup]
data = ma.sort(data, axis=axis).view(MaskedArray)
if (axis is None):
return _idf(data)
else:
return ma.apply_along_axis(_idf, axis, data)
def rsh(data, points=None):
"""
Evaluates Rosenblatt's shifted histogram estimators for each data point.
Rosenblatt's estimator is a centered finite-difference approximation to the
derivative of the empirical cumulative distribution function.
Parameters
----------
data : sequence
Input data, should be 1-D. Masked values are ignored.
points : sequence or None, optional
Sequence of points where to evaluate Rosenblatt shifted histogram.
If None, use the data.
"""
data = ma.array(data, copy=False)
if points is None:
points = data
else:
points = np.array(points, copy=False, ndmin=1)
if data.ndim != 1:
raise AttributeError("The input array should be 1D only !")
n = data.count()
r = idealfourths(data, axis=None)
h = 1.2 * (r[-1]-r[0]) / n**(1./5)
nhi = (data[:,None] <= points[None,:] + h).sum(0)
nlo = (data[:,None] < points[None,:] - h).sum(0)
return (nhi-nlo) / (2.*n*h)
| bsd-3-clause |
kaldonis/ft-event-manager | src/lib/jinja2/testsuite/tests.py | 7 | 3474 | # -*- coding: utf-8 -*-
"""
jinja2.testsuite.tests
~~~~~~~~~~~~~~~~~~~~~~
Who tests the tests?
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import unittest
from jinja2.testsuite import JinjaTestCase
from jinja2 import Markup, Environment
env = Environment()
class TestsTestCase(JinjaTestCase):
def test_defined(self):
tmpl = env.from_string('{{ missing is defined }}|{{ true is defined }}')
assert tmpl.render() == 'False|True'
def test_even(self):
tmpl = env.from_string('''{{ 1 is even }}|{{ 2 is even }}''')
assert tmpl.render() == 'False|True'
def test_odd(self):
tmpl = env.from_string('''{{ 1 is odd }}|{{ 2 is odd }}''')
assert tmpl.render() == 'True|False'
def test_lower(self):
tmpl = env.from_string('''{{ "foo" is lower }}|{{ "FOO" is lower }}''')
assert tmpl.render() == 'True|False'
def test_typechecks(self):
tmpl = env.from_string('''
{{ 42 is undefined }}
{{ 42 is defined }}
{{ 42 is none }}
{{ none is none }}
{{ 42 is number }}
{{ 42 is string }}
{{ "foo" is string }}
{{ "foo" is sequence }}
{{ [1] is sequence }}
{{ range is callable }}
{{ 42 is callable }}
{{ range(5) is iterable }}
{{ {} is mapping }}
{{ mydict is mapping }}
{{ [] is mapping }}
''')
class MyDict(dict):
pass
assert tmpl.render(mydict=MyDict()).split() == [
'False', 'True', 'False', 'True', 'True', 'False',
'True', 'True', 'True', 'True', 'False', 'True',
'True', 'True', 'False'
]
def test_sequence(self):
tmpl = env.from_string(
'{{ [1, 2, 3] is sequence }}|'
'{{ "foo" is sequence }}|'
'{{ 42 is sequence }}'
)
assert tmpl.render() == 'True|True|False'
def test_upper(self):
tmpl = env.from_string('{{ "FOO" is upper }}|{{ "foo" is upper }}')
assert tmpl.render() == 'True|False'
def test_equalto(self):
tmpl = env.from_string('{{ foo is equalto 12 }}|'
'{{ foo is equalto 0 }}|'
'{{ foo is equalto (3 * 4) }}|'
'{{ bar is equalto "baz" }}|'
'{{ bar is equalto "zab" }}|'
'{{ bar is equalto ("ba" + "z") }}|'
'{{ bar is equalto bar }}|'
'{{ bar is equalto foo }}')
assert tmpl.render(foo=12, bar="baz") == 'True|False|True|True|False|True|True|False'
def test_sameas(self):
tmpl = env.from_string('{{ foo is sameas false }}|'
'{{ 0 is sameas false }}')
assert tmpl.render(foo=False) == 'True|False'
def test_no_paren_for_arg1(self):
tmpl = env.from_string('{{ foo is sameas none }}')
assert tmpl.render(foo=None) == 'True'
def test_escaped(self):
env = Environment(autoescape=True)
tmpl = env.from_string('{{ x is escaped }}|{{ y is escaped }}')
assert tmpl.render(x='foo', y=Markup('foo')) == 'False|True'
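# Illustrative sketch (not part of the original suite): custom tests can be
# registered by assigning a callable into Environment.tests, after which they
# are usable with the same "is" syntax the cases above exercise, e.g.
#
#   env.tests['multiple_of'] = lambda value, num: value % num == 0
#   env.from_string('{{ 12 is multiple_of 3 }}').render()   # renders 'True'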
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestsTestCase))
return suite
| gpl-2.0 |
geekboxzone/mmallow_external_deqp | scripts/build/config.py | 2 | 5833 | # -*- coding: utf-8 -*-
#-------------------------------------------------------------------------
# drawElements Quality Program utilities
# --------------------------------------
#
# Copyright 2015 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#-------------------------------------------------------------------------
import os
import sys
import copy
import platform
import multiprocessing
from common import which, DEQP_DIR
try:
import _winreg
except:
_winreg = None
class BuildConfig:
def __init__ (self, buildDir, buildType, args, srcPath = DEQP_DIR):
self.srcPath = srcPath
self.buildDir = buildDir
self.buildType = buildType
self.args = copy.copy(args)
def getSrcPath (self):
return self.srcPath
def getBuildDir (self):
return self.buildDir
def getBuildType (self):
return self.buildType
def getArgs (self):
return self.args
class CMakeGenerator:
def __init__ (self, name, isMultiConfig = False, extraBuildArgs = []):
self.name = name
self.isMultiConfig = isMultiConfig
self.extraBuildArgs = copy.copy(extraBuildArgs)
def getName (self):
return self.name
def getGenerateArgs (self, buildType):
args = ['-G', self.name]
if not self.isMultiConfig:
args.append('-DCMAKE_BUILD_TYPE=%s' % buildType)
return args
def getBuildArgs (self, buildType):
args = []
if self.isMultiConfig:
args += ['--config', buildType]
if len(self.extraBuildArgs) > 0:
args += ['--'] + self.extraBuildArgs
return args
def getBinaryPath (self, buildType, basePath):
return basePath
class UnixMakefileGenerator(CMakeGenerator):
def __init__(self):
CMakeGenerator.__init__(self, "Unix Makefiles", extraBuildArgs = ["-j%d" % multiprocessing.cpu_count()])
def isAvailable (self):
return which('make') != None
class NinjaGenerator(CMakeGenerator):
def __init__(self):
CMakeGenerator.__init__(self, "Ninja")
def isAvailable (self):
return which('ninja') != None
class VSProjectGenerator(CMakeGenerator):
ARCH_32BIT = 0
ARCH_64BIT = 1
def __init__(self, version, arch):
name = "Visual Studio %d" % version
if arch == self.ARCH_64BIT:
name += " Win64"
CMakeGenerator.__init__(self, name, isMultiConfig = True, extraBuildArgs = ['/m'])
self.version = version
self.arch = arch
def getBinaryPath (self, buildType, basePath):
return os.path.join(os.path.dirname(basePath), buildType, os.path.basename(basePath) + ".exe")
@staticmethod
def getNativeArch ():
arch = platform.machine().lower()
if arch == 'x86':
return VSProjectGenerator.ARCH_32BIT
elif arch == 'amd64':
return VSProjectGenerator.ARCH_64BIT
else:
raise Exception("Unhandled arch '%s'" % arch)
@staticmethod
def registryKeyAvailable (root, arch, name):
try:
key = _winreg.OpenKey(root, name, 0, _winreg.KEY_READ | arch)
_winreg.CloseKey(key)
return True
except:
return False
def isAvailable (self):
if sys.platform == 'win32' and _winreg != None:
nativeArch = VSProjectGenerator.getNativeArch()
if nativeArch == self.ARCH_32BIT and self.arch == self.ARCH_64BIT:
return False
arch = _winreg.KEY_WOW64_32KEY if nativeArch == self.ARCH_64BIT else 0
keyMap = {
10: [(_winreg.HKEY_CLASSES_ROOT, "VisualStudio.DTE.10.0"), (_winreg.HKEY_LOCAL_MACHINE, "Software\\Microsoft\\VCExpress\\10.0")],
11: [(_winreg.HKEY_CLASSES_ROOT, "VisualStudio.DTE.11.0"), (_winreg.HKEY_LOCAL_MACHINE, "Software\\Microsoft\\VCExpress\\11.0")],
12: [(_winreg.HKEY_CLASSES_ROOT, "VisualStudio.DTE.12.0"), (_winreg.HKEY_LOCAL_MACHINE, "Software\\Microsoft\\VCExpress\\12.0")],
}
if not self.version in keyMap:
raise Exception("Unsupported VS version %d" % self.version)
keys = keyMap[self.version]
for root, name in keys:
if VSProjectGenerator.registryKeyAvailable(root, arch, name):
return True
return False
else:
return False
# Pre-defined generators
MAKEFILE_GENERATOR = UnixMakefileGenerator()
NINJA_GENERATOR = NinjaGenerator()
VS2010_X32_GENERATOR = VSProjectGenerator(10, VSProjectGenerator.ARCH_32BIT)
VS2010_X64_GENERATOR = VSProjectGenerator(10, VSProjectGenerator.ARCH_64BIT)
VS2012_X32_GENERATOR = VSProjectGenerator(11, VSProjectGenerator.ARCH_32BIT)
VS2012_X64_GENERATOR = VSProjectGenerator(11, VSProjectGenerator.ARCH_64BIT)
VS2013_X32_GENERATOR = VSProjectGenerator(12, VSProjectGenerator.ARCH_32BIT)
VS2013_X64_GENERATOR = VSProjectGenerator(12, VSProjectGenerator.ARCH_64BIT)
def selectFirstAvailableGenerator (generators):
for generator in generators:
if generator.isAvailable():
return generator
return None
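# Illustrative usage sketch (preference order invented; generator objects are
# the ones defined above): callers can build their own priority list the same
# way the pre-defined combinations below do, e.g.
#
#   preferred = selectFirstAvailableGenerator([NINJA_GENERATOR, MAKEFILE_GENERATOR])
#   if preferred is not None:
#       args = preferred.getGenerateArgs("Release")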
ANY_VS_X32_GENERATOR = selectFirstAvailableGenerator([
VS2013_X32_GENERATOR,
VS2012_X32_GENERATOR,
VS2010_X32_GENERATOR,
])
ANY_VS_X64_GENERATOR = selectFirstAvailableGenerator([
VS2013_X64_GENERATOR,
VS2012_X64_GENERATOR,
VS2010_X64_GENERATOR,
])
ANY_UNIX_GENERATOR = selectFirstAvailableGenerator([
NINJA_GENERATOR,
MAKEFILE_GENERATOR,
])
ANY_GENERATOR = selectFirstAvailableGenerator([
VS2013_X64_GENERATOR,
VS2012_X64_GENERATOR,
VS2010_X64_GENERATOR,
VS2013_X32_GENERATOR,
VS2012_X32_GENERATOR,
VS2010_X32_GENERATOR,
NINJA_GENERATOR,
MAKEFILE_GENERATOR,
])
| apache-2.0 |
Fsero/security_monkey | security_monkey/views/revision.py | 7 | 8458 | # Copyright 2014 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from security_monkey.common.utils.PolicyDiff import PolicyDiff
from security_monkey.views import AuthenticatedService
from security_monkey.views import __check_auth__
from security_monkey.views import REVISION_FIELDS
from security_monkey.views import REVISION_COMMENT_FIELDS
from security_monkey.views import ITEM_FIELDS
from security_monkey.datastore import Item
from security_monkey.datastore import Account
from security_monkey.datastore import Technology
from security_monkey.datastore import ItemRevision
from flask.ext.restful import marshal, reqparse
from sqlalchemy.sql.expression import cast
from sqlalchemy import String
class RevisionGet(AuthenticatedService):
def __init__(self):
self.reqparse = reqparse.RequestParser()
super(RevisionGet, self).__init__()
def get(self, revision_id):
"""
.. http:get:: /api/1/revision/1234
Get a specific revision.
**Example Request**:
.. sourcecode:: http
GET /api/1/revision/123 HTTP/1.1
Host: example.com
Accept: application/json, text/javascript
**Example Response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Vary: Accept
Content-Type: application/json
{
"auth": {
"authenticated": true,
"user": "user@example.com"
},
"item_id": 114,
"comments": [],
"active": false,
"date_created": "2013-10-04 22:01:47",
"config": {},
"id":123
}
:statuscode 200: no error
:statuscode 401: Authentication failure. Please login.
"""
auth, retval = __check_auth__(self.auth_dict)
if auth:
return retval
query = ItemRevision.query.filter(ItemRevision.id == revision_id)
result = query.first()
comments = []
for comment in result.comments:
comment_marshaled = marshal(comment, REVISION_COMMENT_FIELDS)
comments.append(dict(
comment_marshaled.items() +
{'user': comment.user.email}.items()
))
revision_marshaled = marshal(result, REVISION_FIELDS)
revision_marshaled = dict(
revision_marshaled.items() +
{'config': result.config}.items() +
{'auth': self.auth_dict}.items() +
{'comments': comments}.items()
)
self.reqparse.add_argument('compare', type=int, default=None, location='args')
args = self.reqparse.parse_args()
compare_id = args.pop('compare', None)
if compare_id:
query = ItemRevision.query.filter(ItemRevision.id == compare_id)
compare_result = query.first()
pdiff = PolicyDiff(result.config, compare_result.config)
revision_marshaled = dict(
revision_marshaled.items() +
{'diff_html': pdiff.produceDiffHTML()}.items()
)
return revision_marshaled, 200
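# Illustrative request sketch (IDs invented): passing ?compare=<revision_id>
# makes the endpoint above include an HTML diff of the two configurations:
#
#   GET /api/1/revision/123?compare=122
#
# The response then carries a "diff_html" key produced by PolicyDiff.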
class RevisionList(AuthenticatedService):
def __init__(self):
super(RevisionList, self).__init__()
def get(self):
"""
.. http:get:: /api/1/revisions
Get a list of revisions
**Example Request**:
.. sourcecode:: http
GET /api/1/revisions?count=1 HTTP/1.1
Host: example.com
Accept: application/json
**Example Response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Vary: Accept
Content-Type: application/json
{
"items": [
{
"account": "example_account",
"name": "Example Name",
"region": "us-east-1",
"item_id": 144,
"active": false,
"date_created": "2014-06-19 20:54:12.962951",
"technology": "sqs",
"id": 223757
}
],
"total": 1,
"page": 1,
"auth": {
"authenticated": true,
"user": "user@example.com"
}
}
:statuscode 200: no error
:statuscode 401: Authentication Error. Please Login.
"""
auth, retval = __check_auth__(self.auth_dict)
if auth:
return retval
self.reqparse.add_argument('count', type=int, default=30, location='args')
self.reqparse.add_argument('page', type=int, default=1, location='args')
self.reqparse.add_argument('active', type=str, default=None, location='args')
self.reqparse.add_argument('regions', type=str, default=None, location='args')
self.reqparse.add_argument('accounts', type=str, default=None, location='args')
self.reqparse.add_argument('names', type=str, default=None, location='args')
self.reqparse.add_argument('technologies', type=str, default=None, location='args')
self.reqparse.add_argument('searchconfig', type=str, default=None, location='args')
args = self.reqparse.parse_args()
page = args.pop('page', None)
count = args.pop('count', None)
for k, v in args.items():
if not v:
del args[k]
query = ItemRevision.query.join("item")
if 'regions' in args:
regions = args['regions'].split(',')
query = query.filter(Item.region.in_(regions))
if 'accounts' in args:
accounts = args['accounts'].split(',')
query = query.join((Account, Account.id == Item.account_id))
query = query.filter(Account.name.in_(accounts))
if 'technologies' in args:
technologies = args['technologies'].split(',')
query = query.join((Technology, Technology.id == Item.tech_id))
query = query.filter(Technology.name.in_(technologies))
if 'names' in args:
names = args['names'].split(',')
query = query.filter(Item.name.in_(names))
if 'active' in args:
active = args['active'].lower() == "true"
query = query.filter(ItemRevision.active == active)
if 'searchconfig' in args:
searchconfig = args['searchconfig']
query = query.filter(cast(ItemRevision.config, String).ilike('%{}%'.format(searchconfig)))
query = query.order_by(ItemRevision.date_created.desc())
revisions = query.paginate(page, count)
marshaled_dict = {
'page': revisions.page,
'total': revisions.total,
'auth': self.auth_dict
}
items_marshaled = []
for revision in revisions.items:
item_marshaled = marshal(revision.item.__dict__, ITEM_FIELDS)
revision_marshaled = marshal(revision.__dict__, REVISION_FIELDS)
account_marshaled = {'account': revision.item.account.name}
technology_marshaled = {'technology': revision.item.technology.name}
merged_marshaled = dict(
item_marshaled.items() +
revision_marshaled.items() +
account_marshaled.items() +
technology_marshaled.items())
items_marshaled.append(merged_marshaled)
marshaled_dict['items'] = items_marshaled
marshaled_dict['count'] = len(items_marshaled)
return marshaled_dict, 200
| apache-2.0 |
apocalypsebg/odoo | addons/survey_crm/__openerp__.py | 312 | 1593 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Survey CRM',
'version': '2.0',
'category': 'Marketing',
'complexity': 'easy',
'website': 'https://www.odoo.com/page/survey',
'description': """
Survey - CRM (bridge module)
=================================================================================
This module adds a Survey mass-mailing button to the More menu of the lead/customer views
""",
'author': 'OpenERP SA',
'depends': ['crm', 'survey'],
'data': [
'crm_view.xml',
],
'installable': True,
'auto_install': True
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
theepicsnail/SuperBot2 | Core.py | 1 | 5362 | from PluginManager import PluginManager
from PluginDispatcher import PluginDispatcher
from Configuration import ConfigFile
from Util import call
from re import match
from sys import path
from os import getcwd
from Util import dictJoin
from Logging import LogFile
path.append(getcwd())
log = LogFile("Core")
class Core:
_PluginManager = None
_PluginDispatcher = None
_ResponseObject = None
_Connector = None
_Config = None
def _LoadConnector(self, ConName):
try:
con = __import__("%s.Connector" % ConName,
globals(), locals(), "Connector")
log.debug("Got connector:", con)
cls = getattr(con, "Connector", None)
except :
log.exception("Exception while loading connector")
cls = None
log.debug("Connectors class", cls)
if cls:
c = cls()
log.debug("Connector constructed")
return c
log.critical("No connector")
return cls
def HandleEvent(self, event):
log.dict(event,"HandleEvent")
pm = self._PluginManager
if not pm:
log.warning("No plugin manager")
return
pd = self._PluginDispatcher
if not pd:
log.warning("No plugin dispatcher")
return
ro = self._ResponseObject
if not ro:
log.warning("no response object")
pass
matches = pm.GetMatchingFunctions(event)
log.debug("Matched %i hook(s)." % len(matches))
for inst, func, args, servs in matches:
newEvent = dictJoin(event, dictJoin(args,
{"self": inst, "response": ro}))
log.debug("Services found for plugin:", servs)
if servs:
log.debug("Event before processing:", newEvent)
servDict={}
servDict["event"]=newEvent
servDict["pm"]=self._PluginManager
servDict["pd"]=self._PluginDispatcher
servDict["ro"]=self._ResponseObject
servDict["c"]=self._Connector
servDict["core"]=self
servDict["config"]=self._Config
for servName in servs:
serv = pm.GetService(servName)
log.debug("Processing service",servName,serv)
call(serv.onEvent,servDict)
if servs:
log.dict(newEvent,"Event after processing:")
#issue 5 fix goes here
newEvent.update(servDict)
pd.Enqueue((func, newEvent))
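    # Shape of the dict each matched hook receives (reconstructed from the
    # merging above, for illustration only): the connector's raw event fields,
    # plus the hook's own args, plus "self" (the plugin instance) and
    # "response" (the connector's response object). If any services ran, the
    # service dict keys ("event", "pm", "pd", "ro", "c", "core", "config") are
    # merged in as well before the call is enqueued.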
def __init__(self):
self._Config = ConfigFile("Core")
if not self._Config:
log.critical("No log file loaded!")
return
ConName = self._Config["Core", "Provider"]
        if ConName is None:
            log.critical("No Core:Provider in Core.cfg")
            self._Connector = None
            return
self._Connector=self._LoadConnector(ConName)
if self._Connector:
self._PluginManager = PluginManager(ConName)
self._PluginDispatcher = PluginDispatcher()
self._Connector.SetEventHandler(self.HandleEvent)
self._ResponseObject = self._Connector.GetResponseObject()
self._PluginDispatcher.SetResponseHandler(
self._Connector.HandleResponse)
def Start(self):
if not self._Connector:
log.warning("Could not start, no connector.")
return
log.debug("Starting")
log.debug("Auto loading plugins")
self.AutoLoad()
log.debug("Auto load complete")
if self._Connector:
log.debug("Connector starting")
self._Connector.Start()
#else log error?
def Stop(self):
log.debug("Stopping")
if self._PluginDispatcher:
self._PluginDispatcher.Stop()
if self._PluginManager:
self._PluginManager.Stop()
if self._Connector:
self._Connector.Stop()
def AutoLoad(self):
if not self._PluginManager:
return
pm = self._PluginManager
log.note("Starting autoload", "Root:" + pm.root)
cf = ConfigFile(pm.root, "Autoload")
lines = ["Configuration:"]
for i in cf:
lines.append(i)
for j in cf[i]:
lines.append(" %s=%s"%(j,cf[i,j]))
log.debug(*lines)
if cf:
log.debug("Autoloading plugins.")
names = cf["Plugins", "Names"]
log.debug("Autoloading plugins", names)
if names:
for name in names.split():
pm.LoadPlugin(name)
log.debug("Autoloading finished.")
pd=self._PluginDispatcher
handler = pd.GetResponseHandler()
log.debug("Updating dedicated thread pool",self._ResponseObject,handler)
pd.EnsureDedicated(pm.GetDedicated(),self._ResponseObject,handler)
else:
log.note("No Autoload configuration file")
if __name__ == "__main__":
try:
c = Core()
try:
c.Start()
except:
log.exception("Exception while starting.")
c.Stop()
except:
log.exception("Exception while stopping.")
log.debug("End of core")
| mit |
calebthompson/googletest | test/gtest_uninitialized_test.py | 2901 | 2480 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test warns the user when not initialized properly."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import gtest_test_utils
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_uninitialized_test_')
def Assert(condition):
if not condition:
raise AssertionError
def AssertEq(expected, actual):
if expected != actual:
print 'Expected: %s' % (expected,)
print ' Actual: %s' % (actual,)
raise AssertionError
def TestExitCodeAndOutput(command):
"""Runs the given command and verifies its exit code and output."""
# Verifies that 'command' exits with code 1.
p = gtest_test_utils.Subprocess(command)
Assert(p.exited)
AssertEq(1, p.exit_code)
Assert('InitGoogleTest' in p.output)
class GTestUninitializedTest(gtest_test_utils.TestCase):
def testExitCodeAndOutput(self):
TestExitCodeAndOutput(COMMAND)
if __name__ == '__main__':
gtest_test_utils.Main()
| bsd-3-clause |
CAB-LAB/cablab-cubeio | test/providers/test_precip.py | 2 | 3183 | import os
import unittest
from datetime import datetime
from esdl import CubeConfig
from esdl.providers.precip import PrecipProvider
from test.providers.provider_test_utils import ProviderTestBase
from esdl.util import Config
SOURCE_DIR = Config.instance().get_cube_source_path('CPC_precip')
class PrecipProviderTest(ProviderTestBase):
@unittest.skipIf(not os.path.exists(SOURCE_DIR), 'test data not found: ' + SOURCE_DIR)
def test_source_time_ranges(self):
provider = PrecipProvider(CubeConfig(), dir=SOURCE_DIR)
provider.prepare()
source_time_ranges = provider.source_time_ranges
self.assertEqual(13149, len(source_time_ranges))
self.assert_source_time_ranges(source_time_ranges[0],
datetime(1979, 1, 1, 0, 0, 0, 33),
datetime(1979, 1, 2, 0, 0, 0, 33),
self.get_source_dir_list(SOURCE_DIR) + ['Precip.V1.720.360.1979.nc.gz'],
0)
self.assert_source_time_ranges(source_time_ranges[6],
datetime(1979, 1, 7, 0, 0, 0, 33),
datetime(1979, 1, 8, 0, 0, 0, 33),
self.get_source_dir_list(SOURCE_DIR) + ['Precip.V1.720.360.1979.nc.gz'],
6)
self.assert_source_time_ranges(source_time_ranges[13148],
datetime(2014, 12, 31, 0, 0, 0, 33),
datetime(2015, 1, 1, 0, 0, 0, 33),
self.get_source_dir_list(SOURCE_DIR) + ['Precip.RT.720.360.2014.nc.gz'],
364)
@unittest.skipIf(not os.path.exists(SOURCE_DIR), 'test data not found: ' + SOURCE_DIR)
def test_temporal_coverage(self):
provider = PrecipProvider(CubeConfig(), dir=SOURCE_DIR)
provider.prepare()
self.assertEqual((datetime(1979, 1, 1, 0, 0, 0, 33), datetime(2015, 1, 1, 0, 0, 0, 33)),
provider.temporal_coverage)
@unittest.skipIf(not os.path.exists(SOURCE_DIR), 'test data not found: ' + SOURCE_DIR)
def test_get_images(self):
provider = PrecipProvider(CubeConfig(), dir=SOURCE_DIR)
provider.prepare()
images = provider.compute_variable_images(datetime(1996, 1, 1), datetime(1996, 1, 9))
self.assertIsNotNone(images)
self.assertTrue('precipitation' in images)
image = images['precipitation']
self.assertEqual((720, 1440), image.shape)
@unittest.skipIf(not os.path.exists(SOURCE_DIR), 'test data not found: ' + SOURCE_DIR)
def test_get_high_res_images(self):
provider = PrecipProvider(CubeConfig(grid_width=4320, grid_height=2160, spatial_res=1 / 12), dir=SOURCE_DIR)
provider.prepare()
images = provider.compute_variable_images(datetime(1996, 1, 1), datetime(1996, 1, 9))
self.assertIsNotNone(images)
self.assertTrue('precipitation' in images)
image = images['precipitation']
self.assertEqual((2160, 4320), image.shape)
| gpl-3.0 |
mpetyx/palmdrop | venv/lib/python2.7/site-packages/django/contrib/localflavor/ca/forms.py | 100 | 4953 | """
Canada-specific Form helpers
"""
from __future__ import absolute_import, unicode_literals
import re
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import Field, CharField, Select
from django.utils.encoding import smart_text
from django.utils.translation import ugettext_lazy as _
phone_digits_re = re.compile(r'^(?:1-?)?(\d{3})[-\.]?(\d{3})[-\.]?(\d{4})$')
sin_re = re.compile(r"^(\d{3})-(\d{3})-(\d{3})$")
class CAPostalCodeField(CharField):
"""
Canadian postal code field.
Validates against known invalid characters: D, F, I, O, Q, U
Additionally the first character cannot be Z or W.
For more info see:
http://www.canadapost.ca/tools/pg/manual/PGaddress-e.asp#1402170
"""
default_error_messages = {
'invalid': _('Enter a postal code in the format XXX XXX.'),
}
postcode_regex = re.compile(r'^([ABCEGHJKLMNPRSTVXY]\d[ABCEGHJKLMNPRSTVWXYZ]) *(\d[ABCEGHJKLMNPRSTVWXYZ]\d)$')
def clean(self, value):
value = super(CAPostalCodeField, self).clean(value)
if value in EMPTY_VALUES:
return ''
postcode = value.upper().strip()
m = self.postcode_regex.match(postcode)
if not m:
raise ValidationError(self.default_error_messages['invalid'])
return "%s %s" % (m.group(1), m.group(2))
class CAPhoneNumberField(Field):
"""Canadian phone number field."""
default_error_messages = {
'invalid': _('Phone numbers must be in XXX-XXX-XXXX format.'),
}
def clean(self, value):
"""Validate a phone number.
"""
super(CAPhoneNumberField, self).clean(value)
if value in EMPTY_VALUES:
return ''
value = re.sub('(\(|\)|\s+)', '', smart_text(value))
m = phone_digits_re.search(value)
if m:
return '%s-%s-%s' % (m.group(1), m.group(2), m.group(3))
raise ValidationError(self.error_messages['invalid'])
class CAProvinceField(Field):
"""
A form field that validates its input is a Canadian province name or abbreviation.
    It normalizes the input to the standard two-letter postal service
abbreviation for the given province.
"""
default_error_messages = {
'invalid': _('Enter a Canadian province or territory.'),
}
def clean(self, value):
super(CAProvinceField, self).clean(value)
if value in EMPTY_VALUES:
return ''
try:
value = value.strip().lower()
except AttributeError:
pass
else:
# Load data in memory only when it is required, see also #17275
from .ca_provinces import PROVINCES_NORMALIZED
try:
return PROVINCES_NORMALIZED[value.strip().lower()]
except KeyError:
pass
raise ValidationError(self.error_messages['invalid'])
class CAProvinceSelect(Select):
"""
A Select widget that uses a list of Canadian provinces and
territories as its choices.
"""
def __init__(self, attrs=None):
# Load data in memory only when it is required, see also #17275
from .ca_provinces import PROVINCE_CHOICES
super(CAProvinceSelect, self).__init__(attrs, choices=PROVINCE_CHOICES)
class CASocialInsuranceNumberField(Field):
"""
A Canadian Social Insurance Number (SIN).
Checks the following rules to determine whether the number is valid:
* Conforms to the XXX-XXX-XXX format.
* Passes the check digit process "Luhn Algorithm"
See: http://en.wikipedia.org/wiki/Social_Insurance_Number
"""
default_error_messages = {
'invalid': _('Enter a valid Canadian Social Insurance number in XXX-XXX-XXX format.'),
}
def clean(self, value):
super(CASocialInsuranceNumberField, self).clean(value)
if value in EMPTY_VALUES:
return ''
match = re.match(sin_re, value)
if not match:
raise ValidationError(self.error_messages['invalid'])
number = '%s-%s-%s' % (match.group(1), match.group(2), match.group(3))
check_number = '%s%s%s' % (match.group(1), match.group(2), match.group(3))
if not self.luhn_checksum_is_valid(check_number):
raise ValidationError(self.error_messages['invalid'])
return number
def luhn_checksum_is_valid(self, number):
"""
Checks to make sure that the SIN passes a luhn mod-10 checksum
See: http://en.wikipedia.org/wiki/Luhn_algorithm
"""
sum = 0
num_digits = len(number)
oddeven = num_digits & 1
for count in range(0, num_digits):
digit = int(number[count])
if not (( count & 1 ) ^ oddeven ):
digit = digit * 2
if digit > 9:
digit = digit - 9
sum = sum + digit
return ( (sum % 10) == 0 )
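    # Worked example (using the widely published sample SIN 046-454-286, which
    # is valid): with nine digits, every second digit from the left is doubled
    # (4,4,4,8 -> 8,8,8,16 -> 8,8,8,7 after subtracting 9), giving
    # 0+8+6+8+5+8+2+7+6 = 50, and 50 % 10 == 0, so the checksum passes.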
| apache-2.0 |
ryanpstauffer/market-vis | marketvis/quotes.py | 1 | 5030 | # -*- coding: utf-8 -*-
"""
[Python 2.7 (Mayavi is not yet compatible with Python 3+)]
Created on Wed Dec 16 22:44:15 2015
@author: Ryan Stauffer
https://github.com/ryanpstauffer/market-vis
[This module referenced http://www.theodor.io/scraping-google-finance-data-using-pandas/]
Market Visualization Prototype
Quotes Module
"""
from datetime import datetime, date
import pandas as pd
import json
import urllib
import urllib2
import os
def getIntradayData(ticker, interval_seconds=61, num_days=10):
# Specify URL string based on function inputs.
urlString = 'http://www.google.com/finance/getprices?q={0}'.format(ticker.upper())
urlString += "&i={0}&p={1}d&f=d,c".format(interval_seconds,num_days)
# Request the text, and split by each line
r = urllib2.urlopen(urllib2.Request(urlString)).read()
r = r.splitlines()
# Split each line by a comma, starting at the 8th line
r = [line.split(',') for line in r[7:]]
# Save data in Pandas DataFrame
df = pd.DataFrame(r, columns=['Datetime',ticker])
# Convert UNIX to Datetime format
df['Datetime'] = df['Datetime'].apply(lambda x: datetime.fromtimestamp(int(x[1:])))
df.index = df['Datetime']
return df[ticker]
def getDailyData(ticker, startDate, endDate=date.today()):
''' Daily quotes from Google Finance API. Date format='yyyy-mm-dd' '''
ticker = ticker.upper()
urlString = "http://www.google.com/finance/historical?q={0}".format(ticker)
urlString += "&startdate={0}&enddate={1}&output=csv".format(
startDate.strftime('%b %d, %Y'),endDate.strftime('%b %d, %Y'))
#Convert URL output to dataframe
df = pd.read_csv(urllib.urlopen(urlString))
# Convert strings to Datetime format
df[df.columns[0]] = df[df.columns[0]].apply(lambda x: datetime.strptime(x, '%d-%b-%y'))
#Index by date
df.index = df[df.columns[0]]
df.drop(df.columns[0], axis=1, inplace=True)
return df
def getLastPrice(ticker):
    '''Returns the last price of a given ticker (from the Google Finance API)'''
# Specify URL string based on function inputs.
urlString = 'http://www.google.com/finance/info?client=ig&q={0}'.format(ticker.upper())
    # Request the quote data; the response is prefixed with "// ", so skip 3 bytes before parsing
r = urllib2.urlopen(urllib2.Request(urlString)).read()
obj = json.loads(r[3:])
print(obj)
price = float(obj[0]['l'])
return price
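# Illustrative usage sketch (ticker and dates invented; requires network
# access to the Google Finance endpoints above):
#
#   start = datetime.strptime('20150101', '%Y%m%d')
#   end = datetime.strptime('20150601', '%Y%m%d')
#   daily = getDailyData('AAPL', start, end)   # OHLC DataFrame indexed by date
#   last = getLastPrice('AAPL')                # float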
def buildDailyPriceData(tickerList, startDate, endDate):
print('Pulling Market Data for S&P 500 from {0} to {1}'.format(startDate.strftime('%Y%m%d'), endDate.strftime('%Y%m%d')))
#Build SP500 daily price data (for saving)
firstTicker = tickerList[0]
print(firstTicker)
firstTickerData = getDailyData(firstTicker, startDate, endDate)
firstTickerData.rename(columns={'Close' : firstTicker}, inplace = True)
df = firstTickerData[firstTicker]
for ticker in tickerList[1:]:
print(ticker)
newTicker = getDailyData(ticker, startDate, endDate)
if not newTicker.empty:
newTicker.rename(columns={'Close' : ticker}, inplace = True)
df = pd.concat([df, newTicker[ticker]], axis=1, join='outer')
#Google returns data w/ most recent at the top, this puts data in chrono order
stockPrices = df.sort_index()
print('Pulled data for {0} stocks from {1} to {2}'.format(len(stockPrices.columns), startDate.strftime('%Y%m%d'), endDate.strftime('%Y%m%d')))
return stockPrices
def buildDummyData():
'''Builds Daily Price Data from a backup .csv file
Used for offline testing purposes
'''
#Select Dates
startDate = datetime.strptime('20120101', '%Y%m%d')
endDate = datetime.strptime('20130101', '%Y%m%d')
#Load dataset from .csv
print("Pulling Market Data from .csv")
dataLoc = os.path.join(os.path.dirname(__file__),"Resources/SP500_daily_price_data.csv")
df = pd.read_csv(dataLoc)
#Convert strings to Datetime format
df[df.columns[0]] = df[df.columns[0]].apply(lambda x: datetime.strptime(x, '%Y-%m-%d'))
df.index = df[df.columns[0]]
df.drop(df.columns[0], axis=1, inplace=True)
#Build Price Table
stockPrices = df[startDate:endDate]
print('Pulled data for {0} stocks from {1} to {2}'.format(len(stockPrices.columns), startDate.strftime('%Y%m%d'), endDate.strftime('%Y%m%d')))
return stockPrices
def createIndexedPricing(stockPrices, startingIndexValue):
    '''Takes a stock price table and converts it to indexed pricing
(i.e. all prices are relative based on a common starting index value)
Inputs:
stockPrices => a panda DataFrame
startingIndexValue => the value that all prices will start at
'''
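    # Worked example (invented prices): a column [10.0, 11.0, 12.1] has daily
    # returns [NaN, 0.10, 0.10]; adding 1 gives [NaN, 1.10, 1.10], seeding the
    # first row with startingIndexValue=100 and taking the cumulative product
    # yields [100.0, 110.0, 121.0] -- every series starts at the same index.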
#Build Returns Table
stockReturns = stockPrices.pct_change(1)
#Build Indexed Price Table (indexed to 100)
indexedPrices = stockReturns + 1
indexedPrices.iloc[0] = startingIndexValue
indexedPrices = indexedPrices.cumprod(axis=0)
return indexedPrices | mit |
razvanphp/arangodb | 3rdParty/V8-3.31.74.1/third_party/python_26/Lib/site-packages/pythonwin/pywin/framework/startup.py | 17 | 2151 | # startup.py
#
"The main application startup code for PythonWin."
#
# This does the basic command line handling.
# Keep this as short as possible, cos error output is only redirected if
# this runs OK. Errors in imported modules are much better - the messages go somewhere (not any more :-)
import sys
import win32ui
import strop
# You may wish to redirect error output somewhere useful if you have startup errors.
# eg, 'import win32traceutil' will do this for you.
# import win32traceutil # Just uncomment this line to see error output!
# An old class I used to use - generally only useful if Pythonwin is running under MSVC
#class DebugOutput:
# softspace=1
# def write(self,message):
# win32ui.OutputDebug(message)
#sys.stderr=sys.stdout=DebugOutput()
# To fix a problem with Pythonwin when started from the Pythonwin directory,
# we update the pywin path to ensure it is absolute.
# If it is indeed relative, it will be relative to our current directory.
# If it's already absolute, then this will have no effect.
import pywin, pywin.framework
pywin.__path__[0] = win32ui.FullPath(pywin.__path__[0])
pywin.framework.__path__[0] = win32ui.FullPath(pywin.framework.__path__[0])
# make a few weird sys values. This is so later we can clobber sys.argv to trick
# scripts when running under a GUI environment.
moduleName = "intpyapp"
sys.appargvoffset = 0
sys.appargv = sys.argv[:]
# Must check for /app param here.
if len(sys.argv)>=2 and strop.lower(sys.argv[0])=='/app':
import cmdline
moduleName = cmdline.FixArgFileName(sys.argv[1])
sys.appargvoffset = 2
newargv=sys.argv[sys.appargvoffset:]
# newargv.insert(0, sys.argv[0])
sys.argv = newargv
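# Illustrative sketch (script name invented): if Pythonwin is started with
#   /app myscript.py arg1 arg2
# then myscript is imported as the application module below, and by this point
# sys.argv has been rewritten to ['arg1', 'arg2'] while the original command
# line is preserved in sys.appargv.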
exec "import %s\n" % moduleName
try:
win32ui.GetApp()._obj_
# This worked - an app already exists - do nothing more
except (AttributeError, win32ui.error):
# This means either no app object exists at all, or the one
# that does exist does not have a Python class (ie, was created
# by the host .EXE). In this case, we do the "old style" init...
import app
if app.AppBuilder is None:
raise TypeError, "No application object has been registered"
app.App = app.AppBuilder()
| apache-2.0 |
lmprice/ansible | docs/bin/generate_man.py | 26 | 8660 | #!/usr/bin/env python
import optparse
import os
import pprint
import sys
from jinja2 import Environment, FileSystemLoader
from ansible.module_utils._text import to_bytes
def generate_parser():
p = optparse.OptionParser(
version='%prog 1.0',
usage='usage: %prog [options]',
description='Generate cli documentation from cli docstrings',
)
p.add_option("-t", "--template-file", action="store", dest="template_file", default="../templates/man.j2", help="path to jinja2 template")
p.add_option("-o", "--output-dir", action="store", dest="output_dir", default='/tmp/', help="Output directory for rst files")
p.add_option("-f", "--output-format", action="store", dest="output_format", default='man', help="Output format for docs (the default 'man' or 'rst')")
return p
# from https://www.python.org/dev/peps/pep-0257/
def trim_docstring(docstring):
if not docstring:
return ''
# Convert tabs to spaces (following the normal Python rules)
# and split into a list of lines:
lines = docstring.expandtabs().splitlines()
# Determine minimum indentation (first line doesn't count):
indent = sys.maxsize
for line in lines[1:]:
stripped = line.lstrip()
if stripped:
indent = min(indent, len(line) - len(stripped))
# Remove indentation (first line is special):
trimmed = [lines[0].strip()]
if indent < sys.maxsize:
for line in lines[1:]:
trimmed.append(line[indent:].rstrip())
# Strip off trailing and leading blank lines:
while trimmed and not trimmed[-1]:
trimmed.pop()
while trimmed and not trimmed[0]:
trimmed.pop(0)
# Return a single string:
return '\n'.join(trimmed)
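# Illustrative behaviour sketch (invented input): the PEP 257 trimming above
# strips the common indentation and surrounding blank lines, e.g.
#
#   trim_docstring("\n    line one\n        indented\n    ")
#   # -> 'line one\n    indented'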
def get_options(optlist):
''' get actual options '''
opts = []
for opt in optlist:
res = {
'desc': opt.help,
'options': opt._short_opts + opt._long_opts
}
if opt.action == 'store':
res['arg'] = opt.dest.upper()
opts.append(res)
return opts
def get_option_groups(option_parser):
groups = []
for option_group in option_parser.option_groups:
group_info = {}
group_info['desc'] = option_group.get_description()
group_info['options'] = option_group.option_list
group_info['group_obj'] = option_group
groups.append(group_info)
return groups
def opt_doc_list(cli):
''' iterate over options lists '''
results = []
for option_group in cli.parser.option_groups:
results.extend(get_options(option_group.option_list))
results.extend(get_options(cli.parser.option_list))
return results
# def opts_docs(cli, name):
def opts_docs(cli_class_name, cli_module_name):
''' generate doc structure from options '''
cli_name = 'ansible-%s' % cli_module_name
if cli_module_name == 'adhoc':
cli_name = 'ansible'
    # With no action/subcommand
# shared opts set
# instantiate each cli and ask its options
cli_klass = getattr(__import__("ansible.cli.%s" % cli_module_name,
fromlist=[cli_class_name]), cli_class_name)
cli = cli_klass([])
# parse the common options
try:
cli.parse()
except:
pass
# base/common cli info
docs = {
'cli': cli_module_name,
'cli_name': cli_name,
'usage': cli.parser.usage,
'short_desc': cli.parser.description,
'long_desc': trim_docstring(cli.__doc__),
'actions': {},
}
option_info = {'option_names': [],
'options': [],
'groups': []}
    for extras in ('ARGUMENTS',):  # tuple, so we iterate attribute names rather than characters
if hasattr(cli, extras):
docs[extras.lower()] = getattr(cli, extras)
common_opts = opt_doc_list(cli)
groups_info = get_option_groups(cli.parser)
shared_opt_names = []
for opt in common_opts:
shared_opt_names.extend(opt.get('options', []))
option_info['options'] = common_opts
option_info['option_names'] = shared_opt_names
option_info['groups'].extend(groups_info)
docs.update(option_info)
# now for each action/subcommand
# force populate parser with per action options
# use class attrs not the attrs on a instance (not that it matters here...)
for action in getattr(cli_klass, 'VALID_ACTIONS', ()):
# instantiate each cli and ask its options
action_cli_klass = getattr(__import__("ansible.cli.%s" % cli_module_name,
fromlist=[cli_class_name]), cli_class_name)
# init with args with action added?
cli = action_cli_klass([])
cli.args.append(action)
try:
cli.parse()
except:
pass
# FIXME/TODO: needed?
# avoid dupe errors
cli.parser.set_conflict_handler('resolve')
cli.set_action()
action_info = {'option_names': [],
'options': []}
# docs['actions'][action] = {}
# docs['actions'][action]['name'] = action
action_info['name'] = action
action_info['desc'] = trim_docstring(getattr(cli, 'execute_%s' % action).__doc__)
# docs['actions'][action]['desc'] = getattr(cli, 'execute_%s' % action).__doc__.strip()
action_doc_list = opt_doc_list(cli)
uncommon_options = []
for action_doc in action_doc_list:
# uncommon_options = []
option_aliases = action_doc.get('options', [])
for option_alias in option_aliases:
if option_alias in shared_opt_names:
continue
# TODO: use set
if option_alias not in action_info['option_names']:
action_info['option_names'].append(option_alias)
if action_doc in action_info['options']:
continue
uncommon_options.append(action_doc)
action_info['options'] = uncommon_options
docs['actions'][action] = action_info
docs['options'] = opt_doc_list(cli)
return docs
if __name__ == '__main__':
parser = generate_parser()
options, args = parser.parse_args()
template_file = options.template_file
template_path = os.path.expanduser(template_file)
template_dir = os.path.abspath(os.path.dirname(template_path))
template_basename = os.path.basename(template_file)
output_dir = os.path.abspath(options.output_dir)
output_format = options.output_format
cli_modules = args
    # various cli parsing codepaths check sys.argv when the 'args' passed in are [],
    # so remove any args so the cli modules don't try to parse them and emit warnings
sys.argv = [sys.argv[0]]
# need to be in right dir
os.chdir(os.path.dirname(__file__))
allvars = {}
output = {}
cli_list = []
cli_bin_name_list = []
# for binary in os.listdir('../../lib/ansible/cli'):
for cli_module_name in cli_modules:
binary = os.path.basename(os.path.expanduser(cli_module_name))
if not binary.endswith('.py'):
continue
elif binary == '__init__.py':
continue
cli_name = os.path.splitext(binary)[0]
if cli_name == 'adhoc':
cli_class_name = 'AdHocCLI'
# myclass = 'AdHocCLI'
output[cli_name] = 'ansible.1.rst.in'
cli_bin_name = 'ansible'
else:
# myclass = "%sCLI" % libname.capitalize()
cli_class_name = "%sCLI" % cli_name.capitalize()
output[cli_name] = 'ansible-%s.1.rst.in' % cli_name
cli_bin_name = 'ansible-%s' % cli_name
# FIXME:
allvars[cli_name] = opts_docs(cli_class_name, cli_name)
cli_bin_name_list.append(cli_bin_name)
cli_list = allvars.keys()
doc_name_formats = {'man': '%s.1.rst.in',
'rst': '%s.rst'}
for cli_name in cli_list:
# template it!
env = Environment(loader=FileSystemLoader(template_dir))
template = env.get_template(template_basename)
# add rest to vars
tvars = allvars[cli_name]
tvars['cli_list'] = cli_list
tvars['cli_bin_name_list'] = cli_bin_name_list
tvars['cli'] = cli_name
        if '-i' in tvars['option_names']:  # membership test against option names, not the option dicts
print('uses inventory')
manpage = template.render(tvars)
filename = os.path.join(output_dir, doc_name_formats[output_format] % tvars['cli_name'])
with open(filename, 'wb') as f:
f.write(to_bytes(manpage))
print("Wrote doc to %s" % filename)
| gpl-3.0 |
RGood/praw | docs/conf.py | 2 | 1079 | import os
import sys
sys.path.insert(0, "..")
from praw import __version__
copyright = '2016, Bryce Boe'
exclude_patterns = ['_build']
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx']
html_static_path = ['_static']
html_theme = 'sphinx_rtd_theme'
html_theme_options = {
'collapse_navigation': True
}
html_use_smartypants = True
htmlhelp_basename = 'PRAW'
intersphinx_mapping = {'python': ('https://docs.python.org/3.5', None)}
master_doc = 'index'
nitpicky = True
project = 'PRAW'
pygments_style = 'sphinx'
release = __version__
source_suffix = '.rst'
suppress_warnings = ['image.nonlocal_uri']
version = '.'.join(__version__.split('.', 2)[:2])
# Use RTD theme locally
if not os.environ.get('READTHEDOCS'):
import sphinx_rtd_theme
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
def skip(app, what, name, obj, skip, options):
if name in {'__call__', '__contains__', '__getitem__', '__init__',
'__iter__', '__len__'}:
return False
return skip
def setup(app):
app.connect('autodoc-skip-member', skip)
| bsd-2-clause |
GauravSahu/odoo | addons/website_event_track/models/event.py | 300 | 8344 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.addons.website.models.website import slug
import pytz
class event_track_tag(osv.osv):
_name = "event.track.tag"
_order = 'name'
_columns = {
'name': fields.char('Event Track Tag', translate=True)
}
class event_tag(osv.osv):
_name = "event.tag"
_order = 'name'
_columns = {
'name': fields.char('Event Tag', translate=True)
}
#
# Tracks: conferences
#
class event_track_stage(osv.osv):
_name = "event.track.stage"
_order = 'sequence'
_columns = {
'name': fields.char('Track Stage', translate=True),
'sequence': fields.integer('Sequence')
}
_defaults = {
'sequence': 0
}
class event_track_location(osv.osv):
_name = "event.track.location"
_columns = {
'name': fields.char('Track Rooms')
}
class event_track(osv.osv):
_name = "event.track"
_description = 'Event Tracks'
_order = 'priority, date'
_inherit = ['mail.thread', 'ir.needaction_mixin', 'website.seo.metadata']
def _website_url(self, cr, uid, ids, field_name, arg, context=None):
res = dict.fromkeys(ids, '')
for track in self.browse(cr, uid, ids, context=context):
res[track.id] = "/event/%s/track/%s" % (slug(track.event_id), slug(track))
return res
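    # Illustrative sketch (names and ids invented): slug() renders a record as
    # "<slugified-name>-<id>", so an event "Open Days" (id 7) with a track
    # "My Talk" (id 42) gets the URL "/event/open-days-7/track/my-talk-42".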
_columns = {
'name': fields.char('Track Title', required=True, translate=True),
'user_id': fields.many2one('res.users', 'Responsible'),
'speaker_ids': fields.many2many('res.partner', string='Speakers'),
'tag_ids': fields.many2many('event.track.tag', string='Tags'),
'stage_id': fields.many2one('event.track.stage', 'Stage'),
'description': fields.html('Track Description', translate=True),
'date': fields.datetime('Track Date'),
'duration': fields.float('Duration', digits=(16,2)),
'location_id': fields.many2one('event.track.location', 'Location'),
'event_id': fields.many2one('event.event', 'Event', required=True),
'color': fields.integer('Color Index'),
'priority': fields.selection([('3','Low'),('2','Medium (*)'),('1','High (**)'),('0','Highest (***)')], 'Priority', required=True),
'website_published': fields.boolean('Available in the website', copy=False),
'website_url': fields.function(_website_url, string="Website url", type="char"),
'image': fields.related('speaker_ids', 'image', type='binary', readonly=True)
}
def set_priority(self, cr, uid, ids, priority, context={}):
return self.write(cr, uid, ids, {'priority' : priority})
def _default_stage_id(self, cr, uid, context={}):
stage_obj = self.pool.get('event.track.stage')
ids = stage_obj.search(cr, uid, [], context=context)
return ids and ids[0] or False
_defaults = {
'user_id': lambda self, cr, uid, ctx: uid,
'website_published': lambda self, cr, uid, ctx: False,
'duration': lambda *args: 1.5,
'stage_id': _default_stage_id,
'priority': '2'
}
def _read_group_stage_ids(self, cr, uid, ids, domain, read_group_order=None, access_rights_uid=None, context=None):
stage_obj = self.pool.get('event.track.stage')
result = stage_obj.name_search(cr, uid, '', context=context)
return result, {}
_group_by_full = {
'stage_id': _read_group_stage_ids,
}
#
# Events
#
class event_event(osv.osv):
_inherit = "event.event"
def _list_tz(self,cr,uid, context=None):
# put POSIX 'Etc/*' entries at the end to avoid confusing users - see bug 1086728
return [(tz,tz) for tz in sorted(pytz.all_timezones, key=lambda tz: tz if not tz.startswith('Etc/') else '_')]
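    # Editor's illustration (assumption, not upstream code): the sort key maps
    # 'Etc/*' names to '_', which sorts after all capitalized zone names, e.g.
    #   sorted(['Etc/GMT', 'UTC', 'Asia/Tokyo'],
    #          key=lambda tz: tz if not tz.startswith('Etc/') else '_')
    #   == ['Asia/Tokyo', 'UTC', 'Etc/GMT']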
def _count_tracks(self, cr, uid, ids, field_name, arg, context=None):
return {
event.id: len(event.track_ids)
for event in self.browse(cr, uid, ids, context=context)
}
def _get_tracks_tag_ids(self, cr, uid, ids, field_names, arg=None, context=None):
res = dict((res_id, []) for res_id in ids)
for event in self.browse(cr, uid, ids, context=context):
for track in event.track_ids:
res[event.id] += [tag.id for tag in track.tag_ids]
res[event.id] = list(set(res[event.id]))
return res
_columns = {
'tag_ids': fields.many2many('event.tag', string='Tags'),
'track_ids': fields.one2many('event.track', 'event_id', 'Tracks', copy=True),
'sponsor_ids': fields.one2many('event.sponsor', 'event_id', 'Sponsorships', copy=True),
'blog_id': fields.many2one('blog.blog', 'Event Blog'),
'show_track_proposal': fields.boolean('Talks Proposals'),
'show_tracks': fields.boolean('Multiple Tracks'),
'show_blog': fields.boolean('News'),
'count_tracks': fields.function(_count_tracks, type='integer', string='Tracks'),
'tracks_tag_ids': fields.function(_get_tracks_tag_ids, type='one2many', relation='event.track.tag', string='Tags of Tracks'),
'allowed_track_tag_ids': fields.many2many('event.track.tag', string='Accepted Tags', help="List of available tags for track proposals."),
'timezone_of_event': fields.selection(_list_tz, 'Event Timezone', size=64),
}
_defaults = {
'show_track_proposal': False,
'show_tracks': False,
'show_blog': False,
'timezone_of_event':lambda self,cr,uid,c: self.pool.get('res.users').browse(cr, uid, uid, c).tz,
}
def _get_new_menu_pages(self, cr, uid, event, context=None):
context = context or {}
result = super(event_event, self)._get_new_menu_pages(cr, uid, event, context=context)
if event.show_tracks:
result.append( (_('Talks'), '/event/%s/track' % slug(event)))
result.append( (_('Agenda'), '/event/%s/agenda' % slug(event)))
if event.blog_id:
            result.append( (_('News'), '/blogpost/%s' % slug(event.blog_id)))
if event.show_track_proposal:
result.append( (_('Talk Proposals'), '/event/%s/track_proposal' % slug(event)))
return result
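    # Editor's illustration (hypothetical values): for an event with slug
    # 'openday-3', show_tracks and show_track_proposal enabled and no blog,
    # this returns the parent result extended with:
    #   [(_('Talks'), '/event/openday-3/track'),
    #    (_('Agenda'), '/event/openday-3/agenda'),
    #    (_('Talk Proposals'), '/event/openday-3/track_proposal')]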
#
# Sponsors
#
class event_sponsors_type(osv.osv):
_name = "event.sponsor.type"
_order = "sequence"
_columns = {
"name": fields.char('Sponsor Type', required=True, translate=True),
"sequence": fields.integer('Sequence')
}
class event_sponsors(osv.osv):
_name = "event.sponsor"
_order = "sequence"
_columns = {
'event_id': fields.many2one('event.event', 'Event', required=True),
'sponsor_type_id': fields.many2one('event.sponsor.type', 'Sponsoring Type', required=True),
'partner_id': fields.many2one('res.partner', 'Sponsor/Customer', required=True),
'url': fields.text('Sponsor Website'),
'sequence': fields.related('sponsor_type_id', 'sequence', string='Sequence', store=True),
'image_medium': fields.related('partner_id', 'image_medium', string='Logo', type='binary')
}
def has_access_to_partner(self, cr, uid, ids, context=None):
partner_ids = [sponsor.partner_id.id for sponsor in self.browse(cr, uid, ids, context=context)]
return len(partner_ids) == self.pool.get("res.partner").search(cr, uid, [("id", "in", partner_ids)], count=True, context=context)
| agpl-3.0 |
JacobJacob/pyew | envi/archs/arm/__init__.py | 17 | 1417 |
"""
The initial arm module.
"""
import envi
from envi.archs.arm.regs import *
from envi.archs.arm.disasm import *
class ArmModule(envi.ArchitectureModule):
def __init__(self):
envi.ArchitectureModule.__init__(self, "armv6", maxinst=4)
self._arch_reg = self.archGetRegCtx()
self._arch_dis = ArmDisasm()
def setModeThumb(self):
self._arch_dis.setMode(MODE_THUMB)
def archGetRegCtx(self):
return ArmRegisterContext()
def archGetBreakInstr(self):
raise Exception ("weird... what are you trying to do here? ARM has a complex breakpoint instruction")
return
def getPointerSize(self):
return 4
def pointerString(self, va):
return "0x%.8x" % va
def prdisp(self, o):
# Just a displacement print helper
dabs = abs(o.disp)
if dabs > 4096:
if o.disp < 0:
return "- 0x%.8x" % dabs
else:
return "+ 0x%.8x" % dabs
else:
if o.disp < 0:
return "- %d" % dabs
else:
return "+ %d" % dabs
def makeOpcode(self, bytes, offset=0, va=0):
"""
Parse a sequence of bytes out into an envi.Opcode instance.
"""
return self._arch_dis.disasm(bytes, offset, va)
#def getEmulator(self):
#return ArmEmulator()
from envi.archs.arm.emu import *
| gpl-2.0 |
twiest/openshift-tools | openshift/installer/vendored/openshift-ansible-3.10.0-0.29.0/roles/openshift_health_checker/library/docker_container.py | 41 | 76451 | #!/usr/bin/python
# pylint: skip-file
# flake8: noqa
# TODO: remove this file once openshift-ansible requires ansible >= 2.3.
# This file is a copy of
# https://github.com/ansible/ansible/blob/20bf02f/lib/ansible/modules/cloud/docker/docker_container.py.
# It has been temporarily vendored here due to issue https://github.com/ansible/ansible/issues/22323.
# Copyright 2016 Red Hat | Ansible
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'committer',
'version': '1.0'}
DOCUMENTATION = '''
---
module: docker_container
short_description: manage docker containers
description:
- Manage the life cycle of docker containers.
- Supports check mode. Run with --check and --diff to view config difference and list of actions to be taken.
version_added: "2.1"
options:
blkio_weight:
description:
- Block IO (relative weight), between 10 and 1000.
default: null
required: false
capabilities:
description:
- List of capabilities to add to the container.
default: null
required: false
cleanup:
description:
- Use with I(detach) to remove the container after successful execution.
default: false
required: false
version_added: "2.2"
command:
description:
- Command to execute when the container starts.
default: null
required: false
cpu_period:
description:
- Limit CPU CFS (Completely Fair Scheduler) period
default: 0
required: false
cpu_quota:
description:
- Limit CPU CFS (Completely Fair Scheduler) quota
default: 0
required: false
cpuset_cpus:
description:
- CPUs in which to allow execution C(1,3) or C(1-3).
default: null
required: false
cpuset_mems:
description:
- Memory nodes (MEMs) in which to allow execution C(0-3) or C(0,1)
default: null
required: false
cpu_shares:
description:
- CPU shares (relative weight).
default: null
required: false
detach:
description:
- Enable detached mode to leave the container running in background.
If disabled, the task will reflect the status of the container run (failed if the command failed).
default: true
required: false
devices:
description:
- "List of host device bindings to add to the container. Each binding is a mapping expressed
in the format: <path_on_host>:<path_in_container>:<cgroup_permissions>"
default: null
required: false
dns_servers:
description:
- List of custom DNS servers.
default: null
required: false
dns_search_domains:
description:
- List of custom DNS search domains.
default: null
required: false
env:
description:
- Dictionary of key,value pairs.
default: null
required: false
env_file:
version_added: "2.2"
description:
- Path to a file containing environment variables I(FOO=BAR).
- If variable also present in C(env), then C(env) value will override.
- Requires docker-py >= 1.4.0.
default: null
required: false
entrypoint:
description:
- Command that overwrites the default ENTRYPOINT of the image.
default: null
required: false
etc_hosts:
description:
- Dict of host-to-IP mappings, where each host name is a key in the dictionary.
Each host name will be added to the container's /etc/hosts file.
default: null
required: false
exposed_ports:
description:
- List of additional container ports which informs Docker that the container
listens on the specified network ports at runtime.
If the port is already exposed using EXPOSE in a Dockerfile, it does not
need to be exposed again.
default: null
required: false
aliases:
- exposed
force_kill:
description:
- Use the kill command when stopping a running container.
default: false
required: false
groups:
description:
- List of additional group names and/or IDs that the container process will run as.
default: null
required: false
hostname:
description:
- Container hostname.
default: null
required: false
ignore_image:
description:
- When C(state) is I(present) or I(started) the module compares the configuration of an existing
container to requested configuration. The evaluation includes the image version. If
the image version in the registry does not match the container, the container will be
recreated. Stop this behavior by setting C(ignore_image) to I(True).
default: false
required: false
version_added: "2.2"
image:
description:
- Repository path and tag used to create the container. If an image is not found or pull is true, the image
will be pulled from the registry. If no tag is included, 'latest' will be used.
default: null
required: false
interactive:
description:
- Keep stdin open after a container is launched, even if not attached.
default: false
required: false
ipc_mode:
description:
- Set the IPC mode for the container. Can be one of 'container:<name|id>' to reuse another
container's IPC namespace or 'host' to use the host's IPC namespace within the container.
default: null
required: false
keep_volumes:
description:
- Retain volumes associated with a removed container.
default: true
required: false
kill_signal:
description:
- Override default signal used to kill a running container.
    default: null
required: false
kernel_memory:
description:
- "Kernel memory limit (format: <number>[<unit>]). Number is a positive integer.
Unit can be one of b, k, m, or g. Minimum is 4M."
default: 0
required: false
labels:
description:
- Dictionary of key value pairs.
default: null
required: false
links:
description:
- List of name aliases for linked containers in the format C(container_name:alias)
default: null
required: false
log_driver:
description:
- Specify the logging driver. Docker uses json-file by default.
choices:
- none
- json-file
- syslog
- journald
- gelf
- fluentd
- awslogs
- splunk
default: null
required: false
log_options:
description:
- Dictionary of options specific to the chosen log_driver. See https://docs.docker.com/engine/admin/logging/overview/
for details.
required: false
default: null
mac_address:
description:
- Container MAC address (e.g. 92:d0:c6:0a:29:33)
default: null
required: false
memory:
description:
- "Memory limit (format: <number>[<unit>]). Number is a positive integer.
Unit can be one of b, k, m, or g"
default: 0
required: false
memory_reservation:
description:
- "Memory soft limit (format: <number>[<unit>]). Number is a positive integer.
Unit can be one of b, k, m, or g"
default: 0
required: false
memory_swap:
description:
- Total memory limit (memory + swap, format:<number>[<unit>]).
Number is a positive integer. Unit can be one of b, k, m, or g.
default: 0
required: false
memory_swappiness:
description:
- Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100.
default: 0
required: false
name:
description:
- Assign a name to a new container or match an existing container.
- When identifying an existing container name may be a name or a long or short container ID.
required: true
network_mode:
description:
- Connect the container to a network.
choices:
- bridge
- container:<name|id>
- host
- none
default: null
required: false
networks:
description:
- List of networks the container belongs to.
- Each network is a dict with keys C(name), C(ipv4_address), C(ipv6_address), C(links), C(aliases).
- For each network C(name) is required, all other keys are optional.
- If included, C(links) or C(aliases) are lists.
- For examples of the data structure and usage see EXAMPLES below.
- To remove a container from one or more networks, use the C(purge_networks) option.
default: null
required: false
version_added: "2.2"
oom_killer:
description:
- Whether or not to disable OOM Killer for the container.
default: false
required: false
oom_score_adj:
description:
- An integer value containing the score given to the container in order to tune OOM killer preferences.
default: 0
required: false
version_added: "2.2"
paused:
description:
- Use with the started state to pause running processes inside the container.
default: false
required: false
pid_mode:
description:
- Set the PID namespace mode for the container. Currently only supports 'host'.
default: null
required: false
privileged:
description:
- Give extended privileges to the container.
default: false
required: false
published_ports:
description:
- List of ports to publish from the container to the host.
- "Use docker CLI syntax: C(8000), C(9000:8000), or C(0.0.0.0:9000:8000), where 8000 is a
container port, 9000 is a host port, and 0.0.0.0 is a host interface."
- Container ports must be exposed either in the Dockerfile or via the C(expose) option.
- A value of all will publish all exposed container ports to random host ports, ignoring
any other mappings.
- If C(networks) parameter is provided, will inspect each network to see if there exists
a bridge network with optional parameter com.docker.network.bridge.host_binding_ipv4.
If such a network is found, then published ports where no host IP address is specified
will be bound to the host IP pointed to by com.docker.network.bridge.host_binding_ipv4.
Note that the first bridge network with a com.docker.network.bridge.host_binding_ipv4
value encountered in the list of C(networks) is the one that will be used.
aliases:
- ports
required: false
default: null
pull:
description:
- If true, always pull the latest version of an image. Otherwise, will only pull an image when missing.
default: false
required: false
purge_networks:
description:
- Remove the container from ALL networks not included in C(networks) parameter.
- Any default networks such as I(bridge), if not found in C(networks), will be removed as well.
default: false
required: false
version_added: "2.2"
read_only:
description:
- Mount the container's root file system as read-only.
default: false
required: false
recreate:
description:
- Use with present and started states to force the re-creation of an existing container.
default: false
required: false
restart:
description:
- Use with started state to force a matching container to be stopped and restarted.
default: false
required: false
restart_policy:
description:
- Container restart policy. Place quotes around I(no) option.
choices:
- always
- no
- on-failure
- unless-stopped
default: on-failure
required: false
restart_retries:
description:
- Use with restart policy to control maximum number of restart attempts.
default: 0
required: false
shm_size:
description:
- Size of `/dev/shm`. The format is `<number><unit>`. `number` must be greater than `0`.
Unit is optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes), or `g` (gigabytes).
- Omitting the unit defaults to bytes. If you omit the size entirely, the system uses `64m`.
default: null
required: false
security_opts:
description:
- List of security options in the form of C("label:user:User")
default: null
required: false
state:
description:
- 'I(absent) - A container matching the specified name will be stopped and removed. Use force_kill to kill the container
rather than stopping it. Use keep_volumes to retain volumes associated with the removed container.'
- 'I(present) - Asserts the existence of a container matching the name and any provided configuration parameters. If no
container matches the name, a container will be created. If a container matches the name but the provided configuration
does not match, the container will be updated, if it can be. If it cannot be updated, it will be removed and re-created
with the requested config. Image version will be taken into account when comparing configuration. To ignore image
version use the ignore_image option. Use the recreate option to force the re-creation of the matching container. Use
force_kill to kill the container rather than stopping it. Use keep_volumes to retain volumes associated with a removed
container.'
- 'I(started) - Asserts there is a running container matching the name and any provided configuration. If no container
matches the name, a container will be created and started. If a container matching the name is found but the
configuration does not match, the container will be updated, if it can be. If it cannot be updated, it will be removed
and a new container will be created with the requested configuration and started. Image version will be taken into
account when comparing configuration. To ignore image version use the ignore_image option. Use recreate to always
re-create a matching container, even if it is running. Use restart to force a matching container to be stopped and
restarted. Use force_kill to kill a container rather than stopping it. Use keep_volumes to retain volumes associated
with a removed container.'
- 'I(stopped) - Asserts that the container is first I(present), and then if the container is running moves it to a stopped
state. Use force_kill to kill a container rather than stopping it.'
required: false
default: started
choices:
- absent
- present
- stopped
- started
stop_signal:
description:
- Override default signal used to stop the container.
default: null
required: false
stop_timeout:
description:
- Number of seconds to wait for the container to stop before sending SIGKILL.
required: false
default: null
trust_image_content:
description:
- If true, skip image verification.
default: false
required: false
tty:
description:
      - Allocate a pseudo-TTY.
default: false
required: false
ulimits:
description:
- "List of ulimit options. A ulimit is specified as C(nofile:262144:262144)"
default: null
required: false
user:
description:
- Sets the username or UID used and optionally the groupname or GID for the specified command.
- "Can be [ user | user:group | uid | uid:gid | user:gid | uid:group ]"
default: null
required: false
uts:
description:
- Set the UTS namespace mode for the container.
default: null
required: false
volumes:
description:
- List of volumes to mount within the container.
- "Use docker CLI-style syntax: C(/host:/container[:mode])"
- You can specify a read mode for the mount with either C(ro) or C(rw).
- SELinux hosts can additionally use C(z) or C(Z) to use a shared or
private label for the volume.
default: null
required: false
volume_driver:
description:
- The container volume driver.
default: none
required: false
volumes_from:
description:
- List of container names or Ids to get volumes from.
default: null
required: false
extends_documentation_fragment:
- docker
author:
- "Cove Schneider (@cove)"
- "Joshua Conner (@joshuaconner)"
- "Pavel Antonov (@softzilla)"
- "Thomas Steinbach (@ThomasSteinbach)"
- "Philippe Jandot (@zfil)"
- "Daan Oosterveld (@dusdanig)"
- "James Tanner (@jctanner)"
- "Chris Houseknecht (@chouseknecht)"
requirements:
- "python >= 2.6"
- "docker-py >= 1.7.0"
- "Docker API >= 1.20"
'''
EXAMPLES = '''
- name: Create a data container
docker_container:
name: mydata
image: busybox
volumes:
- /data
- name: Re-create a redis container
docker_container:
name: myredis
image: redis
command: redis-server --appendonly yes
state: present
recreate: yes
exposed_ports:
- 6379
volumes_from:
- mydata
- name: Restart a container
docker_container:
name: myapplication
image: someuser/appimage
state: started
restart: yes
links:
- "myredis:aliasedredis"
devices:
- "/dev/sda:/dev/xvda:rwm"
ports:
- "8080:9000"
- "127.0.0.1:8081:9001/udp"
env:
SECRET_KEY: ssssh
- name: Container present
docker_container:
name: mycontainer
state: present
image: ubuntu:14.04
command: sleep infinity
- name: Stop a container
docker_container:
name: mycontainer
state: stopped
- name: Start 4 load-balanced containers
docker_container:
name: "container{{ item }}"
recreate: yes
image: someuser/anotherappimage
command: sleep 1d
with_sequence: count=4
- name: remove container
docker_container:
name: ohno
state: absent
- name: Syslogging output
docker_container:
name: myservice
image: busybox
log_driver: syslog
log_options:
syslog-address: tcp://my-syslog-server:514
syslog-facility: daemon
# NOTE: in Docker 1.13+ the "syslog-tag" option was renamed to "tag" for
# older docker installs, use "syslog-tag" instead
tag: myservice
- name: Create db container and connect to network
docker_container:
name: db_test
image: "postgres:latest"
networks:
- name: "{{ docker_network_name }}"
- name: Start container, connect to network and link
docker_container:
name: sleeper
image: ubuntu:14.04
networks:
- name: TestingNet
ipv4_address: "172.1.1.100"
aliases:
- sleepyzz
links:
- db_test:db
- name: TestingNet2
- name: Start a container with a command
docker_container:
name: sleepy
image: ubuntu:14.04
command: sleep infinity
- name: Add container to networks
docker_container:
name: sleepy
networks:
- name: TestingNet
ipv4_address: 172.1.1.18
links:
- sleeper
- name: TestingNet2
ipv4_address: 172.1.10.20
- name: Update network with aliases
docker_container:
name: sleepy
networks:
- name: TestingNet
aliases:
- sleepyz
- zzzz
- name: Remove container from one network
docker_container:
name: sleepy
networks:
- name: TestingNet2
purge_networks: yes
- name: Remove container from all networks
docker_container:
name: sleepy
purge_networks: yes
'''
RETURN = '''
docker_container:
description:
- Before 2.3 this was 'ansible_docker_container' but was renamed due to conflicts with the connection plugin.
- Facts representing the current state of the container. Matches the docker inspection output.
- Note that facts are not part of registered vars but accessible directly.
- Empty if C(state) is I(absent)
- If detached is I(False), will include Output attribute containing any output from container run.
returned: always
type: dict
sample: '{
"AppArmorProfile": "",
"Args": [],
"Config": {
"AttachStderr": false,
"AttachStdin": false,
"AttachStdout": false,
"Cmd": [
"/usr/bin/supervisord"
],
"Domainname": "",
"Entrypoint": null,
"Env": [
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"ExposedPorts": {
"443/tcp": {},
"80/tcp": {}
},
"Hostname": "8e47bf643eb9",
"Image": "lnmp_nginx:v1",
"Labels": {},
"OnBuild": null,
"OpenStdin": false,
"StdinOnce": false,
"Tty": false,
"User": "",
"Volumes": {
"/tmp/lnmp/nginx-sites/logs/": {}
},
...
}'
'''
import os
import re
import shlex
from ansible.module_utils.docker_common import *
try:
from docker import utils
if HAS_DOCKER_PY_2:
from docker.types import Ulimit
else:
from docker.utils.types import Ulimit
except:
# missing docker-py handled in ansible.module_utils.docker
pass
REQUIRES_CONVERSION_TO_BYTES = [
'memory',
'memory_reservation',
'memory_swap',
'shm_size'
]
VOLUME_PERMISSIONS = ('rw', 'ro', 'z', 'Z')
class TaskParameters(DockerBaseClass):
'''
Access and parse module parameters
'''
def __init__(self, client):
super(TaskParameters, self).__init__()
self.client = client
self.blkio_weight = None
self.capabilities = None
self.cleanup = None
self.command = None
self.cpu_period = None
self.cpu_quota = None
self.cpuset_cpus = None
self.cpuset_mems = None
self.cpu_shares = None
self.detach = None
self.debug = None
self.devices = None
self.dns_servers = None
self.dns_opts = None
self.dns_search_domains = None
self.env = None
self.env_file = None
self.entrypoint = None
self.etc_hosts = None
self.exposed_ports = None
self.force_kill = None
self.groups = None
self.hostname = None
self.ignore_image = None
self.image = None
self.interactive = None
self.ipc_mode = None
self.keep_volumes = None
self.kernel_memory = None
self.kill_signal = None
self.labels = None
self.links = None
self.log_driver = None
self.log_options = None
self.mac_address = None
self.memory = None
self.memory_reservation = None
self.memory_swap = None
self.memory_swappiness = None
self.name = None
self.network_mode = None
self.networks = None
self.oom_killer = None
self.oom_score_adj = None
self.paused = None
self.pid_mode = None
self.privileged = None
self.purge_networks = None
self.pull = None
self.read_only = None
self.recreate = None
self.restart = None
self.restart_retries = None
self.restart_policy = None
self.shm_size = None
self.security_opts = None
self.state = None
self.stop_signal = None
self.stop_timeout = None
self.trust_image_content = None
self.tty = None
self.user = None
self.uts = None
self.volumes = None
self.volume_binds = dict()
self.volumes_from = None
self.volume_driver = None
for key, value in client.module.params.items():
setattr(self, key, value)
for param_name in REQUIRES_CONVERSION_TO_BYTES:
if client.module.params.get(param_name):
try:
setattr(self, param_name, human_to_bytes(client.module.params.get(param_name)))
except ValueError as exc:
self.fail("Failed to convert %s to bytes: %s" % (param_name, exc))
self.publish_all_ports = False
self.published_ports = self._parse_publish_ports()
if self.published_ports in ('all', 'ALL'):
self.publish_all_ports = True
self.published_ports = None
self.ports = self._parse_exposed_ports(self.published_ports)
self.log("expose ports:")
self.log(self.ports, pretty_print=True)
self.links = self._parse_links(self.links)
if self.volumes:
self.volumes = self._expand_host_paths()
self.env = self._get_environment()
self.ulimits = self._parse_ulimits()
self.log_config = self._parse_log_config()
self.exp_links = None
self.volume_binds = self._get_volume_binds(self.volumes)
self.log("volumes:")
self.log(self.volumes, pretty_print=True)
self.log("volume binds:")
self.log(self.volume_binds, pretty_print=True)
if self.networks:
for network in self.networks:
if not network.get('name'):
self.fail("Parameter error: network must have a name attribute.")
network['id'] = self._get_network_id(network['name'])
if not network['id']:
self.fail("Parameter error: network named %s could not be found. Does it exist?" % network['name'])
if network.get('links'):
network['links'] = self._parse_links(network['links'])
def fail(self, msg):
self.client.module.fail_json(msg=msg)
@property
def update_parameters(self):
'''
Returns parameters used to update a container
'''
update_parameters = dict(
blkio_weight='blkio_weight',
cpu_period='cpu_period',
cpu_quota='cpu_quota',
cpu_shares='cpu_shares',
cpuset_cpus='cpuset_cpus',
mem_limit='memory',
            mem_reservation='memory_reservation',
memswap_limit='memory_swap',
kernel_memory='kernel_memory'
)
result = dict()
for key, value in update_parameters.items():
if getattr(self, value, None) is not None:
result[key] = getattr(self, value)
return result
@property
def create_parameters(self):
'''
Returns parameters used to create a container
'''
create_params = dict(
command='command',
hostname='hostname',
user='user',
detach='detach',
stdin_open='interactive',
tty='tty',
ports='ports',
environment='env',
name='name',
entrypoint='entrypoint',
cpu_shares='cpu_shares',
mac_address='mac_address',
labels='labels',
stop_signal='stop_signal',
volume_driver='volume_driver',
)
result = dict(
host_config=self._host_config(),
volumes=self._get_mounts(),
)
for key, value in create_params.items():
if getattr(self, value, None) is not None:
result[key] = getattr(self, value)
return result
def _expand_host_paths(self):
new_vols = []
for vol in self.volumes:
if ':' in vol:
if len(vol.split(':')) == 3:
host, container, mode = vol.split(':')
if re.match(r'[\.~]', host):
host = os.path.abspath(host)
new_vols.append("%s:%s:%s" % (host, container, mode))
continue
elif len(vol.split(':')) == 2:
parts = vol.split(':')
if parts[1] not in VOLUME_PERMISSIONS and re.match(r'[\.~]', parts[0]):
host = os.path.abspath(parts[0])
new_vols.append("%s:%s:rw" % (host, parts[1]))
continue
new_vols.append(vol)
return new_vols
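    # Editor's illustration (hypothetical paths, cwd '/work'): relative host
    # paths are made absolute, and two-part mappings get an explicit 'rw' mode:
    #   ['./data:/data']  ->  ['/work/data:/data:rw']
    # Note that os.path.abspath() does not expand '~', so a '~/x' host path
    # becomes '/work/~/x' rather than the user's home directory.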
def _get_mounts(self):
'''
Return a list of container mounts.
:return:
'''
result = []
if self.volumes:
for vol in self.volumes:
if ':' in vol:
if len(vol.split(':')) == 3:
host, container, _ = vol.split(':')
result.append(container)
continue
if len(vol.split(':')) == 2:
parts = vol.split(':')
if parts[1] not in VOLUME_PERMISSIONS:
result.append(parts[1])
continue
result.append(vol)
self.log("mounts:")
self.log(result, pretty_print=True)
return result
def _host_config(self):
'''
Returns parameters used to create a HostConfig object
'''
        host_config_params = dict(
port_bindings='published_ports',
publish_all_ports='publish_all_ports',
links='links',
privileged='privileged',
dns='dns_servers',
dns_search='dns_search_domains',
binds='volume_binds',
volumes_from='volumes_from',
network_mode='network_mode',
cap_add='capabilities',
extra_hosts='etc_hosts',
read_only='read_only',
ipc_mode='ipc_mode',
security_opt='security_opts',
ulimits='ulimits',
log_config='log_config',
mem_limit='memory',
memswap_limit='memory_swap',
mem_swappiness='memory_swappiness',
oom_score_adj='oom_score_adj',
shm_size='shm_size',
group_add='groups',
devices='devices',
pid_mode='pid_mode'
)
params = dict()
for key, value in host_config_params.items():
if getattr(self, value, None) is not None:
params[key] = getattr(self, value)
if self.restart_policy:
params['restart_policy'] = dict(Name=self.restart_policy,
MaximumRetryCount=self.restart_retries)
return self.client.create_host_config(**params)
@property
def default_host_ip(self):
ip = '0.0.0.0'
if not self.networks:
return ip
for net in self.networks:
if net.get('name'):
network = self.client.inspect_network(net['name'])
if network.get('Driver') == 'bridge' and \
network.get('Options', {}).get('com.docker.network.bridge.host_binding_ipv4'):
ip = network['Options']['com.docker.network.bridge.host_binding_ipv4']
break
return ip
def _parse_publish_ports(self):
'''
Parse ports from docker CLI syntax
'''
if self.published_ports is None:
return None
if 'all' in self.published_ports:
return 'all'
default_ip = self.default_host_ip
binds = {}
for port in self.published_ports:
parts = str(port).split(':')
container_port = parts[-1]
if '/' not in container_port:
container_port = int(parts[-1])
p_len = len(parts)
if p_len == 1:
bind = (default_ip,)
elif p_len == 2:
bind = (default_ip, int(parts[0]))
elif p_len == 3:
bind = (parts[0], int(parts[1])) if parts[1] else (parts[0],)
if container_port in binds:
old_bind = binds[container_port]
if isinstance(old_bind, list):
old_bind.append(bind)
else:
binds[container_port] = [binds[container_port], bind]
else:
binds[container_port] = bind
return binds
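    # Editor's illustration (hypothetical input): with default_host_ip
    # '0.0.0.0', published_ports=['8080:80', '127.0.0.1:8081:81/udp'] parses to
    #   {80: ('0.0.0.0', 8080), '81/udp': ('127.0.0.1', 8081)}
    # and repeating the same container port collects its bindings into a list.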
@staticmethod
def _get_volume_binds(volumes):
'''
Extract host bindings, if any, from list of volume mapping strings.
:return: dictionary of bind mappings
'''
result = dict()
if volumes:
for vol in volumes:
host = None
if ':' in vol:
if len(vol.split(':')) == 3:
host, container, mode = vol.split(':')
if len(vol.split(':')) == 2:
parts = vol.split(':')
if parts[1] not in VOLUME_PERMISSIONS:
host, container, mode = (vol.split(':') + ['rw'])
if host is not None:
result[host] = dict(
bind=container,
mode=mode
)
return result
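    # Editor's illustration (hypothetical input):
    #   _get_volume_binds(['/host/data:/data:ro', '/anon'])
    # returns {'/host/data': {'bind': '/data', 'mode': 'ro'}} -- the anonymous
    # volume '/anon' has no host binding and is skipped.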
def _parse_exposed_ports(self, published_ports):
'''
Parse exposed ports from docker CLI-style ports syntax.
'''
exposed = []
if self.exposed_ports:
for port in self.exposed_ports:
port = str(port).strip()
protocol = 'tcp'
match = re.search(r'(/.+$)', port)
if match:
protocol = match.group(1).replace('/', '')
port = re.sub(r'/.+$', '', port)
exposed.append((port, protocol))
if published_ports:
# Any published port should also be exposed
for publish_port in published_ports:
match = False
if isinstance(publish_port, basestring) and '/' in publish_port:
port, protocol = publish_port.split('/')
port = int(port)
else:
protocol = 'tcp'
port = int(publish_port)
for exposed_port in exposed:
if isinstance(exposed_port[0], basestring) and '-' in exposed_port[0]:
start_port, end_port = exposed_port[0].split('-')
if int(start_port) <= port <= int(end_port):
match = True
                    elif str(exposed_port[0]) == str(port):
match = True
if not match:
exposed.append((port, protocol))
return exposed
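    # Editor's illustration (hypothetical input): exposed_ports=['8080/udp', 9000]
    # yields [('8080', 'udp'), ('9000', 'tcp')], and any published port not
    # already covered (including by a 'low-high' range) is appended as well.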
@staticmethod
def _parse_links(links):
'''
Turn links into a dictionary
'''
if links is None:
return None
result = {}
for link in links:
parsed_link = link.split(':', 1)
if len(parsed_link) == 2:
result[parsed_link[0]] = parsed_link[1]
else:
result[parsed_link[0]] = parsed_link[0]
return result
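    # Editor's illustration (not upstream code): an alias-less link falls back
    # to its own name, e.g.
    #   _parse_links(['db:database', 'cache'])
    #   ->  {'db': 'database', 'cache': 'cache'}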
def _parse_ulimits(self):
'''
Turn ulimits into an array of Ulimit objects
'''
if self.ulimits is None:
return None
results = []
for limit in self.ulimits:
limits = dict()
pieces = limit.split(':')
if len(pieces) >= 2:
limits['name'] = pieces[0]
limits['soft'] = int(pieces[1])
limits['hard'] = int(pieces[1])
if len(pieces) == 3:
limits['hard'] = int(pieces[2])
try:
results.append(Ulimit(**limits))
except ValueError as exc:
self.fail("Error parsing ulimits value %s - %s" % (limit, exc))
return results
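    # Editor's illustration (hypothetical input): 'nofile:262144' sets both the
    # soft and the hard limit, while 'nofile:1024:4096' sets them separately:
    #   Ulimit(name='nofile', soft=262144, hard=262144)
    #   Ulimit(name='nofile', soft=1024, hard=4096)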
def _parse_log_config(self):
'''
Create a LogConfig object
'''
if self.log_driver is None:
return None
options = dict(
Type=self.log_driver,
            Config=dict()
)
if self.log_options is not None:
options['Config'] = self.log_options
try:
return LogConfig(**options)
except ValueError as exc:
self.fail('Error parsing logging options - %s' % (exc))
def _get_environment(self):
"""
If environment file is combined with explicit environment variables, the explicit environment variables
take precedence.
"""
final_env = {}
if self.env_file:
parsed_env_file = utils.parse_env_file(self.env_file)
for name, value in parsed_env_file.items():
final_env[name] = str(value)
if self.env:
for name, value in self.env.items():
final_env[name] = str(value)
return final_env
def _get_network_id(self, network_name):
network_id = None
try:
for network in self.client.networks(names=[network_name]):
if network['Name'] == network_name:
network_id = network['Id']
break
except Exception as exc:
self.fail("Error getting network id for %s - %s" % (network_name, str(exc)))
return network_id
class Container(DockerBaseClass):
def __init__(self, container, parameters):
super(Container, self).__init__()
self.raw = container
self.Id = None
self.container = container
if container:
self.Id = container['Id']
self.Image = container['Image']
self.log(self.container, pretty_print=True)
self.parameters = parameters
self.parameters.expected_links = None
self.parameters.expected_ports = None
self.parameters.expected_exposed = None
self.parameters.expected_volumes = None
self.parameters.expected_ulimits = None
self.parameters.expected_etc_hosts = None
self.parameters.expected_env = None
def fail(self, msg):
self.parameters.client.module.fail_json(msg=msg)
@property
def exists(self):
return True if self.container else False
@property
def running(self):
if self.container and self.container.get('State'):
if self.container['State'].get('Running') and not self.container['State'].get('Ghost', False):
return True
return False
def has_different_configuration(self, image):
'''
Diff parameters vs existing container config. Returns tuple: (True | False, List of differences)
'''
self.log('Starting has_different_configuration')
self.parameters.expected_entrypoint = self._get_expected_entrypoint()
self.parameters.expected_links = self._get_expected_links()
self.parameters.expected_ports = self._get_expected_ports()
self.parameters.expected_exposed = self._get_expected_exposed(image)
self.parameters.expected_volumes = self._get_expected_volumes(image)
self.parameters.expected_binds = self._get_expected_binds(image)
self.parameters.expected_ulimits = self._get_expected_ulimits(self.parameters.ulimits)
self.parameters.expected_etc_hosts = self._convert_simple_dict_to_list('etc_hosts')
self.parameters.expected_env = self._get_expected_env(image)
self.parameters.expected_cmd = self._get_expected_cmd()
self.parameters.expected_devices = self._get_expected_devices()
if not self.container.get('HostConfig'):
self.fail("has_config_diff: Error parsing container properties. HostConfig missing.")
if not self.container.get('Config'):
self.fail("has_config_diff: Error parsing container properties. Config missing.")
if not self.container.get('NetworkSettings'):
self.fail("has_config_diff: Error parsing container properties. NetworkSettings missing.")
host_config = self.container['HostConfig']
log_config = host_config.get('LogConfig', dict())
restart_policy = host_config.get('RestartPolicy', dict())
config = self.container['Config']
network = self.container['NetworkSettings']
# The previous version of the docker module ignored the detach state by
# assuming if the container was running, it must have been detached.
detach = not (config.get('AttachStderr') and config.get('AttachStdout'))
# "ExposedPorts": null returns None type & causes AttributeError - PR #5517
if config.get('ExposedPorts') is not None:
expected_exposed = [re.sub(r'/.+$', '', p) for p in config.get('ExposedPorts', dict()).keys()]
else:
expected_exposed = []
# Map parameters to container inspect results
config_mapping = dict(
image=config.get('Image'),
expected_cmd=config.get('Cmd'),
hostname=config.get('Hostname'),
user=config.get('User'),
detach=detach,
interactive=config.get('OpenStdin'),
capabilities=host_config.get('CapAdd'),
expected_devices=host_config.get('Devices'),
dns_servers=host_config.get('Dns'),
dns_opts=host_config.get('DnsOptions'),
dns_search_domains=host_config.get('DnsSearch'),
expected_env=(config.get('Env') or []),
expected_entrypoint=config.get('Entrypoint'),
expected_etc_hosts=host_config['ExtraHosts'],
expected_exposed=expected_exposed,
groups=host_config.get('GroupAdd'),
ipc_mode=host_config.get("IpcMode"),
labels=config.get('Labels'),
expected_links=host_config.get('Links'),
log_driver=log_config.get('Type'),
log_options=log_config.get('Config'),
mac_address=network.get('MacAddress'),
memory_swappiness=host_config.get('MemorySwappiness'),
network_mode=host_config.get('NetworkMode'),
oom_killer=host_config.get('OomKillDisable'),
oom_score_adj=host_config.get('OomScoreAdj'),
pid_mode=host_config.get('PidMode'),
privileged=host_config.get('Privileged'),
expected_ports=host_config.get('PortBindings'),
read_only=host_config.get('ReadonlyRootfs'),
restart_policy=restart_policy.get('Name'),
restart_retries=restart_policy.get('MaximumRetryCount'),
# Cannot test shm_size, as shm_size is not included in container inspection results.
# shm_size=host_config.get('ShmSize'),
            security_opts=host_config.get("SecurityOpt"),
stop_signal=config.get("StopSignal"),
tty=config.get('Tty'),
expected_ulimits=host_config.get('Ulimits'),
uts=host_config.get('UTSMode'),
expected_volumes=config.get('Volumes'),
expected_binds=host_config.get('Binds'),
volumes_from=host_config.get('VolumesFrom'),
volume_driver=host_config.get('VolumeDriver')
)
differences = []
for key, value in config_mapping.items():
self.log('check differences %s %s vs %s' % (key, getattr(self.parameters, key), str(value)))
if getattr(self.parameters, key, None) is not None:
if isinstance(getattr(self.parameters, key), list) and isinstance(value, list):
if len(getattr(self.parameters, key)) > 0 and isinstance(getattr(self.parameters, key)[0], dict):
# compare list of dictionaries
self.log("comparing list of dict: %s" % key)
match = self._compare_dictionary_lists(getattr(self.parameters, key), value)
else:
# compare two lists. Is list_a in list_b?
self.log("comparing lists: %s" % key)
set_a = set(getattr(self.parameters, key))
set_b = set(value)
match = (set_a <= set_b)
elif isinstance(getattr(self.parameters, key), dict) and isinstance(value, dict):
# compare two dicts
self.log("comparing two dicts: %s" % key)
match = self._compare_dicts(getattr(self.parameters, key), value)
else:
# primitive compare
self.log("primitive compare: %s" % key)
match = (getattr(self.parameters, key) == value)
if not match:
# no match. record the differences
item = dict()
item[key] = dict(
parameter=getattr(self.parameters, key),
container=value
)
differences.append(item)
has_differences = True if len(differences) > 0 else False
return has_differences, differences
def _compare_dictionary_lists(self, list_a, list_b):
'''
        Return True if every dict in list_a matches some dict in list_b
'''
if not isinstance(list_a, list) or not isinstance(list_b, list):
return False
matches = 0
for dict_a in list_a:
for dict_b in list_b:
if self._compare_dicts(dict_a, dict_b):
matches += 1
break
result = (matches == len(list_a))
return result
def _compare_dicts(self, dict_a, dict_b):
'''
        Return True if dict_a is contained (recursively) in dict_b
'''
if not isinstance(dict_a, dict) or not isinstance(dict_b, dict):
return False
for key, value in dict_a.items():
if isinstance(value, dict):
match = self._compare_dicts(value, dict_b.get(key))
elif isinstance(value, list):
if len(value) > 0 and isinstance(value[0], dict):
match = self._compare_dictionary_lists(value, dict_b.get(key))
else:
set_a = set(value)
set_b = set(dict_b.get(key))
match = (set_a == set_b)
else:
match = (value == dict_b.get(key))
if not match:
return False
return True
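    # Editor's illustration (not upstream code): the comparison is a subset
    # test at the top level but exact for nested set-compared values, so
    #   _compare_dicts({'a': 1}, {'a': 1, 'b': 2})  ->  True
    #   _compare_dicts({'a': 1}, {'a': 2, 'b': 2})  ->  False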
def has_different_resource_limits(self):
'''
Diff parameters and container resource limits
'''
if not self.container.get('HostConfig'):
self.fail("limits_differ_from_container: Error parsing container properties. HostConfig missing.")
host_config = self.container['HostConfig']
config_mapping = dict(
cpu_period=host_config.get('CpuPeriod'),
cpu_quota=host_config.get('CpuQuota'),
cpuset_cpus=host_config.get('CpusetCpus'),
cpuset_mems=host_config.get('CpusetMems'),
cpu_shares=host_config.get('CpuShares'),
kernel_memory=host_config.get("KernelMemory"),
memory=host_config.get('Memory'),
memory_reservation=host_config.get('MemoryReservation'),
memory_swap=host_config.get('MemorySwap'),
oom_score_adj=host_config.get('OomScoreAdj'),
)
differences = []
for key, value in config_mapping.items():
if getattr(self.parameters, key, None) and getattr(self.parameters, key) != value:
# no match. record the differences
item = dict()
item[key] = dict(
parameter=getattr(self.parameters, key),
container=value
)
differences.append(item)
different = (len(differences) > 0)
return different, differences
def has_network_differences(self):
'''
Check if the container is connected to requested networks with expected options: links, aliases, ipv4, ipv6
'''
different = False
differences = []
if not self.parameters.networks:
return different, differences
if not self.container.get('NetworkSettings'):
self.fail("has_missing_networks: Error parsing container properties. NetworkSettings missing.")
connected_networks = self.container['NetworkSettings']['Networks']
for network in self.parameters.networks:
if connected_networks.get(network['name'], None) is None:
different = True
differences.append(dict(
parameter=network,
container=None
))
else:
diff = False
if network.get('ipv4_address') and network['ipv4_address'] != connected_networks[network['name']].get('IPAddress'):
diff = True
if network.get('ipv6_address') and network['ipv6_address'] != connected_networks[network['name']].get('GlobalIPv6Address'):
diff = True
if network.get('aliases') and not connected_networks[network['name']].get('Aliases'):
diff = True
if network.get('aliases') and connected_networks[network['name']].get('Aliases'):
for alias in network.get('aliases'):
if alias not in connected_networks[network['name']].get('Aliases', []):
diff = True
if network.get('links') and not connected_networks[network['name']].get('Links'):
diff = True
if network.get('links') and connected_networks[network['name']].get('Links'):
expected_links = []
for link, alias in network['links'].items():
expected_links.append("%s:%s" % (link, alias))
for link in expected_links:
if link not in connected_networks[network['name']].get('Links', []):
diff = True
if diff:
different = True
differences.append(dict(
parameter=network,
container=dict(
name=network['name'],
ipv4_address=connected_networks[network['name']].get('IPAddress'),
ipv6_address=connected_networks[network['name']].get('GlobalIPv6Address'),
aliases=connected_networks[network['name']].get('Aliases'),
links=connected_networks[network['name']].get('Links')
)
))
return different, differences
def has_extra_networks(self):
'''
Check if the container is connected to non-requested networks
'''
extra_networks = []
extra = False
if not self.container.get('NetworkSettings'):
self.fail("has_extra_networks: Error parsing container properties. NetworkSettings missing.")
connected_networks = self.container['NetworkSettings'].get('Networks')
if connected_networks:
for network, network_config in connected_networks.items():
keep = False
if self.parameters.networks:
for expected_network in self.parameters.networks:
if expected_network['name'] == network:
keep = True
if not keep:
extra = True
extra_networks.append(dict(name=network, id=network_config['NetworkID']))
return extra, extra_networks
def _get_expected_devices(self):
if not self.parameters.devices:
return None
expected_devices = []
for device in self.parameters.devices:
parts = device.split(':')
if len(parts) == 1:
expected_devices.append(
dict(
CgroupPermissions='rwm',
PathInContainer=parts[0],
PathOnHost=parts[0]
))
elif len(parts) == 2:
parts = device.split(':')
expected_devices.append(
dict(
CgroupPermissions='rwm',
PathInContainer=parts[1],
PathOnHost=parts[0]
)
)
else:
expected_devices.append(
dict(
CgroupPermissions=parts[2],
PathInContainer=parts[1],
PathOnHost=parts[0]
))
return expected_devices
def _get_expected_entrypoint(self):
self.log('_get_expected_entrypoint')
if not self.parameters.entrypoint:
return None
return shlex.split(self.parameters.entrypoint)
def _get_expected_ports(self):
if not self.parameters.published_ports:
return None
expected_bound_ports = {}
for container_port, config in self.parameters.published_ports.items():
if isinstance(container_port, int):
container_port = "%s/tcp" % container_port
if len(config) == 1:
expected_bound_ports[container_port] = [{'HostIp': "0.0.0.0", 'HostPort': ""}]
elif isinstance(config[0], tuple):
expected_bound_ports[container_port] = []
for host_ip, host_port in config:
expected_bound_ports[container_port].append({'HostIp': host_ip, 'HostPort': str(host_port)})
else:
expected_bound_ports[container_port] = [{'HostIp': config[0], 'HostPort': str(config[1])}]
return expected_bound_ports
def _get_expected_links(self):
if self.parameters.links is None:
return None
self.log('parameter links:')
self.log(self.parameters.links, pretty_print=True)
exp_links = []
for link, alias in self.parameters.links.items():
exp_links.append("/%s:%s/%s" % (link, ('/' + self.parameters.name), alias))
return exp_links
def _get_expected_binds(self, image):
self.log('_get_expected_binds')
image_vols = []
if image:
image_vols = self._get_image_binds(image['ContainerConfig'].get('Volumes'))
param_vols = []
if self.parameters.volumes:
for vol in self.parameters.volumes:
host = None
if ':' in vol:
if len(vol.split(':')) == 3:
host, container, mode = vol.split(':')
if len(vol.split(':')) == 2:
parts = vol.split(':')
if parts[1] not in VOLUME_PERMISSIONS:
host, container, mode = vol.split(':') + ['rw']
if host:
param_vols.append("%s:%s:%s" % (host, container, mode))
result = list(set(image_vols + param_vols))
self.log("expected_binds:")
self.log(result, pretty_print=True)
return result
def _get_image_binds(self, volumes):
'''
Convert array of binds to array of strings with format host_path:container_path:mode
:param volumes: array of bind dicts
:return: array of strings
'''
results = []
if isinstance(volumes, dict):
results += self._get_bind_from_dict(volumes)
elif isinstance(volumes, list):
for vol in volumes:
results += self._get_bind_from_dict(vol)
return results
@staticmethod
def _get_bind_from_dict(volume_dict):
results = []
if volume_dict:
for host_path, config in volume_dict.items():
if isinstance(config, dict) and config.get('bind'):
container_path = config.get('bind')
mode = config.get('mode', 'rw')
results.append("%s:%s:%s" % (host_path, container_path, mode))
return results
def _get_expected_volumes(self, image):
self.log('_get_expected_volumes')
expected_vols = dict()
if image and image['ContainerConfig'].get('Volumes'):
expected_vols.update(image['ContainerConfig'].get('Volumes'))
if self.parameters.volumes:
for vol in self.parameters.volumes:
container = None
if ':' in vol:
if len(vol.split(':')) == 3:
host, container, mode = vol.split(':')
if len(vol.split(':')) == 2:
parts = vol.split(':')
if parts[1] not in VOLUME_PERMISSIONS:
host, container, mode = vol.split(':') + ['rw']
new_vol = dict()
if container:
new_vol[container] = dict()
else:
new_vol[vol] = dict()
expected_vols.update(new_vol)
if not expected_vols:
expected_vols = None
self.log("expected_volumes:")
self.log(expected_vols, pretty_print=True)
return expected_vols
def _get_expected_env(self, image):
self.log('_get_expected_env')
expected_env = dict()
if image and image['ContainerConfig'].get('Env'):
for env_var in image['ContainerConfig']['Env']:
parts = env_var.split('=', 1)
expected_env[parts[0]] = parts[1]
if self.parameters.env:
expected_env.update(self.parameters.env)
param_env = []
for key, value in expected_env.items():
param_env.append("%s=%s" % (key, value))
return param_env
def _get_expected_exposed(self, image):
self.log('_get_expected_exposed')
image_ports = []
if image:
image_ports = [re.sub(r'/.+$', '', p) for p in (image['ContainerConfig'].get('ExposedPorts') or {}).keys()]
param_ports = []
if self.parameters.ports:
param_ports = [str(p[0]) for p in self.parameters.ports]
result = list(set(image_ports + param_ports))
self.log(result, pretty_print=True)
return result
def _get_expected_ulimits(self, config_ulimits):
self.log('_get_expected_ulimits')
if config_ulimits is None:
return None
results = []
for limit in config_ulimits:
results.append(dict(
Name=limit.name,
Soft=limit.soft,
Hard=limit.hard
))
return results
def _get_expected_cmd(self):
self.log('_get_expected_cmd')
if not self.parameters.command:
return None
return shlex.split(self.parameters.command)
def _convert_simple_dict_to_list(self, param_name, join_with=':'):
if getattr(self.parameters, param_name, None) is None:
return None
results = []
for key, value in getattr(self.parameters, param_name).items():
results.append("%s%s%s" % (key, join_with, value))
return results
class ContainerManager(DockerBaseClass):
'''
Perform container management tasks
'''
def __init__(self, client):
super(ContainerManager, self).__init__()
self.client = client
self.parameters = TaskParameters(client)
self.check_mode = self.client.check_mode
self.results = {'changed': False, 'actions': []}
self.diff = {}
self.facts = {}
state = self.parameters.state
if state in ('stopped', 'started', 'present'):
self.present(state)
elif state == 'absent':
self.absent()
if not self.check_mode and not self.parameters.debug:
self.results.pop('actions')
if self.client.module._diff or self.parameters.debug:
self.results['diff'] = self.diff
if self.facts:
self.results['ansible_facts'] = {'docker_container': self.facts}
def present(self, state):
container = self._get_container(self.parameters.name)
image = self._get_image()
if not container.exists:
# New container
self.log('No container found')
new_container = self.container_create(self.parameters.image, self.parameters.create_parameters)
if new_container:
container = new_container
else:
# Existing container
different, differences = container.has_different_configuration(image)
image_different = False
if not self.parameters.ignore_image:
image_different = self._image_is_different(image, container)
if image_different or different or self.parameters.recreate:
self.diff['differences'] = differences
if image_different:
self.diff['image_different'] = True
self.log("differences")
self.log(differences, pretty_print=True)
if container.running:
self.container_stop(container.Id)
self.container_remove(container.Id)
new_container = self.container_create(self.parameters.image, self.parameters.create_parameters)
if new_container:
container = new_container
if container and container.exists:
container = self.update_limits(container)
container = self.update_networks(container)
if state == 'started' and not container.running:
container = self.container_start(container.Id)
elif state == 'started' and self.parameters.restart:
self.container_stop(container.Id)
container = self.container_start(container.Id)
elif state == 'stopped' and container.running:
self.container_stop(container.Id)
container = self._get_container(container.Id)
self.facts = container.raw
def absent(self):
container = self._get_container(self.parameters.name)
if container.exists:
if container.running:
self.container_stop(container.Id)
self.container_remove(container.Id)
def fail(self, msg, **kwargs):
self.client.module.fail_json(msg=msg, **kwargs)
def _get_container(self, container):
'''
Expects container ID or Name. Returns a container object
'''
return Container(self.client.get_container(container), self.parameters)
def _get_image(self):
if not self.parameters.image:
self.log('No image specified')
return None
repository, tag = utils.parse_repository_tag(self.parameters.image)
if not tag:
tag = "latest"
image = self.client.find_image(repository, tag)
if not self.check_mode:
if not image or self.parameters.pull:
self.log("Pull the image.")
image, alreadyToLatest = self.client.pull_image(repository, tag)
if alreadyToLatest:
self.results['changed'] = False
else:
self.results['changed'] = True
self.results['actions'].append(dict(pulled_image="%s:%s" % (repository, tag)))
self.log("image")
self.log(image, pretty_print=True)
return image
def _image_is_different(self, image, container):
if image and image.get('Id'):
if container and container.Image:
if image.get('Id') != container.Image:
return True
return False
def update_limits(self, container):
limits_differ, different_limits = container.has_different_resource_limits()
if limits_differ:
self.log("limit differences:")
self.log(different_limits, pretty_print=True)
if limits_differ and not self.check_mode:
self.container_update(container.Id, self.parameters.update_parameters)
return self._get_container(container.Id)
return container
def update_networks(self, container):
has_network_differences, network_differences = container.has_network_differences()
updated_container = container
if has_network_differences:
if self.diff.get('differences'):
self.diff['differences'].append(dict(network_differences=network_differences))
else:
self.diff['differences'] = [dict(network_differences=network_differences)]
self.results['changed'] = True
updated_container = self._add_networks(container, network_differences)
if self.parameters.purge_networks:
has_extra_networks, extra_networks = container.has_extra_networks()
if has_extra_networks:
if self.diff.get('differences'):
self.diff['differences'].append(dict(purge_networks=extra_networks))
else:
self.diff['differences'] = [dict(purge_networks=extra_networks)]
self.results['changed'] = True
updated_container = self._purge_networks(container, extra_networks)
return updated_container
def _add_networks(self, container, differences):
for diff in differences:
# remove the container from the network, if connected
if diff.get('container'):
self.results['actions'].append(dict(removed_from_network=diff['parameter']['name']))
if not self.check_mode:
try:
self.client.disconnect_container_from_network(container.Id, diff['parameter']['id'])
except Exception as exc:
self.fail("Error disconnecting container from network %s - %s" % (diff['parameter']['name'],
str(exc)))
# connect to the network
params = dict(
ipv4_address=diff['parameter'].get('ipv4_address', None),
ipv6_address=diff['parameter'].get('ipv6_address', None),
links=diff['parameter'].get('links', None),
aliases=diff['parameter'].get('aliases', None)
)
self.results['actions'].append(dict(added_to_network=diff['parameter']['name'], network_parameters=params))
if not self.check_mode:
try:
self.log("Connecting container to network %s" % diff['parameter']['id'])
self.log(params, pretty_print=True)
self.client.connect_container_to_network(container.Id, diff['parameter']['id'], **params)
except Exception as exc:
self.fail("Error connecting container to network %s - %s" % (diff['parameter']['name'], str(exc)))
return self._get_container(container.Id)
def _purge_networks(self, container, networks):
for network in networks:
self.results['actions'].append(dict(removed_from_network=network['name']))
if not self.check_mode:
try:
self.client.disconnect_container_from_network(container.Id, network['name'])
except Exception as exc:
self.fail("Error disconnecting container from network %s - %s" % (network['name'],
str(exc)))
return self._get_container(container.Id)
def container_create(self, image, create_parameters):
self.log("create container")
self.log("image: %s parameters:" % image)
self.log(create_parameters, pretty_print=True)
self.results['actions'].append(dict(created="Created container", create_parameters=create_parameters))
self.results['changed'] = True
new_container = None
if not self.check_mode:
try:
new_container = self.client.create_container(image, **create_parameters)
except Exception as exc:
self.fail("Error creating container: %s" % str(exc))
return self._get_container(new_container['Id'])
return new_container
def container_start(self, container_id):
self.log("start container %s" % (container_id))
self.results['actions'].append(dict(started=container_id))
self.results['changed'] = True
if not self.check_mode:
try:
self.client.start(container=container_id)
except Exception as exc:
self.fail("Error starting container %s: %s" % (container_id, str(exc)))
if not self.parameters.detach:
status = self.client.wait(container_id)
output = self.client.logs(container_id, stdout=True, stderr=True, stream=False, timestamps=False)
if status != 0:
self.fail(output, status=status)
if self.parameters.cleanup:
self.container_remove(container_id, force=True)
insp = self._get_container(container_id)
if insp.raw:
insp.raw['Output'] = output
else:
insp.raw = dict(Output=output)
return insp
return self._get_container(container_id)
def container_remove(self, container_id, link=False, force=False):
volume_state = (not self.parameters.keep_volumes)
self.log("remove container container:%s v:%s link:%s force%s" % (container_id, volume_state, link, force))
self.results['actions'].append(dict(removed=container_id, volume_state=volume_state, link=link, force=force))
self.results['changed'] = True
response = None
if not self.check_mode:
try:
response = self.client.remove_container(container_id, v=volume_state, link=link, force=force)
except Exception as exc:
self.fail("Error removing container %s: %s" % (container_id, str(exc)))
return response
def container_update(self, container_id, update_parameters):
if update_parameters:
self.log("update container %s" % (container_id))
self.log(update_parameters, pretty_print=True)
self.results['actions'].append(dict(updated=container_id, update_parameters=update_parameters))
self.results['changed'] = True
        if not self.check_mode and callable(getattr(self.client, 'update_container', None)):
try:
self.client.update_container(container_id, **update_parameters)
except Exception as exc:
self.fail("Error updating container %s: %s" % (container_id, str(exc)))
return self._get_container(container_id)
def container_kill(self, container_id):
self.results['actions'].append(dict(killed=container_id, signal=self.parameters.kill_signal))
self.results['changed'] = True
response = None
if not self.check_mode:
try:
if self.parameters.kill_signal:
response = self.client.kill(container_id, signal=self.parameters.kill_signal)
else:
response = self.client.kill(container_id)
except Exception as exc:
self.fail("Error killing container %s: %s" % (container_id, exc))
return response
def container_stop(self, container_id):
if self.parameters.force_kill:
self.container_kill(container_id)
return
self.results['actions'].append(dict(stopped=container_id, timeout=self.parameters.stop_timeout))
self.results['changed'] = True
response = None
if not self.check_mode:
try:
if self.parameters.stop_timeout:
response = self.client.stop(container_id, timeout=self.parameters.stop_timeout)
else:
response = self.client.stop(container_id)
except Exception as exc:
self.fail("Error stopping container %s: %s" % (container_id, str(exc)))
return response
def main():
argument_spec = dict(
blkio_weight=dict(type='int'),
capabilities=dict(type='list'),
cleanup=dict(type='bool', default=False),
command=dict(type='str'),
cpu_period=dict(type='int'),
cpu_quota=dict(type='int'),
cpuset_cpus=dict(type='str'),
cpuset_mems=dict(type='str'),
cpu_shares=dict(type='int'),
detach=dict(type='bool', default=True),
devices=dict(type='list'),
dns_servers=dict(type='list'),
dns_opts=dict(type='list'),
dns_search_domains=dict(type='list'),
env=dict(type='dict'),
env_file=dict(type='path'),
entrypoint=dict(type='str'),
etc_hosts=dict(type='dict'),
exposed_ports=dict(type='list', aliases=['exposed', 'expose']),
force_kill=dict(type='bool', default=False, aliases=['forcekill']),
groups=dict(type='list'),
hostname=dict(type='str'),
ignore_image=dict(type='bool', default=False),
image=dict(type='str'),
interactive=dict(type='bool', default=False),
ipc_mode=dict(type='str'),
keep_volumes=dict(type='bool', default=True),
kernel_memory=dict(type='str'),
kill_signal=dict(type='str'),
labels=dict(type='dict'),
links=dict(type='list'),
log_driver=dict(type='str',
choices=['none', 'json-file', 'syslog', 'journald', 'gelf', 'fluentd', 'awslogs', 'splunk'],
default=None),
log_options=dict(type='dict', aliases=['log_opt']),
mac_address=dict(type='str'),
memory=dict(type='str', default='0'),
memory_reservation=dict(type='str'),
memory_swap=dict(type='str'),
memory_swappiness=dict(type='int'),
name=dict(type='str', required=True),
network_mode=dict(type='str'),
networks=dict(type='list'),
oom_killer=dict(type='bool'),
oom_score_adj=dict(type='int'),
paused=dict(type='bool', default=False),
pid_mode=dict(type='str'),
privileged=dict(type='bool', default=False),
published_ports=dict(type='list', aliases=['ports']),
pull=dict(type='bool', default=False),
purge_networks=dict(type='bool', default=False),
read_only=dict(type='bool', default=False),
recreate=dict(type='bool', default=False),
restart=dict(type='bool', default=False),
restart_policy=dict(type='str', choices=['no', 'on-failure', 'always', 'unless-stopped']),
restart_retries=dict(type='int', default=None),
shm_size=dict(type='str'),
security_opts=dict(type='list'),
state=dict(type='str', choices=['absent', 'present', 'started', 'stopped'], default='started'),
stop_signal=dict(type='str'),
stop_timeout=dict(type='int'),
trust_image_content=dict(type='bool', default=False),
tty=dict(type='bool', default=False),
ulimits=dict(type='list'),
user=dict(type='str'),
uts=dict(type='str'),
volumes=dict(type='list'),
volumes_from=dict(type='list'),
volume_driver=dict(type='str'),
)
required_if = [
('state', 'present', ['image'])
]
client = AnsibleDockerClient(
argument_spec=argument_spec,
required_if=required_if,
supports_check_mode=True
)
cm = ContainerManager(client)
client.module.exit_json(**cm.results)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main() | apache-2.0 |
caspartse/QQ-Groups-Spider | vendor/pyexcel/constants.py | 1 | 3090 | """
pyexcel.constants
~~~~~~~~~~~~~~~~~~~
Constants appeared in pyexcel
:copyright: (c) 2015-2017 by Onni Software Ltd.
:license: New BSD License
"""
# flake8: noqa
DEFAULT_NA = ''
DEFAULT_NAME = 'pyexcel sheet'
DEFAULT_SHEET_NAME = 'pyexcel_sheet1'
MESSAGE_WARNING = "We do not overwrite files"
MESSAGE_WRITE_ERROR = "Cannot write sheet"
MESSAGE_ERROR_02 = "No valid parameters found!"
MESSAGE_DATA_ERROR_NO_SERIES = "No column names or row names found"
MESSAGE_DATA_ERROR_EMPTY_COLUMN_LIST = "Column list is empty. Do not waste resources"
MESSAGE_DATA_ERROR_COLUMN_LIST_INTEGER_TYPE = "Column list should be a list of integers"
MESSAGE_DATA_ERROR_COLUMN_LIST_STRING_TYPE = "Column list should be a list of strings"
MESSAGE_INDEX_OUT_OF_RANGE = "Index out of range"
MESSAGE_DATA_ERROR_EMPTY_CONTENT = "Nothing to be pasted!"
MESSAGE_DATA_ERROR_DATA_TYPE_MISMATCH = "Data type mismatch"
MESSAGE_DATA_ERROR_ORDEREDDICT_IS_EXPECTED = "Please give an ordered list"
MESSAGE_DEPRECATED_ROW_COLUMN = "Deprecated usage. Please use [row, column]"
MESSAGE_DEPRECATED_OUT_FILE = "Deprecated usage of 'out_file'. Please use dest_file_name"
MESSAGE_DEPRECATED_CONTENT = "Deprecated usage of 'content'. Please use file_content"
MESSAGE_NOT_IMPLEMENTED_01 = "Please use attribute row or column to extend sheet"
MESSAGE_NOT_IMPLEMENTED_02 = "Confused! What do you want to put as column names"
MESSAGE_READONLY = "This attribute is readonly"
MESSAGE_ERROR_NO_HANDLER = "No suitable plugins imported or installed"
MESSAGE_UNKNOWN_IO_OPERATION = "Internal error: an illegal source action"
MESSAGE_UPGRADE = "Please upgrade the plugin '%s' according to \
plugin compatibility table."
_IMPLEMENTATION_REMOVED = "Deprecated since 0.3.0! Implementation removed"
IO_FILE_TYPE_DOC_STRING = """
Get/Set data in/from {0} format
You could obtain content in {0} format by dot notation::
{1}.{0}
And you could as well set content by dot notation::
{1}.{0} = the_io_stream_in_{0}_format
if you need to pass on more parameters, you could use::
{1}.get_{0}(**keywords)
{1}.set_{0}(the_io_stream_in_{0}_format, **keywords)
"""
OUT_FILE_TYPE_DOC_STRING = """
Get data in {0} format
You could obtain content in {0} format by dot notation::
{1}.{0}
if you need to pass on more parameters, you could use::
{1}.get_{0}(**keywords)
"""
IN_FILE_TYPE_DOC_STRING = """
Set data in {0} format
You could set content in {0} format by dot notation::
{1}.{0}
if you need to pass on more parameters, you could use::
{1}.set_{0}(the_io_stream_in_{0}_format, **keywords)
"""
VALID_SHEET_PARAMETERS = ['name_columns_by_row',
'name_rows_by_column',
'colnames',
'rownames',
'transpose_before',
'transpose_after']
# for sources
# targets
SOURCE = 'source'
SHEET = 'sheet'
BOOK = 'book'
# actions
READ_ACTION = 'read'
WRITE_ACTION = 'write'
RW_ACTION = 'read-write'
FILE_TYPE_NOT_SUPPORTED_FMT = "File type '%s' is not supported for %s."
| mit |
gisce/OCB | addons/portal_event/__openerp__.py | 53 | 1665 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Portal Event',
'version': '0.1',
'complexity': 'easy',
'description': """
This module adds an event menu and features to your portal if event and portal are installed.
=============================================================================================
""",
'author': 'OpenERP SA',
'depends': ['event','portal'],
'data': [
'event_view.xml',
'security/portal_security.xml',
'portal_event_view.xml',
'security/ir.model.access.csv',
],
'installable': True,
'auto_install': True,
'category': 'Hidden',
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
kapt/django-oscar | tests/unit/offer/results_tests.py | 62 | 1039 | from decimal import Decimal as D
from django.test import TestCase
from oscar.apps.offer import models, results
class TestOfferApplicationsObject(TestCase):
def setUp(self):
self.applications = results.OfferApplications()
self.offer = models.ConditionalOffer()
def test_is_countable(self):
self.assertEqual(0, len(self.applications))
def test_can_filter_shipping_discounts(self):
result = models.ShippingDiscount()
self.applications.add(self.offer, result)
self.assertEqual(1, len(self.applications.shipping_discounts))
def test_can_filter_offer_discounts(self):
result = models.BasketDiscount(D('2.00'))
self.applications.add(self.offer, result)
self.assertEqual(1, len(self.applications.offer_discounts))
def test_can_filter_post_order_actions(self):
result = models.PostOrderAction("Something will happen")
self.applications.add(self.offer, result)
self.assertEqual(1, len(self.applications.post_order_actions))
| bsd-3-clause |
VirtueSecurity/aws-extender | BappModules/boto/beanstalk/layer1.py | 146 | 56259 | # Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import boto
import boto.jsonresponse
from boto.compat import json
from boto.regioninfo import RegionInfo
from boto.connection import AWSQueryConnection
class Layer1(AWSQueryConnection):
APIVersion = '2010-12-01'
DefaultRegionName = 'us-east-1'
DefaultRegionEndpoint = 'elasticbeanstalk.us-east-1.amazonaws.com'
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None,
proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None, debug=0,
https_connection_factory=None, region=None, path='/',
api_version=None, security_token=None, profile_name=None):
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
self.region = region
super(Layer1, self).__init__(aws_access_key_id,
aws_secret_access_key,
is_secure, port, proxy, proxy_port,
proxy_user, proxy_pass,
self.region.endpoint, debug,
https_connection_factory, path,
security_token, profile_name=profile_name)
def _required_auth_capability(self):
return ['hmac-v4']
def _encode_bool(self, v):
v = bool(v)
return {True: "true", False: "false"}[v]
def _get_response(self, action, params, path='/', verb='GET'):
params['ContentType'] = 'JSON'
response = self.make_request(action, params, path, verb)
body = response.read().decode('utf-8')
boto.log.debug(body)
if response.status == 200:
return json.loads(body)
else:
raise self.ResponseError(response.status, response.reason, body)
def check_dns_availability(self, cname_prefix):
"""Checks if the specified CNAME is available.
:type cname_prefix: string
:param cname_prefix: The prefix used when this CNAME is
reserved.
"""
params = {'CNAMEPrefix': cname_prefix}
return self._get_response('CheckDNSAvailability', params)
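    # Hedged usage sketch (the prefix is hypothetical; assumes AWS
    # credentials are available via the usual boto configuration):
    #
    #     conn = Layer1()
    #     print(conn.check_dns_availability('my-app-prefix'))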
def create_application(self, application_name, description=None):
"""
Creates an application that has one configuration template
named default and no application versions.
:type application_name: string
:param application_name: The name of the application.
Constraint: This name must be unique within your account. If the
specified name already exists, the action returns an
InvalidParameterValue error.
:type description: string
:param description: Describes the application.
:raises: TooManyApplicationsException
"""
params = {'ApplicationName': application_name}
if description:
params['Description'] = description
return self._get_response('CreateApplication', params)
def create_application_version(self, application_name, version_label,
description=None, s3_bucket=None,
s3_key=None, auto_create_application=None):
"""Creates an application version for the specified application.
:type application_name: string
:param application_name: The name of the application. If no
application is found with this name, and AutoCreateApplication is
false, returns an InvalidParameterValue error.
:type version_label: string
:param version_label: A label identifying this version. Constraint:
Must be unique per application. If an application version already
exists with this label for the specified application, AWS Elastic
Beanstalk returns an InvalidParameterValue error.
:type description: string
:param description: Describes this version.
:type s3_bucket: string
:param s3_bucket: The Amazon S3 bucket where the data is located.
:type s3_key: string
:param s3_key: The Amazon S3 key where the data is located. Both
s3_bucket and s3_key must be specified in order to use a specific
source bundle. If both of these values are not specified the
sample application will be used.
:type auto_create_application: boolean
:param auto_create_application: Determines how the system behaves if
the specified application for this version does not already exist:
true: Automatically creates the specified application for this
version if it does not already exist. false: Returns an
InvalidParameterValue if the specified application for this version
does not already exist. Default: false Valid Values: true | false
:raises: TooManyApplicationsException,
TooManyApplicationVersionsException,
InsufficientPrivilegesException,
S3LocationNotInServiceRegionException
"""
params = {'ApplicationName': application_name,
'VersionLabel': version_label}
if description:
params['Description'] = description
if s3_bucket and s3_key:
params['SourceBundle.S3Bucket'] = s3_bucket
params['SourceBundle.S3Key'] = s3_key
if auto_create_application:
params['AutoCreateApplication'] = self._encode_bool(
auto_create_application)
return self._get_response('CreateApplicationVersion', params)
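    # Hedged usage sketch (bucket, key, and names are hypothetical):
    #
    #     conn = Layer1()
    #     conn.create_application_version(
    #         'myapp', 'v1',
    #         s3_bucket='my-bucket', s3_key='myapp-v1.zip',
    #         auto_create_application=True)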
def create_configuration_template(self, application_name, template_name,
solution_stack_name=None,
source_configuration_application_name=None,
source_configuration_template_name=None,
environment_id=None, description=None,
option_settings=None):
"""Creates a configuration template.
Templates are associated with a specific application and are used to
deploy different versions of the application with the same
configuration settings.
:type application_name: string
:param application_name: The name of the application to associate with
this configuration template. If no application is found with this
name, AWS Elastic Beanstalk returns an InvalidParameterValue error.
:type template_name: string
:param template_name: The name of the configuration template.
Constraint: This name must be unique per application. Default: If
a configuration template already exists with this name, AWS Elastic
Beanstalk returns an InvalidParameterValue error.
:type solution_stack_name: string
:param solution_stack_name: The name of the solution stack used by this
configuration. The solution stack specifies the operating system,
architecture, and application server for a configuration template.
It determines the set of configuration options as well as the
possible and default values. Use ListAvailableSolutionStacks to
obtain a list of available solution stacks. Default: If the
SolutionStackName is not specified and the source configuration
parameter is blank, AWS Elastic Beanstalk uses the default solution
stack. If not specified and the source configuration parameter is
specified, AWS Elastic Beanstalk uses the same solution stack as
the source configuration template.
:type source_configuration_application_name: string
:param source_configuration_application_name: The name of the
application associated with the configuration.
:type source_configuration_template_name: string
:param source_configuration_template_name: The name of the
configuration template.
:type environment_id: string
:param environment_id: The ID of the environment used with this
configuration template.
:type description: string
:param description: Describes this configuration.
:type option_settings: list
:param option_settings: If specified, AWS Elastic Beanstalk sets the
specified configuration option to the requested value. The new
value overrides the value obtained from the solution stack or the
source configuration template.
:raises: InsufficientPrivilegesException,
TooManyConfigurationTemplatesException
"""
params = {'ApplicationName': application_name,
'TemplateName': template_name}
if solution_stack_name:
params['SolutionStackName'] = solution_stack_name
if source_configuration_application_name:
params['SourceConfiguration.ApplicationName'] = source_configuration_application_name
if source_configuration_template_name:
params['SourceConfiguration.TemplateName'] = source_configuration_template_name
if environment_id:
params['EnvironmentId'] = environment_id
if description:
params['Description'] = description
if option_settings:
self._build_list_params(params, option_settings,
'OptionSettings.member',
('Namespace', 'OptionName', 'Value'))
return self._get_response('CreateConfigurationTemplate', params)
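    # Hedged usage sketch: option_settings takes (Namespace, OptionName,
    # Value) tuples, as handled by _build_list_params below. The names and
    # the solution stack string are hypothetical:
    #
    #     conn.create_configuration_template(
    #         'myapp', 'my-template',
    #         solution_stack_name='64bit Amazon Linux running Python',
    #         option_settings=[('aws:autoscaling:launchconfiguration',
    #                           'Ec2KeyName', 'mykeypair')])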
def create_environment(self, application_name, environment_name,
version_label=None, template_name=None,
solution_stack_name=None, cname_prefix=None,
description=None, option_settings=None,
options_to_remove=None, tier_name=None,
tier_type=None, tier_version='1.0'):
"""Launches an environment for the application using a configuration.
:type application_name: string
:param application_name: The name of the application that contains the
version to be deployed. If no application is found with this name,
CreateEnvironment returns an InvalidParameterValue error.
:type environment_name: string
:param environment_name: A unique name for the deployment environment.
Used in the application URL. Constraint: Must be from 4 to 23
characters in length. The name can contain only letters, numbers,
and hyphens. It cannot start or end with a hyphen. This name must
be unique in your account. If the specified name already exists,
AWS Elastic Beanstalk returns an InvalidParameterValue error.
Default: If the CNAME parameter is not specified, the environment
name becomes part of the CNAME, and therefore part of the visible
URL for your application.
:type version_label: string
:param version_label: The name of the application version to deploy. If
the specified application has no associated application versions,
            AWS Elastic Beanstalk CreateEnvironment returns an
InvalidParameterValue error. Default: If not specified, AWS
Elastic Beanstalk attempts to launch the most recently created
application version.
:type template_name: string
:param template_name: The name of the configuration template to
use in deployment. If no configuration template is found with this
name, AWS Elastic Beanstalk returns an InvalidParameterValue error.
Condition: You must specify either this parameter or a
SolutionStackName, but not both. If you specify both, AWS Elastic
Beanstalk returns an InvalidParameterCombination error. If you do
not specify either, AWS Elastic Beanstalk returns a
MissingRequiredParameter error.
:type solution_stack_name: string
:param solution_stack_name: This is an alternative to specifying a
configuration name. If specified, AWS Elastic Beanstalk sets the
configuration values to the default values associated with the
specified solution stack. Condition: You must specify either this
or a TemplateName, but not both. If you specify both, AWS Elastic
Beanstalk returns an InvalidParameterCombination error. If you do
not specify either, AWS Elastic Beanstalk returns a
MissingRequiredParameter error.
:type cname_prefix: string
:param cname_prefix: If specified, the environment attempts to use this
value as the prefix for the CNAME. If not specified, the
environment uses the environment name.
:type description: string
:param description: Describes this environment.
:type option_settings: list
:param option_settings: If specified, AWS Elastic Beanstalk sets the
specified configuration options to the requested value in the
configuration set for the new environment. These override the
values obtained from the solution stack or the configuration
template. Each element in the list is a tuple of (Namespace,
OptionName, Value), for example::
[('aws:autoscaling:launchconfiguration',
'Ec2KeyName', 'mykeypair')]
:type options_to_remove: list
:param options_to_remove: A list of custom user-defined configuration
options to remove from the configuration set for this new
environment.
:type tier_name: string
:param tier_name: The name of the tier. Valid values are
"WebServer" and "Worker". Defaults to "WebServer".
The ``tier_name`` and a ``tier_type`` parameters are
related and the values provided must be valid.
The possible combinations are:
* "WebServer" and "Standard" (the default)
* "Worker" and "SQS/HTTP"
:type tier_type: string
:param tier_type: The type of the tier. Valid values are
"Standard" if ``tier_name`` is "WebServer" and "SQS/HTTP"
if ``tier_name`` is "Worker". Defaults to "Standard".
:type tier_version: string
        :param tier_version: The version of the tier. Valid values
currently are "1.0". Defaults to "1.0".
:raises: TooManyEnvironmentsException, InsufficientPrivilegesException
"""
params = {'ApplicationName': application_name,
'EnvironmentName': environment_name}
if version_label:
params['VersionLabel'] = version_label
if template_name:
params['TemplateName'] = template_name
if solution_stack_name:
params['SolutionStackName'] = solution_stack_name
if cname_prefix:
params['CNAMEPrefix'] = cname_prefix
if description:
params['Description'] = description
if option_settings:
self._build_list_params(params, option_settings,
'OptionSettings.member',
('Namespace', 'OptionName', 'Value'))
if options_to_remove:
self.build_list_params(params, options_to_remove,
'OptionsToRemove.member')
if tier_name and tier_type and tier_version:
params['Tier.Name'] = tier_name
params['Tier.Type'] = tier_type
params['Tier.Version'] = tier_version
return self._get_response('CreateEnvironment', params)
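    # Hedged usage sketch for a worker-tier environment (names and the
    # solution stack string are hypothetical); note the valid tier
    # combinations documented above:
    #
    #     conn.create_environment(
    #         'myapp', 'myapp-worker', version_label='v1',
    #         solution_stack_name='64bit Amazon Linux running Python',
    #         tier_name='Worker', tier_type='SQS/HTTP', tier_version='1.0')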
def create_storage_location(self):
"""
Creates the Amazon S3 storage location for the account. This
location is used to store user log files.
:raises: TooManyBucketsException,
S3SubscriptionRequiredException,
InsufficientPrivilegesException
"""
return self._get_response('CreateStorageLocation', params={})
def delete_application(self, application_name,
terminate_env_by_force=None):
"""
Deletes the specified application along with all associated
versions and configurations. The application versions will not
be deleted from your Amazon S3 bucket.
:type application_name: string
:param application_name: The name of the application to delete.
:type terminate_env_by_force: boolean
:param terminate_env_by_force: When set to true, running
environments will be terminated before deleting the application.
:raises: OperationInProgressException
"""
params = {'ApplicationName': application_name}
if terminate_env_by_force:
params['TerminateEnvByForce'] = self._encode_bool(
terminate_env_by_force)
return self._get_response('DeleteApplication', params)
def delete_application_version(self, application_name, version_label,
delete_source_bundle=None):
"""Deletes the specified version from the specified application.
:type application_name: string
:param application_name: The name of the application to delete
releases from.
:type version_label: string
:param version_label: The label of the version to delete.
:type delete_source_bundle: boolean
:param delete_source_bundle: Indicates whether to delete the
associated source bundle from Amazon S3. Valid Values: true |
false
:raises: SourceBundleDeletionException,
InsufficientPrivilegesException,
OperationInProgressException,
S3LocationNotInServiceRegionException
"""
params = {'ApplicationName': application_name,
'VersionLabel': version_label}
if delete_source_bundle:
params['DeleteSourceBundle'] = self._encode_bool(
delete_source_bundle)
return self._get_response('DeleteApplicationVersion', params)
def delete_configuration_template(self, application_name, template_name):
"""Deletes the specified configuration template.
:type application_name: string
:param application_name: The name of the application to delete
the configuration template from.
:type template_name: string
:param template_name: The name of the configuration template to
delete.
:raises: OperationInProgressException
"""
params = {'ApplicationName': application_name,
'TemplateName': template_name}
return self._get_response('DeleteConfigurationTemplate', params)
def delete_environment_configuration(self, application_name,
environment_name):
"""
Deletes the draft configuration associated with the running
environment. Updating a running environment with any
configuration changes creates a draft configuration set. You can
get the draft configuration using DescribeConfigurationSettings
while the update is in progress or if the update fails. The
DeploymentStatus for the draft configuration indicates whether
the deployment is in process or has failed. The draft
configuration remains in existence until it is deleted with this
action.
:type application_name: string
:param application_name: The name of the application the
environment is associated with.
:type environment_name: string
:param environment_name: The name of the environment to delete
the draft configuration from.
"""
params = {'ApplicationName': application_name,
'EnvironmentName': environment_name}
return self._get_response('DeleteEnvironmentConfiguration', params)
def describe_application_versions(self, application_name=None,
version_labels=None):
"""Returns descriptions for existing application versions.
:type application_name: string
:param application_name: If specified, AWS Elastic Beanstalk restricts
the returned descriptions to only include ones that are associated
with the specified application.
:type version_labels: list
:param version_labels: If specified, restricts the returned
descriptions to only include ones that have the specified version
labels.
"""
params = {}
if application_name:
params['ApplicationName'] = application_name
if version_labels:
self.build_list_params(params, version_labels,
'VersionLabels.member')
return self._get_response('DescribeApplicationVersions', params)
def describe_applications(self, application_names=None):
"""Returns the descriptions of existing applications.
:type application_names: list
:param application_names: If specified, AWS Elastic Beanstalk restricts
the returned descriptions to only include those with the specified
names.
"""
params = {}
if application_names:
self.build_list_params(params, application_names,
'ApplicationNames.member')
return self._get_response('DescribeApplications', params)
def describe_configuration_options(self, application_name=None,
template_name=None,
environment_name=None,
solution_stack_name=None, options=None):
"""Describes configuration options used in a template or environment.
Describes the configuration options that are used in a
particular configuration template or environment, or that a
specified solution stack defines. The description includes the
        values of the options, their default values, and an indication of
the required action on a running environment if an option value
is changed.
:type application_name: string
:param application_name: The name of the application associated with
the configuration template or environment. Only needed if you want
to describe the configuration options associated with either the
configuration template or environment.
:type template_name: string
:param template_name: The name of the configuration template whose
configuration options you want to describe.
:type environment_name: string
:param environment_name: The name of the environment whose
configuration options you want to describe.
:type solution_stack_name: string
:param solution_stack_name: The name of the solution stack whose
configuration options you want to describe.
:type options: list
:param options: If specified, restricts the descriptions to only
the specified options.
"""
params = {}
if application_name:
params['ApplicationName'] = application_name
if template_name:
params['TemplateName'] = template_name
if environment_name:
params['EnvironmentName'] = environment_name
if solution_stack_name:
params['SolutionStackName'] = solution_stack_name
if options:
self.build_list_params(params, options, 'Options.member')
return self._get_response('DescribeConfigurationOptions', params)
def describe_configuration_settings(self, application_name,
template_name=None,
environment_name=None):
"""
Returns a description of the settings for the specified
configuration set, that is, either a configuration template or
the configuration set associated with a running environment.
When describing the settings for the configuration set
associated with a running environment, it is possible to receive
two sets of setting descriptions. One is the deployed
configuration set, and the other is a draft configuration of an
environment that is either in the process of deployment or that
failed to deploy.
:type application_name: string
:param application_name: The application for the environment or
configuration template.
:type template_name: string
:param template_name: The name of the configuration template to
describe. Conditional: You must specify either this parameter or
an EnvironmentName, but not both. If you specify both, AWS Elastic
Beanstalk returns an InvalidParameterCombination error. If you do
not specify either, AWS Elastic Beanstalk returns a
MissingRequiredParameter error.
:type environment_name: string
:param environment_name: The name of the environment to describe.
Condition: You must specify either this or a TemplateName, but not
both. If you specify both, AWS Elastic Beanstalk returns an
InvalidParameterCombination error. If you do not specify either,
AWS Elastic Beanstalk returns MissingRequiredParameter error.
"""
params = {'ApplicationName': application_name}
if template_name:
params['TemplateName'] = template_name
if environment_name:
params['EnvironmentName'] = environment_name
return self._get_response('DescribeConfigurationSettings', params)
def describe_environment_resources(self, environment_id=None,
environment_name=None):
"""Returns AWS resources for this environment.
:type environment_id: string
:param environment_id: The ID of the environment to retrieve AWS
resource usage data. Condition: You must specify either this or an
EnvironmentName, or both. If you do not specify either, AWS Elastic
Beanstalk returns MissingRequiredParameter error.
:type environment_name: string
:param environment_name: The name of the environment to retrieve
AWS resource usage data. Condition: You must specify either this
or an EnvironmentId, or both. If you do not specify either, AWS
Elastic Beanstalk returns MissingRequiredParameter error.
:raises: InsufficientPrivilegesException
"""
params = {}
if environment_id:
params['EnvironmentId'] = environment_id
if environment_name:
params['EnvironmentName'] = environment_name
return self._get_response('DescribeEnvironmentResources', params)
def describe_environments(self, application_name=None, version_label=None,
environment_ids=None, environment_names=None,
include_deleted=None,
included_deleted_back_to=None):
"""Returns descriptions for existing environments.
:type application_name: string
:param application_name: If specified, AWS Elastic Beanstalk restricts
the returned descriptions to include only those that are associated
with this application.
:type version_label: string
:param version_label: If specified, AWS Elastic Beanstalk restricts the
returned descriptions to include only those that are associated
with this application version.
:type environment_ids: list
:param environment_ids: If specified, AWS Elastic Beanstalk restricts
the returned descriptions to include only those that have the
specified IDs.
:type environment_names: list
:param environment_names: If specified, AWS Elastic Beanstalk restricts
the returned descriptions to include only those that have the
specified names.
:type include_deleted: boolean
:param include_deleted: Indicates whether to include deleted
environments: true: Environments that have been deleted after
IncludedDeletedBackTo are displayed. false: Do not include deleted
environments.
:type included_deleted_back_to: timestamp
:param included_deleted_back_to: If specified when IncludeDeleted is
set to true, then environments deleted after this date are
displayed.
"""
params = {}
if application_name:
params['ApplicationName'] = application_name
if version_label:
params['VersionLabel'] = version_label
if environment_ids:
self.build_list_params(params, environment_ids,
'EnvironmentIds.member')
if environment_names:
self.build_list_params(params, environment_names,
'EnvironmentNames.member')
if include_deleted:
params['IncludeDeleted'] = self._encode_bool(include_deleted)
if included_deleted_back_to:
params['IncludedDeletedBackTo'] = included_deleted_back_to
return self._get_response('DescribeEnvironments', params)
def describe_events(self, application_name=None, version_label=None,
template_name=None, environment_id=None,
environment_name=None, request_id=None, severity=None,
start_time=None, end_time=None, max_records=None,
next_token=None):
"""Returns event descriptions matching criteria up to the last 6 weeks.
:type application_name: string
:param application_name: If specified, AWS Elastic Beanstalk restricts
the returned descriptions to include only those associated with
this application.
:type version_label: string
:param version_label: If specified, AWS Elastic Beanstalk restricts the
returned descriptions to those associated with this application
version.
:type template_name: string
:param template_name: If specified, AWS Elastic Beanstalk restricts the
returned descriptions to those that are associated with this
environment configuration.
:type environment_id: string
:param environment_id: If specified, AWS Elastic Beanstalk restricts
the returned descriptions to those associated with this
environment.
:type environment_name: string
:param environment_name: If specified, AWS Elastic Beanstalk restricts
the returned descriptions to those associated with this
environment.
:type request_id: string
:param request_id: If specified, AWS Elastic Beanstalk restricts the
described events to include only those associated with this request
ID.
:type severity: string
:param severity: If specified, limits the events returned from this
call to include only those with the specified severity or higher.
:type start_time: timestamp
:param start_time: If specified, AWS Elastic Beanstalk restricts the
returned descriptions to those that occur on or after this time.
:type end_time: timestamp
:param end_time: If specified, AWS Elastic Beanstalk restricts the
returned descriptions to those that occur up to, but not including,
the EndTime.
:type max_records: integer
:param max_records: Specifies the maximum number of events that can be
returned, beginning with the most recent event.
:type next_token: string
:param next_token: Pagination token. If specified, the events return
the next batch of results.
"""
params = {}
if application_name:
params['ApplicationName'] = application_name
if version_label:
params['VersionLabel'] = version_label
if template_name:
params['TemplateName'] = template_name
if environment_id:
params['EnvironmentId'] = environment_id
if environment_name:
params['EnvironmentName'] = environment_name
if request_id:
params['RequestId'] = request_id
if severity:
params['Severity'] = severity
if start_time:
params['StartTime'] = start_time
if end_time:
params['EndTime'] = end_time
if max_records:
params['MaxRecords'] = max_records
if next_token:
params['NextToken'] = next_token
return self._get_response('DescribeEvents', params)
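    # Hedged pagination sketch: feed NextToken from one response into the
    # next call. The JSON wrapper keys follow the ContentType=JSON response
    # shape and should be treated as an assumption:
    #
    #     token = None
    #     while True:
    #         page = conn.describe_events(application_name='myapp',
    #                                     max_records=100, next_token=token)
    #         result = page['DescribeEventsResponse']['DescribeEventsResult']
    #         token = result.get('NextToken')
    #         if not token:
    #             break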
def list_available_solution_stacks(self):
"""Returns a list of the available solution stack names."""
return self._get_response('ListAvailableSolutionStacks', params={})
def rebuild_environment(self, environment_id=None, environment_name=None):
"""
Deletes and recreates all of the AWS resources (for example:
the Auto Scaling group, load balancer, etc.) for a specified
environment and forces a restart.
:type environment_id: string
:param environment_id: The ID of the environment to rebuild.
Condition: You must specify either this or an EnvironmentName, or
both. If you do not specify either, AWS Elastic Beanstalk returns
MissingRequiredParameter error.
:type environment_name: string
:param environment_name: The name of the environment to rebuild.
Condition: You must specify either this or an EnvironmentId, or
both. If you do not specify either, AWS Elastic Beanstalk returns
MissingRequiredParameter error.
:raises: InsufficientPrivilegesException
"""
params = {}
if environment_id:
params['EnvironmentId'] = environment_id
if environment_name:
params['EnvironmentName'] = environment_name
return self._get_response('RebuildEnvironment', params)
def request_environment_info(self, info_type='tail', environment_id=None,
environment_name=None):
"""
Initiates a request to compile the specified type of
information of the deployed environment. Setting the InfoType
to tail compiles the last lines from the application server log
files of every Amazon EC2 instance in your environment. Use
RetrieveEnvironmentInfo to access the compiled information.
:type info_type: string
:param info_type: The type of information to request.
:type environment_id: string
:param environment_id: The ID of the environment of the
requested data. If no such environment is found,
RequestEnvironmentInfo returns an InvalidParameterValue error.
Condition: You must specify either this or an EnvironmentName, or
both. If you do not specify either, AWS Elastic Beanstalk returns
MissingRequiredParameter error.
:type environment_name: string
:param environment_name: The name of the environment of the
requested data. If no such environment is found,
RequestEnvironmentInfo returns an InvalidParameterValue error.
Condition: You must specify either this or an EnvironmentId, or
both. If you do not specify either, AWS Elastic Beanstalk returns
MissingRequiredParameter error.
"""
params = {'InfoType': info_type}
if environment_id:
params['EnvironmentId'] = environment_id
if environment_name:
params['EnvironmentName'] = environment_name
return self._get_response('RequestEnvironmentInfo', params)
def restart_app_server(self, environment_id=None, environment_name=None):
"""
Causes the environment to restart the application container
server running on each Amazon EC2 instance.
:type environment_id: string
:param environment_id: The ID of the environment to restart the server
for. Condition: You must specify either this or an
EnvironmentName, or both. If you do not specify either, AWS Elastic
Beanstalk returns MissingRequiredParameter error.
:type environment_name: string
:param environment_name: The name of the environment to restart the
server for. Condition: You must specify either this or an
EnvironmentId, or both. If you do not specify either, AWS Elastic
Beanstalk returns MissingRequiredParameter error.
"""
params = {}
if environment_id:
params['EnvironmentId'] = environment_id
if environment_name:
params['EnvironmentName'] = environment_name
return self._get_response('RestartAppServer', params)
def retrieve_environment_info(self, info_type='tail', environment_id=None,
environment_name=None):
"""
Retrieves the compiled information from a RequestEnvironmentInfo
request.
:type info_type: string
:param info_type: The type of information to retrieve.
:type environment_id: string
:param environment_id: The ID of the data's environment. If no such
environment is found, returns an InvalidParameterValue error.
Condition: You must specify either this or an EnvironmentName, or
both. If you do not specify either, AWS Elastic Beanstalk returns
MissingRequiredParameter error.
:type environment_name: string
:param environment_name: The name of the data's environment. If no such
environment is found, returns an InvalidParameterValue error.
Condition: You must specify either this or an EnvironmentId, or
both. If you do not specify either, AWS Elastic Beanstalk returns
MissingRequiredParameter error.
"""
params = {'InfoType': info_type}
if environment_id:
params['EnvironmentId'] = environment_id
if environment_name:
params['EnvironmentName'] = environment_name
return self._get_response('RetrieveEnvironmentInfo', params)
def swap_environment_cnames(self, source_environment_id=None,
source_environment_name=None,
destination_environment_id=None,
destination_environment_name=None):
"""Swaps the CNAMEs of two environments.
:type source_environment_id: string
:param source_environment_id: The ID of the source environment.
Condition: You must specify at least the SourceEnvironmentID or the
SourceEnvironmentName. You may also specify both. If you specify
the SourceEnvironmentId, you must specify the
DestinationEnvironmentId.
:type source_environment_name: string
:param source_environment_name: The name of the source environment.
Condition: You must specify at least the SourceEnvironmentID or the
SourceEnvironmentName. You may also specify both. If you specify
the SourceEnvironmentName, you must specify the
DestinationEnvironmentName.
:type destination_environment_id: string
:param destination_environment_id: The ID of the destination
environment. Condition: You must specify at least the
DestinationEnvironmentID or the DestinationEnvironmentName. You may
also specify both. You must specify the SourceEnvironmentId with
the DestinationEnvironmentId.
:type destination_environment_name: string
:param destination_environment_name: The name of the destination
environment. Condition: You must specify at least the
DestinationEnvironmentID or the DestinationEnvironmentName. You may
also specify both. You must specify the SourceEnvironmentName with
the DestinationEnvironmentName.
"""
params = {}
if source_environment_id:
params['SourceEnvironmentId'] = source_environment_id
if source_environment_name:
params['SourceEnvironmentName'] = source_environment_name
if destination_environment_id:
params['DestinationEnvironmentId'] = destination_environment_id
if destination_environment_name:
params['DestinationEnvironmentName'] = destination_environment_name
return self._get_response('SwapEnvironmentCNAMEs', params)
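    # Hedged usage sketch for a blue/green style CNAME swap (environment
    # names are hypothetical):
    #
    #     conn.swap_environment_cnames(
    #         source_environment_name='myapp-blue',
    #         destination_environment_name='myapp-green')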
def terminate_environment(self, environment_id=None, environment_name=None,
terminate_resources=None):
"""Terminates the specified environment.
:type environment_id: string
:param environment_id: The ID of the environment to terminate.
Condition: You must specify either this or an EnvironmentName, or
both. If you do not specify either, AWS Elastic Beanstalk returns
MissingRequiredParameter error.
:type environment_name: string
:param environment_name: The name of the environment to terminate.
Condition: You must specify either this or an EnvironmentId, or
both. If you do not specify either, AWS Elastic Beanstalk returns
MissingRequiredParameter error.
:type terminate_resources: boolean
:param terminate_resources: Indicates whether the associated AWS
resources should shut down when the environment is terminated:
true: (default) The user AWS resources (for example, the Auto
Scaling group, LoadBalancer, etc.) are terminated along with the
environment. false: The environment is removed from the AWS
Elastic Beanstalk but the AWS resources continue to operate. For
more information, see the AWS Elastic Beanstalk User Guide.
Default: true Valid Values: true | false
:raises: InsufficientPrivilegesException
"""
params = {}
if environment_id:
params['EnvironmentId'] = environment_id
if environment_name:
params['EnvironmentName'] = environment_name
if terminate_resources:
params['TerminateResources'] = self._encode_bool(
terminate_resources)
return self._get_response('TerminateEnvironment', params)
def update_application(self, application_name, description=None):
"""
Updates the specified application to have the specified
properties.
:type application_name: string
:param application_name: The name of the application to update.
If no such application is found, UpdateApplication returns an
InvalidParameterValue error.
:type description: string
:param description: A new description for the application. Default: If
not specified, AWS Elastic Beanstalk does not update the
description.
"""
params = {'ApplicationName': application_name}
if description:
params['Description'] = description
return self._get_response('UpdateApplication', params)
def update_application_version(self, application_name, version_label,
description=None):
"""Updates the application version to have the properties.
:type application_name: string
:param application_name: The name of the application associated with
this version. If no application is found with this name,
            UpdateApplicationVersion returns an InvalidParameterValue error.
:type version_label: string
:param version_label: The name of the version to update. If no
            application version is found with this label, UpdateApplicationVersion
returns an InvalidParameterValue error.
:type description: string
:param description: A new description for this release.
"""
params = {'ApplicationName': application_name,
'VersionLabel': version_label}
if description:
params['Description'] = description
return self._get_response('UpdateApplicationVersion', params)
def update_configuration_template(self, application_name, template_name,
description=None, option_settings=None,
options_to_remove=None):
"""
Updates the specified configuration template to have the
specified properties or configuration option values.
:type application_name: string
:param application_name: The name of the application associated with
the configuration template to update. If no application is found
with this name, UpdateConfigurationTemplate returns an
InvalidParameterValue error.
:type template_name: string
:param template_name: The name of the configuration template to update.
If no configuration template is found with this name,
UpdateConfigurationTemplate returns an InvalidParameterValue error.
:type description: string
:param description: A new description for the configuration.
:type option_settings: list
:param option_settings: A list of configuration option settings to
update with the new specified option value.
:type options_to_remove: list
:param options_to_remove: A list of configuration options to remove
from the configuration set. Constraint: You can remove only
UserDefined configuration options.
:raises: InsufficientPrivilegesException
"""
params = {'ApplicationName': application_name,
'TemplateName': template_name}
if description:
params['Description'] = description
if option_settings:
self._build_list_params(params, option_settings,
'OptionSettings.member',
('Namespace', 'OptionName', 'Value'))
if options_to_remove:
self.build_list_params(params, options_to_remove,
'OptionsToRemove.member')
return self._get_response('UpdateConfigurationTemplate', params)
def update_environment(self, environment_id=None, environment_name=None,
version_label=None, template_name=None,
description=None, option_settings=None,
options_to_remove=None, tier_name=None,
tier_type=None, tier_version='1.0'):
"""
Updates the environment description, deploys a new application
version, updates the configuration settings to an entirely new
configuration template, or updates select configuration option
values in the running environment. Attempting to update both
the release and configuration is not allowed and AWS Elastic
Beanstalk returns an InvalidParameterCombination error. When
updating the configuration settings to a new template or
individual settings, a draft configuration is created and
DescribeConfigurationSettings for this environment returns two
setting descriptions with different DeploymentStatus values.
:type environment_id: string
:param environment_id: The ID of the environment to update. If no
environment with this ID exists, AWS Elastic Beanstalk returns an
InvalidParameterValue error. Condition: You must specify either
this or an EnvironmentName, or both. If you do not specify either,
AWS Elastic Beanstalk returns MissingRequiredParameter error.
:type environment_name: string
:param environment_name: The name of the environment to update. If no
environment with this name exists, AWS Elastic Beanstalk returns an
InvalidParameterValue error. Condition: You must specify either
this or an EnvironmentId, or both. If you do not specify either,
AWS Elastic Beanstalk returns MissingRequiredParameter error.
:type version_label: string
:param version_label: If this parameter is specified, AWS Elastic
Beanstalk deploys the named application version to the environment.
If no such application version is found, returns an
InvalidParameterValue error.
:type template_name: string
:param template_name: If this parameter is specified, AWS Elastic
Beanstalk deploys this configuration template to the environment.
If no such configuration template is found, AWS Elastic Beanstalk
returns an InvalidParameterValue error.
:type description: string
:param description: If this parameter is specified, AWS Elastic
Beanstalk updates the description of this environment.
:type option_settings: list
:param option_settings: If specified, AWS Elastic Beanstalk updates the
configuration set associated with the running environment and sets
the specified configuration options to the requested value.
:type options_to_remove: list
:param options_to_remove: A list of custom user-defined configuration
options to remove from the configuration set for this environment.
:type tier_name: string
:param tier_name: The name of the tier. Valid values are
"WebServer" and "Worker". Defaults to "WebServer".
The ``tier_name`` and a ``tier_type`` parameters are
related and the values provided must be valid.
The possible combinations are:
* "WebServer" and "Standard" (the default)
* "Worker" and "SQS/HTTP"
:type tier_type: string
:param tier_type: The type of the tier. Valid values are
"Standard" if ``tier_name`` is "WebServer" and "SQS/HTTP"
if ``tier_name`` is "Worker". Defaults to "Standard".
:type tier_version: string
        :param tier_version: The version of the tier. Valid values
currently are "1.0". Defaults to "1.0".
:raises: InsufficientPrivilegesException
"""
params = {}
if environment_id:
params['EnvironmentId'] = environment_id
if environment_name:
params['EnvironmentName'] = environment_name
if version_label:
params['VersionLabel'] = version_label
if template_name:
params['TemplateName'] = template_name
if description:
params['Description'] = description
if option_settings:
self._build_list_params(params, option_settings,
'OptionSettings.member',
('Namespace', 'OptionName', 'Value'))
if options_to_remove:
self.build_list_params(params, options_to_remove,
'OptionsToRemove.member')
if tier_name and tier_type and tier_version:
params['Tier.Name'] = tier_name
params['Tier.Type'] = tier_type
params['Tier.Version'] = tier_version
return self._get_response('UpdateEnvironment', params)
def validate_configuration_settings(self, application_name,
option_settings, template_name=None,
environment_name=None):
"""
Takes a set of configuration settings and either a
configuration template or environment, and determines whether
those values are valid. This action returns a list of messages
indicating any errors or warnings associated with the selection
of option values.
:type application_name: string
:param application_name: The name of the application that the
configuration template or environment belongs to.
:type template_name: string
:param template_name: The name of the configuration template to
validate the settings against. Condition: You cannot specify both
this and an environment name.
:type environment_name: string
:param environment_name: The name of the environment to validate the
settings against. Condition: You cannot specify both this and a
configuration template name.
:type option_settings: list
:param option_settings: A list of the options and desired values to
evaluate.
:raises: InsufficientPrivilegesException
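        Example (illustrative; ``conn`` is assumed to be a connected
        Elastic Beanstalk ``Layer1`` instance, and the option namespace
        is only a sample)::

            conn.validate_configuration_settings(
                'my-app',
                [('aws:autoscaling:asg', 'MinSize', '1')],
                environment_name='my-env')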
"""
params = {'ApplicationName': application_name}
self._build_list_params(params, option_settings,
'OptionSettings.member',
('Namespace', 'OptionName', 'Value'))
if template_name:
params['TemplateName'] = template_name
if environment_name:
params['EnvironmentName'] = environment_name
return self._get_response('ValidateConfigurationSettings', params)
def _build_list_params(self, params, user_values, prefix, tuple_names):
        # For params such as the ConfigurationOptionSettings, callers can
        # specify a list of tuples where each tuple maps to a specific
        # arg.  For example:
        # user_values = [('foo', 'bar', 'baz')]
# prefix=MyOption.member
# tuple_names=('One', 'Two', 'Three')
# would result in:
# MyOption.member.1.One = foo
# MyOption.member.1.Two = bar
# MyOption.member.1.Three = baz
for i, user_value in enumerate(user_values, 1):
current_prefix = '%s.%s' % (prefix, i)
for key, value in zip(tuple_names, user_value):
full_key = '%s.%s' % (current_prefix, key)
params[full_key] = value
| mit |
gunchleoc/django | tests/annotations/tests.py | 194 | 19574 | from __future__ import unicode_literals
import datetime
from decimal import Decimal
from django.core.exceptions import FieldDoesNotExist, FieldError
from django.db.models import (
F, BooleanField, CharField, Count, DateTimeField, ExpressionWrapper, Func,
IntegerField, Sum, Value,
)
from django.db.models.functions import Lower
from django.test import TestCase, skipUnlessDBFeature
from django.utils import six
from .models import (
Author, Book, Company, DepartmentStore, Employee, Publisher, Store, Ticket,
)
def cxOracle_py3_bug(func):
"""
There's a bug in Django/cx_Oracle with respect to string handling under
Python 3 (essentially, they treat Python 3 strings as Python 2 strings
rather than unicode). This makes some tests here fail under Python 3, so
we mark them as expected failures until someone fixes them in #23843.
"""
from unittest import expectedFailure
from django.db import connection
return expectedFailure(func) if connection.vendor == 'oracle' and six.PY3 else func
class NonAggregateAnnotationTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.a1 = Author.objects.create(name='Adrian Holovaty', age=34)
cls.a2 = Author.objects.create(name='Jacob Kaplan-Moss', age=35)
cls.a3 = Author.objects.create(name='Brad Dayley', age=45)
cls.a4 = Author.objects.create(name='James Bennett', age=29)
cls.a5 = Author.objects.create(name='Jeffrey Forcier', age=37)
cls.a6 = Author.objects.create(name='Paul Bissex', age=29)
cls.a7 = Author.objects.create(name='Wesley J. Chun', age=25)
cls.a8 = Author.objects.create(name='Peter Norvig', age=57)
cls.a9 = Author.objects.create(name='Stuart Russell', age=46)
cls.a1.friends.add(cls.a2, cls.a4)
cls.a2.friends.add(cls.a1, cls.a7)
cls.a4.friends.add(cls.a1)
cls.a5.friends.add(cls.a6, cls.a7)
cls.a6.friends.add(cls.a5, cls.a7)
cls.a7.friends.add(cls.a2, cls.a5, cls.a6)
cls.a8.friends.add(cls.a9)
cls.a9.friends.add(cls.a8)
cls.p1 = Publisher.objects.create(name='Apress', num_awards=3)
cls.p2 = Publisher.objects.create(name='Sams', num_awards=1)
cls.p3 = Publisher.objects.create(name='Prentice Hall', num_awards=7)
cls.p4 = Publisher.objects.create(name='Morgan Kaufmann', num_awards=9)
cls.p5 = Publisher.objects.create(name="Jonno's House of Books", num_awards=0)
cls.b1 = Book.objects.create(
isbn='159059725', name='The Definitive Guide to Django: Web Development Done Right',
pages=447, rating=4.5, price=Decimal('30.00'), contact=cls.a1, publisher=cls.p1,
pubdate=datetime.date(2007, 12, 6)
)
cls.b2 = Book.objects.create(
isbn='067232959', name='Sams Teach Yourself Django in 24 Hours',
pages=528, rating=3.0, price=Decimal('23.09'), contact=cls.a3, publisher=cls.p2,
pubdate=datetime.date(2008, 3, 3)
)
cls.b3 = Book.objects.create(
isbn='159059996', name='Practical Django Projects',
pages=300, rating=4.0, price=Decimal('29.69'), contact=cls.a4, publisher=cls.p1,
pubdate=datetime.date(2008, 6, 23)
)
cls.b4 = Book.objects.create(
isbn='013235613', name='Python Web Development with Django',
pages=350, rating=4.0, price=Decimal('29.69'), contact=cls.a5, publisher=cls.p3,
pubdate=datetime.date(2008, 11, 3)
)
cls.b5 = Book.objects.create(
isbn='013790395', name='Artificial Intelligence: A Modern Approach',
pages=1132, rating=4.0, price=Decimal('82.80'), contact=cls.a8, publisher=cls.p3,
pubdate=datetime.date(1995, 1, 15)
)
cls.b6 = Book.objects.create(
isbn='155860191', name='Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp',
pages=946, rating=5.0, price=Decimal('75.00'), contact=cls.a8, publisher=cls.p4,
pubdate=datetime.date(1991, 10, 15)
)
cls.b1.authors.add(cls.a1, cls.a2)
cls.b2.authors.add(cls.a3)
cls.b3.authors.add(cls.a4)
cls.b4.authors.add(cls.a5, cls.a6, cls.a7)
cls.b5.authors.add(cls.a8, cls.a9)
cls.b6.authors.add(cls.a8)
s1 = Store.objects.create(
name='Amazon.com',
original_opening=datetime.datetime(1994, 4, 23, 9, 17, 42),
friday_night_closing=datetime.time(23, 59, 59)
)
s2 = Store.objects.create(
name='Books.com',
original_opening=datetime.datetime(2001, 3, 15, 11, 23, 37),
friday_night_closing=datetime.time(23, 59, 59)
)
s3 = Store.objects.create(
name="Mamma and Pappa's Books",
original_opening=datetime.datetime(1945, 4, 25, 16, 24, 14),
friday_night_closing=datetime.time(21, 30)
)
s1.books.add(cls.b1, cls.b2, cls.b3, cls.b4, cls.b5, cls.b6)
s2.books.add(cls.b1, cls.b3, cls.b5, cls.b6)
s3.books.add(cls.b3, cls.b4, cls.b6)
def test_basic_annotation(self):
books = Book.objects.annotate(
is_book=Value(1, output_field=IntegerField()))
for book in books:
self.assertEqual(book.is_book, 1)
def test_basic_f_annotation(self):
books = Book.objects.annotate(another_rating=F('rating'))
for book in books:
self.assertEqual(book.another_rating, book.rating)
def test_joined_annotation(self):
books = Book.objects.select_related('publisher').annotate(
num_awards=F('publisher__num_awards'))
for book in books:
self.assertEqual(book.num_awards, book.publisher.num_awards)
def test_mixed_type_annotation_date_interval(self):
active = datetime.datetime(2015, 3, 20, 14, 0, 0)
duration = datetime.timedelta(hours=1)
expires = datetime.datetime(2015, 3, 20, 14, 0, 0) + duration
Ticket.objects.create(active_at=active, duration=duration)
t = Ticket.objects.annotate(
expires=ExpressionWrapper(F('active_at') + F('duration'), output_field=DateTimeField())
).first()
self.assertEqual(t.expires, expires)
def test_mixed_type_annotation_numbers(self):
test = self.b1
b = Book.objects.annotate(
combined=ExpressionWrapper(F('pages') + F('rating'), output_field=IntegerField())
).get(isbn=test.isbn)
combined = int(test.pages + test.rating)
self.assertEqual(b.combined, combined)
def test_annotate_with_aggregation(self):
books = Book.objects.annotate(
is_book=Value(1, output_field=IntegerField()),
rating_count=Count('rating'))
for book in books:
self.assertEqual(book.is_book, 1)
self.assertEqual(book.rating_count, 1)
def test_aggregate_over_annotation(self):
agg = Author.objects.annotate(other_age=F('age')).aggregate(otherage_sum=Sum('other_age'))
other_agg = Author.objects.aggregate(age_sum=Sum('age'))
self.assertEqual(agg['otherage_sum'], other_agg['age_sum'])
@skipUnlessDBFeature('can_distinct_on_fields')
def test_distinct_on_with_annotation(self):
store = Store.objects.create(
name='test store',
original_opening=datetime.datetime.now(),
friday_night_closing=datetime.time(21, 00, 00),
)
names = [
'Theodore Roosevelt',
'Eleanor Roosevelt',
'Franklin Roosevelt',
'Ned Stark',
'Catelyn Stark',
]
for name in names:
Employee.objects.create(
store=store,
first_name=name.split()[0],
last_name=name.split()[1],
age=30, salary=2000,
)
people = Employee.objects.annotate(
name_lower=Lower('last_name'),
).distinct('name_lower')
self.assertEqual(set(p.last_name for p in people), {'Stark', 'Roosevelt'})
self.assertEqual(len(people), 2)
people2 = Employee.objects.annotate(
test_alias=F('store__name'),
).distinct('test_alias')
self.assertEqual(len(people2), 1)
def test_filter_annotation(self):
books = Book.objects.annotate(
is_book=Value(1, output_field=IntegerField())
).filter(is_book=1)
for book in books:
self.assertEqual(book.is_book, 1)
def test_filter_annotation_with_f(self):
books = Book.objects.annotate(
other_rating=F('rating')
).filter(other_rating=3.5)
for book in books:
self.assertEqual(book.other_rating, 3.5)
def test_filter_annotation_with_double_f(self):
books = Book.objects.annotate(
other_rating=F('rating')
).filter(other_rating=F('rating'))
for book in books:
self.assertEqual(book.other_rating, book.rating)
def test_filter_agg_with_double_f(self):
books = Book.objects.annotate(
sum_rating=Sum('rating')
).filter(sum_rating=F('sum_rating'))
for book in books:
self.assertEqual(book.sum_rating, book.rating)
def test_filter_wrong_annotation(self):
with six.assertRaisesRegex(self, FieldError, "Cannot resolve keyword .*"):
list(Book.objects.annotate(
sum_rating=Sum('rating')
).filter(sum_rating=F('nope')))
def test_combined_annotation_commutative(self):
book1 = Book.objects.annotate(adjusted_rating=F('rating') + 2).get(pk=self.b1.pk)
book2 = Book.objects.annotate(adjusted_rating=2 + F('rating')).get(pk=self.b1.pk)
self.assertEqual(book1.adjusted_rating, book2.adjusted_rating)
book1 = Book.objects.annotate(adjusted_rating=F('rating') + None).get(pk=self.b1.pk)
book2 = Book.objects.annotate(adjusted_rating=None + F('rating')).get(pk=self.b1.pk)
self.assertEqual(book1.adjusted_rating, book2.adjusted_rating)
def test_update_with_annotation(self):
book_preupdate = Book.objects.get(pk=self.b2.pk)
Book.objects.annotate(other_rating=F('rating') - 1).update(rating=F('other_rating'))
book_postupdate = Book.objects.get(pk=self.b2.pk)
self.assertEqual(book_preupdate.rating - 1, book_postupdate.rating)
def test_annotation_with_m2m(self):
books = Book.objects.annotate(author_age=F('authors__age')).filter(pk=self.b1.pk).order_by('author_age')
self.assertEqual(books[0].author_age, 34)
self.assertEqual(books[1].author_age, 35)
def test_annotation_reverse_m2m(self):
books = Book.objects.annotate(
store_name=F('store__name')).filter(
name='Practical Django Projects').order_by(
'store_name')
self.assertQuerysetEqual(
books, [
'Amazon.com',
'Books.com',
'Mamma and Pappa\'s Books'
],
lambda b: b.store_name
)
def test_values_annotation(self):
"""
Annotations can reference fields in a values clause,
and contribute to an existing values clause.
"""
# annotate references a field in values()
qs = Book.objects.values('rating').annotate(other_rating=F('rating') - 1)
book = qs.get(pk=self.b1.pk)
self.assertEqual(book['rating'] - 1, book['other_rating'])
# filter refs the annotated value
book = qs.get(other_rating=4)
self.assertEqual(book['other_rating'], 4)
# can annotate an existing values with a new field
book = qs.annotate(other_isbn=F('isbn')).get(other_rating=4)
self.assertEqual(book['other_rating'], 4)
self.assertEqual(book['other_isbn'], '155860191')
def test_defer_annotation(self):
"""
Deferred attributes can be referenced by an annotation,
but they are not themselves deferred, and cannot be deferred.
"""
qs = Book.objects.defer('rating').annotate(other_rating=F('rating') - 1)
with self.assertNumQueries(2):
book = qs.get(other_rating=4)
self.assertEqual(book.rating, 5)
self.assertEqual(book.other_rating, 4)
with six.assertRaisesRegex(self, FieldDoesNotExist, "\w has no field named u?'other_rating'"):
book = qs.defer('other_rating').get(other_rating=4)
def test_mti_annotations(self):
"""
Fields on an inherited model can be referenced by an
annotated field.
"""
d = DepartmentStore.objects.create(
name='Angus & Robinson',
original_opening=datetime.date(2014, 3, 8),
friday_night_closing=datetime.time(21, 00, 00),
chain='Westfield'
)
books = Book.objects.filter(rating__gt=4)
for b in books:
d.books.add(b)
qs = DepartmentStore.objects.annotate(
other_name=F('name'),
other_chain=F('chain'),
is_open=Value(True, BooleanField()),
book_isbn=F('books__isbn')
).order_by('book_isbn').filter(chain='Westfield')
self.assertQuerysetEqual(
qs, [
('Angus & Robinson', 'Westfield', True, '155860191'),
('Angus & Robinson', 'Westfield', True, '159059725')
],
lambda d: (d.other_name, d.other_chain, d.is_open, d.book_isbn)
)
def test_null_annotation(self):
"""
Test that annotating None onto a model round-trips
"""
book = Book.objects.annotate(no_value=Value(None, output_field=IntegerField())).first()
self.assertIsNone(book.no_value)
def test_order_by_annotation(self):
authors = Author.objects.annotate(other_age=F('age')).order_by('other_age')
self.assertQuerysetEqual(
authors, [
25, 29, 29, 34, 35, 37, 45, 46, 57,
],
lambda a: a.other_age
)
def test_order_by_aggregate(self):
authors = Author.objects.values('age').annotate(age_count=Count('age')).order_by('age_count', 'age')
self.assertQuerysetEqual(
authors, [
(25, 1), (34, 1), (35, 1), (37, 1), (45, 1), (46, 1), (57, 1), (29, 2),
],
lambda a: (a['age'], a['age_count'])
)
def test_annotate_exists(self):
authors = Author.objects.annotate(c=Count('id')).filter(c__gt=1)
self.assertFalse(authors.exists())
def test_column_field_ordering(self):
"""
Test that columns are aligned in the correct order for
resolve_columns. This test will fail on mysql if column
ordering is out. Column fields should be aligned as:
1. extra_select
2. model_fields
3. annotation_fields
4. model_related_fields
"""
store = Store.objects.first()
Employee.objects.create(id=1, first_name='Max', manager=True, last_name='Paine',
store=store, age=23, salary=Decimal(50000.00))
Employee.objects.create(id=2, first_name='Buffy', manager=False, last_name='Summers',
store=store, age=18, salary=Decimal(40000.00))
qs = Employee.objects.extra(
select={'random_value': '42'}
).select_related('store').annotate(
annotated_value=Value(17, output_field=IntegerField())
)
rows = [
(1, 'Max', True, 42, 'Paine', 23, Decimal(50000.00), store.name, 17),
(2, 'Buffy', False, 42, 'Summers', 18, Decimal(40000.00), store.name, 17)
]
self.assertQuerysetEqual(
qs.order_by('id'), rows,
lambda e: (
e.id, e.first_name, e.manager, e.random_value, e.last_name, e.age,
e.salary, e.store.name, e.annotated_value))
def test_column_field_ordering_with_deferred(self):
store = Store.objects.first()
Employee.objects.create(id=1, first_name='Max', manager=True, last_name='Paine',
store=store, age=23, salary=Decimal(50000.00))
Employee.objects.create(id=2, first_name='Buffy', manager=False, last_name='Summers',
store=store, age=18, salary=Decimal(40000.00))
qs = Employee.objects.extra(
select={'random_value': '42'}
).select_related('store').annotate(
annotated_value=Value(17, output_field=IntegerField())
)
rows = [
(1, 'Max', True, 42, 'Paine', 23, Decimal(50000.00), store.name, 17),
(2, 'Buffy', False, 42, 'Summers', 18, Decimal(40000.00), store.name, 17)
]
# and we respect deferred columns!
self.assertQuerysetEqual(
qs.defer('age').order_by('id'), rows,
lambda e: (
e.id, e.first_name, e.manager, e.random_value, e.last_name, e.age,
e.salary, e.store.name, e.annotated_value))
@cxOracle_py3_bug
def test_custom_functions(self):
Company(name='Apple', motto=None, ticker_name='APPL', description='Beautiful Devices').save()
Company(name='Django Software Foundation', motto=None, ticker_name=None, description=None).save()
Company(name='Google', motto='Do No Evil', ticker_name='GOOG', description='Internet Company').save()
Company(name='Yahoo', motto=None, ticker_name=None, description='Internet Company').save()
qs = Company.objects.annotate(
tagline=Func(
F('motto'),
F('ticker_name'),
F('description'),
Value('No Tag'),
function='COALESCE')
).order_by('name')
self.assertQuerysetEqual(
qs, [
('Apple', 'APPL'),
('Django Software Foundation', 'No Tag'),
('Google', 'Do No Evil'),
('Yahoo', 'Internet Company')
],
lambda c: (c.name, c.tagline)
)
@cxOracle_py3_bug
def test_custom_functions_can_ref_other_functions(self):
Company(name='Apple', motto=None, ticker_name='APPL', description='Beautiful Devices').save()
Company(name='Django Software Foundation', motto=None, ticker_name=None, description=None).save()
Company(name='Google', motto='Do No Evil', ticker_name='GOOG', description='Internet Company').save()
Company(name='Yahoo', motto=None, ticker_name=None, description='Internet Company').save()
class Lower(Func):
function = 'LOWER'
qs = Company.objects.annotate(
tagline=Func(
F('motto'),
F('ticker_name'),
F('description'),
Value('No Tag'),
function='COALESCE')
).annotate(
tagline_lower=Lower(F('tagline'), output_field=CharField())
).order_by('name')
# LOWER function supported by:
# oracle, postgres, mysql, sqlite, sqlserver
self.assertQuerysetEqual(
qs, [
('Apple', 'APPL'.lower()),
('Django Software Foundation', 'No Tag'.lower()),
('Google', 'Do No Evil'.lower()),
('Yahoo', 'Internet Company'.lower())
],
lambda c: (c.name, c.tagline_lower)
)
| bsd-3-clause |
brandond/ansible | test/units/modules/network/f5/test_bigip_gtm_global.py | 21 | 3688 | # -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_gtm_global import ApiParameters
from library.modules.bigip_gtm_global import ModuleParameters
from library.modules.bigip_gtm_global import ModuleManager
from library.modules.bigip_gtm_global import ArgumentSpec
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.compat.mock import patch
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_gtm_global import ApiParameters
from ansible.modules.network.f5.bigip_gtm_global import ModuleParameters
from ansible.modules.network.f5.bigip_gtm_global import ModuleManager
from ansible.modules.network.f5.bigip_gtm_global import ArgumentSpec
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.compat.mock import patch
from units.modules.utils import set_module_args
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
synchronization=True,
synchronization_group_name='foo',
synchronize_zone_files=True
)
p = ModuleParameters(params=args)
assert p.synchronization is True
assert p.synchronization_group_name == 'foo'
assert p.synchronize_zone_files is True
def test_api_parameters(self):
args = load_fixture('load_gtm_global_settings_general_1.json')
p = ApiParameters(params=args)
assert p.synchronization is False
assert p.synchronization_group_name == 'default'
assert p.synchronize_zone_files is False
class TestManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
def update(self, *args):
set_module_args(dict(
synchronization="yes",
synchronization_group_name='foo',
synchronize_zone_files="yes",
server='localhost',
password='password',
user='admin'
))
current = ApiParameters(params=load_fixture('load_gtm_global_settings_general_1.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
mm = ModuleManager(module=module)
mm.update_on_device = Mock(return_value=True)
mm.read_current_from_device = Mock(return_value=current)
results = mm.exec_module()
assert results['changed'] is True
assert results['synchronization'] == 'yes'
assert results['synchronization_group_name'] == 'foo'
assert results['synchronize_zone_files'] == 'yes'
| gpl-3.0 |
rarcotvmw/capirca | lib/pcap.py | 1 | 15928 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pcap filter generator.
This generates a pcap packet filter expression that either:
1) Matches (i.e., captures), the packets that match the ACCEPT clauses
specified in a given policy, or
2) Matches the packets that match opposite of that, i.e., the DENY or REJECT
clauses.
Supports TCP flag matching and ICMP types, including IPv6/ICMPv6, but not
much else beyond the standard address, port, and protocol conditions.
Note that this is still alpha and will likely require more testing prior to
having more confidence in it.
Stolen liberally from packetfilter.py.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import datetime
from lib import aclgenerator
from lib import nacaddr
import logging
class Error(Exception):
"""Base error class."""
class UnsupportedActionError(Error):
"""Raised when we see an unsupported action."""
class UnsupportedTargetOption(Error):
"""Raised when we see an unsupported option."""
class Term(aclgenerator.Term):
"""Generate pcap filter to match a policy term."""
_PLATFORM = 'pcap'
_ACTION_TABLE = {
'accept': '',
'deny': '',
'reject': '',
'next': '',
}
_TCP_FLAGS_TABLE = {
'syn': 'tcp-syn',
'ack': 'tcp-ack',
'fin': 'tcp-fin',
'rst': 'tcp-rst',
'urg': 'tcp-urg',
'psh': 'tcp-push',
'all': '(tcp-syn|tcp-ack|tcp-fin|tcp-rst|tcp-urg|tcp-push)',
'none': '(tcp-syn&tcp-ack&tcp-fin&tcp-rst&tcp-urg&tcp-push)',
}
_PROTO_TABLE = {
'ah': 'proto \\ah',
'esp': 'proto \\esp',
'icmp': 'proto \\icmp',
'icmpv6': 'icmp6',
'ip': 'proto \\ip',
'ip6': 'ip6',
'igmp': 'proto \\igmp',
'igrp': 'igrp',
'pim': 'proto \\pim',
'tcp': 'proto \\tcp',
'udp': 'proto \\udp',
# bpf supports "\vrrp", but some winpcap version dont' recognize it,
# so use the IANA protocol number for it:
'vrrp': 'proto 112',
'hopopt': 'ip6 protochain 0',
}
def __init__(self, term, filter_name, af='inet', direction=''):
"""Setup a new term.
Args:
      term: A policy.Term object to represent in the pcap filter.
      filter_name: The name of the filter chain to attach the term to.
af: Which address family ('inet' or 'inet6') to apply the term to.
direction: Direction of the flow.
Raises:
aclgenerator.UnsupportedFilterError: Filter is not supported.
"""
super(Term, self).__init__(term)
self.term = term # term object
self.filter = filter_name # actual name of filter
self.options = []
self.default_action = 'deny'
self.af = af
self.direction = direction
def __str__(self):
"""Render config output from this term object."""
# Verify platform specific terms. Skip whole term if platform does not
# match.
if self.term.platform:
if self._PLATFORM not in self.term.platform:
return ''
if self.term.platform_exclude:
if self._PLATFORM in self.term.platform_exclude:
return ''
conditions = []
# if terms does not specify action, use filter default action
if not self.term.action:
self.term.action[0].value = self.default_action
if str(self.term.action[0]) not in self._ACTION_TABLE:
raise aclgenerator.UnsupportedFilterError('%s %s %s %s' % (
'\n', self.term.name, self.term.action[0],
'action not currently supported.'))
# source address
term_saddrs = self._CheckAddressAf(self.term.source_address)
if not term_saddrs:
logging.debug(self.NO_AF_LOG_ADDR.substitute(term=self.term.name,
direction='source',
af=self.af))
return ''
conditions.append(self._GenerateAddrStatement(
term_saddrs, self.term.source_address_exclude))
# destination address
term_daddrs = self._CheckAddressAf(self.term.destination_address)
if not term_daddrs:
logging.debug(self.NO_AF_LOG_ADDR.substitute(term=self.term.name,
direction='destination',
af=self.af))
return ''
conditions.append(self._GenerateAddrStatement(
term_daddrs, self.term.destination_address_exclude))
# protocol
if self.term.protocol_except:
raise aclgenerator.UnsupportedFilterError('%s %s %s' % (
'\n', self.term.name,
'protocol_except logic not currently supported.'))
conditions.append(self._GenerateProtoStatement(self.term.protocol))
conditions.append(self._GeneratePortStatement(
self.term.source_port, 'src'))
conditions.append(self._GeneratePortStatement(
self.term.destination_port, 'dst'))
# icmp-type
icmp_types = ['']
if self.term.icmp_type:
if self.af != 'mixed':
af = self.af
elif self.term.protocol == ['icmp']:
af = 'inet'
elif self.term.protocol == ['icmp6']:
af = 'inet6'
else:
raise aclgenerator.UnsupportedFilterError('%s %s %s' % (
'\n', self.term.name,
'icmp protocol is not defined or not supported.'))
icmp_types = self.NormalizeIcmpTypes(
self.term.icmp_type, self.term.protocol, af)
if 'icmp' in self.term.protocol:
conditions.append(self._GenerateIcmpType(icmp_types,
self.term.icmp_code))
# tcp options
if 'tcp' in self.term.protocol:
conditions.append(self._GenerateTcpOptions(self.term.option))
cond = Term.JoinConditionals(conditions, 'and')
# Note that directionally-based pcap filter requires post-processing to
# replace 'localhost' with whatever the IP(s) of the local machine happen
    # to be. This bit of logic ensures there's a placeholder with the
# appropriate booleans around it. We also have to check that there exists
# some form of condition already, else we'll end up with something overly
# broad like 'dst net localhost' (e.g., 'default-deny').
if cond and self.direction == 'in':
cond = Term.JoinConditionals(['dst net localhost', cond], 'and')
elif cond and self.direction == 'out':
cond = Term.JoinConditionals(['src net localhost', cond], 'and')
return cond + '\n'
def _CheckAddressAf(self, addrs):
"""Verify that the requested address-family matches the address's family."""
if not addrs:
return ['any']
if self.af == 'mixed':
return addrs
af_addrs = []
af = self.NormalizeAddressFamily(self.af)
for addr in addrs:
if addr.version == af:
af_addrs.append(addr)
return af_addrs
@staticmethod
def JoinConditionals(condition_list, operator):
"""Join conditionals using the specified operator.
Filters out empty elements and blank strings.
Args:
condition_list: a list of str()-able items to join.
operator: the join string.
Returns:
A string consisting of the joined elements. If all elements are False
or whitespace-only, the empty string.
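    Example (illustrative):
      Term.JoinConditionals(['a', '', 'b'], 'or')  # -> '(a or b)'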
"""
condition_list = filter(None, condition_list)
condition_list = [str(x).strip(' ') for x in condition_list
if str(x).strip()]
if not condition_list:
return ''
op = ' %s ' % (operator)
res = '(%s)' % (op.join(condition_list))
return res
def _GenerateAddrStatement(self, addrs, exclude_addrs):
addrlist = []
for d in nacaddr.CollapseAddrListRecursive(addrs):
if d != 'any' and str(d) != '::/0':
addrlist.append('dst net %s' % (d))
excludes = []
if exclude_addrs:
for d in nacaddr.CollapseAddrListRecursive(exclude_addrs):
if d != 'any' and str(d) != '::/0':
excludes.append('not dst net %s' % (d))
else:
# excluding 'any' doesn't really make sense ...
return ''
if excludes:
return Term.JoinConditionals(
[Term.JoinConditionals(addrlist, 'or'),
Term.JoinConditionals(excludes, 'or')], 'and not')
else:
return Term.JoinConditionals(addrlist, 'or')
def _GenerateProtoStatement(self, protocols):
return Term.JoinConditionals(
[self._PROTO_TABLE[p] for p in protocols], 'or')
def _GeneratePortStatement(self, ports, direction):
conditions = []
# term.destination_port is a list of tuples containing the start and end
# ports of the port range. In the event it is a single port, the start
# and end ports are the same.
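    # e.g., ports=[(80, 80), (8000, 8080)] with direction='dst' yields
    # '(dst port 80 or dst portrange 8000-8080)' (illustrative).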
for port_tuple in ports:
if port_tuple[0] == port_tuple[1]:
conditions.append('%s port %s' % (direction, port_tuple[0]))
else:
conditions.append('%s portrange %s-%s' % (
direction, port_tuple[0], port_tuple[1]))
return Term.JoinConditionals(conditions, 'or')
def _GenerateTcpOptions(self, options):
opts = [str(x) for x in options]
tcp_flags_set = []
tcp_flags_check = []
for next_opt in opts:
if next_opt == 'tcp-established':
tcp_flags_set.append(self._TCP_FLAGS_TABLE['ack'])
tcp_flags_check.extend([self._TCP_FLAGS_TABLE['ack']])
else:
# Iterate through flags table, and create list of tcp-flags to append
for next_flag in self._TCP_FLAGS_TABLE:
if next_opt.find(next_flag) == 0:
tcp_flags_check.append(self._TCP_FLAGS_TABLE.get(next_flag))
tcp_flags_set.append(self._TCP_FLAGS_TABLE.get(next_flag))
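    # e.g., options=['syn'] yields
    # '(tcp[tcpflags] & (tcp-syn) == (tcp-syn))' (illustrative).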
if tcp_flags_check:
return '(tcp[tcpflags] & (%s) == (%s))' % ('|'.join(tcp_flags_check),
'|'.join(tcp_flags_set))
return ''
def _GenerateIcmpType(self, icmp_types, icmp_code):
rtr_str = ''
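    # e.g., icmp_types=[8] (echo-request) with no icmp_code yields
    # '(icmp[icmptype] == 8)' (illustrative).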
if icmp_types:
code_strings = ['']
if icmp_code:
code_strings = [' and icmp[icmpcode] == %d' % code for
code in icmp_code]
rtr_str = Term.JoinConditionals(
['icmp[icmptype] == %d%s' % (x, y) for y in code_strings for
x in icmp_types], 'or')
return rtr_str
class PcapFilter(aclgenerator.ACLGenerator):
"""Generates filters and terms from provided policy object.
  Note that since pcap isn't actually a firewall grammar, this generates a
  filter that only matches the traffic that would be accepted by the
  specified policy.
"""
_PLATFORM = 'pcap'
_DEFAULT_PROTOCOL = 'all'
SUFFIX = '.pcap'
_TERM = Term
def __init__(self, *args, **kwargs):
"""Initialize a PcapFilter generator.
Takes standard ACLGenerator arguments, as well as an 'invert' kwarg. If
this argument is true, the pcap filter will be reversed, such that it
matches all those packets that would be denied by the specified policy.
"""
self._invert = False
if 'invert' in kwargs:
self._invert = kwargs['invert']
del kwargs['invert']
super(PcapFilter, self).__init__(*args, **kwargs)
def _BuildTokens(self):
"""Build supported tokens for platform.
Returns:
tuple containing both supported tokens and sub tokens
"""
supported_tokens, supported_sub_tokens = super(
PcapFilter, self)._BuildTokens()
supported_tokens |= {'logging', 'icmp_code'}
supported_tokens -= {'verbatim'}
supported_sub_tokens.update(
{'action': {'accept', 'deny', 'reject', 'next'},
'option': {
'tcp-established',
'established',
'syn',
'ack',
'fin',
'rst',
'urg',
'psh',
'all',
'none'},
})
return supported_tokens, supported_sub_tokens
def _TranslatePolicy(self, pol, exp_info):
self.pcap_policies = []
current_date = datetime.datetime.utcnow().date()
exp_info_date = current_date + datetime.timedelta(weeks=exp_info)
good_afs = ['inet', 'inet6', 'mixed']
good_options = ['in', 'out']
direction = ''
for header, terms in pol.filters:
filter_type = None
if self._PLATFORM not in header.platforms:
continue
filter_options = header.FilterOptions(self._PLATFORM)[1:]
filter_name = header.FilterName(self._PLATFORM)
# ensure all options after the filter name are expected
for opt in filter_options:
if opt not in good_afs + good_options:
raise UnsupportedTargetOption('%s %s %s %s' % (
'\nUnsupported option found in', self._PLATFORM,
'target definition:', opt))
if 'in' in filter_options:
direction = 'in'
elif 'out' in filter_options:
direction = 'out'
# Check for matching af
for address_family in good_afs:
if address_family in filter_options:
# should not specify more than one AF in options
if filter_type is not None:
raise aclgenerator.UnsupportedFilterError('%s %s %s %s' % (
'\nMay only specify one of', good_afs, 'in filter options:',
filter_options))
filter_type = address_family
if filter_type is None:
filter_type = 'mixed'
# add the terms
accept_terms = []
deny_terms = []
term_names = set()
for term in terms:
if term.name in term_names:
raise aclgenerator.DuplicateTermError(
'You have a duplicate term: %s' % term.name)
if term.expiration:
if term.expiration <= exp_info_date:
logging.info('INFO: Term %s in policy %s expires '
'in less than two weeks.', term.name, filter_name)
if term.expiration <= current_date:
logging.warn('WARNING: Term %s in policy %s is expired and '
'will not be rendered.', term.name, filter_name)
continue
if not term:
continue
if term.action[0] == 'accept':
accept_terms.append(self._TERM(term, filter_name, filter_type,
direction))
elif term.action[0] == 'deny' or term.action[0] == 'reject':
deny_terms.append(self._TERM(term, filter_name, filter_type,
direction))
self.pcap_policies.append((header, filter_name, filter_type, accept_terms,
deny_terms))
def __str__(self):
"""Render the output of the PF policy into config."""
target = []
for (unused_header, unused_filter_name, unused_filter_type, accept_terms,
deny_terms) in self.pcap_policies:
accept = []
for term in accept_terms:
term_str = str(term)
if term_str:
accept.append(str(term))
accept_clause = Term.JoinConditionals(accept, 'and')
deny = []
for term in deny_terms:
term_str = str(term)
if term_str:
deny.append(str(term))
deny_clause = Term.JoinConditionals(deny, 'and')
if self._invert:
target.append(
Term.JoinConditionals([deny_clause, accept_clause], 'and not'))
else:
target.append(
Term.JoinConditionals([accept_clause, deny_clause], 'and not'))
return '\nor\n'.join(target) + '\n'
| apache-2.0 |
clubcapra/Ibex | src/seagoatvision_ros/scripts/CapraVision/server/recording/image.py | 1 | 1268 | #! /usr/bin/env python
# Copyright (C) 2012 Club Capra - capra.etsmtl.ca
#
# This file is part of CapraVision.
#
# CapraVision is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import cv2
import os
class ImageRecorder:
def __init__(self, savepath, filtre):
self.savepath = savepath
self.filtre = filtre
self.index = 0
def next_filename(self):
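        # e.g., savepath='/tmp' and a filter class named 'Canny' give
        # '/tmp/Canny/0000000000.png' for index 0 (illustrative).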
return os.path.join(
self.savepath,
self.filtre.__class__.__name__,
str(self.index).zfill(10) + '.png')
def save(self, image):
cv2.imwrite(self.next_filename(), image)
| gpl-3.0 |
beacloudgenius/edx-platform | common/lib/xmodule/xmodule/modulestore/split_mongo/definition_lazy_loader.py | 213 | 1560 | from opaque_keys.edx.locator import DefinitionLocator
import copy
class DefinitionLazyLoader(object):
"""
A placeholder to put into an xblock in place of its definition which
when accessed knows how to get its content. Only useful if the containing
object doesn't force access during init but waits until client wants the
definition. Only works if the modulestore is a split mongo store.
"""
def __init__(self, modulestore, course_key, block_type, definition_id, field_converter):
"""
Simple placeholder for yet-to-be-fetched data
:param modulestore: the pymongo db connection with the definitions
:param definition_locator: the id of the record in the above to fetch
"""
self.modulestore = modulestore
self.course_key = course_key
self.definition_locator = DefinitionLocator(block_type, definition_id)
self.field_converter = field_converter
def fetch(self):
"""
Fetch the definition. Note, the caller should replace this lazy
loader pointer with the result so as not to fetch more than once
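        Example (illustrative; ``block.definition`` is a hypothetical
        attribute holding this loader)::

            if isinstance(block.definition, DefinitionLazyLoader):
                block.definition = block.definition.fetch()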
"""
# get_definition may return a cached value perhaps from another course or code path
# so, we copy the result here so that updates don't cross-pollinate nor change the cached
# value in such a way that we can't tell that the definition's been updated.
definition = self.modulestore.get_definition(self.course_key, self.definition_locator.definition_id)
return copy.deepcopy(definition)
| agpl-3.0 |
ahmetabdi/SickRage | autoProcessTV/lib/requests/packages/urllib3/response.py | 316 | 10537 | # urllib3/response.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import logging
import zlib
import io
from .exceptions import DecodeError
from .packages.six import string_types as basestring, binary_type
from .util import is_fp_closed
log = logging.getLogger(__name__)
class DeflateDecoder(object):
def __init__(self):
self._first_try = True
self._data = binary_type()
self._obj = zlib.decompressobj()
def __getattr__(self, name):
return getattr(self._obj, name)
def decompress(self, data):
if not self._first_try:
return self._obj.decompress(data)
self._data += data
try:
return self._obj.decompress(data)
except zlib.error:
self._first_try = False
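            # Some servers send a raw deflate stream without the zlib
            # header; fall back to a raw-deflate decompressor and replay
            # the data seen so far.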
self._obj = zlib.decompressobj(-zlib.MAX_WBITS)
try:
return self.decompress(self._data)
finally:
self._data = None
def _get_decoder(mode):
if mode == 'gzip':
return zlib.decompressobj(16 + zlib.MAX_WBITS)
return DeflateDecoder()
class HTTPResponse(io.IOBase):
"""
HTTP Response container.
Backwards-compatible to httplib's HTTPResponse but the response ``body`` is
loaded and decoded on-demand when the ``data`` property is accessed.
Extra parameters for behaviour not present in httplib.HTTPResponse:
:param preload_content:
If True, the response's body will be preloaded during construction.
:param decode_content:
If True, attempts to decode specific content-encoding's based on headers
(like 'gzip' and 'deflate') will be skipped and raw data will be used
instead.
:param original_response:
When this HTTPResponse wrapper is generated from an httplib.HTTPResponse
object, it's convenient to include the original for debug purposes. It's
otherwise unused.
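    Example (illustrative; ``fp`` is any file-like object)::

        r = HTTPResponse(body=fp, headers={}, status=200,
                         preload_content=False)
        data = r.read(decode_content=True)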
"""
CONTENT_DECODERS = ['gzip', 'deflate']
REDIRECT_STATUSES = [301, 302, 303, 307, 308]
def __init__(self, body='', headers=None, status=0, version=0, reason=None,
strict=0, preload_content=True, decode_content=True,
original_response=None, pool=None, connection=None):
self.headers = headers or {}
self.status = status
self.version = version
self.reason = reason
self.strict = strict
self.decode_content = decode_content
self._decoder = None
self._body = body if body and isinstance(body, basestring) else None
self._fp = None
self._original_response = original_response
self._fp_bytes_read = 0
self._pool = pool
self._connection = connection
if hasattr(body, 'read'):
self._fp = body
if preload_content and not self._body:
self._body = self.read(decode_content=decode_content)
def get_redirect_location(self):
"""
Should we redirect and where to?
:returns: Truthy redirect location string if we got a redirect status
code and valid location. ``None`` if redirect status and no
location. ``False`` if not a redirect status code.
"""
if self.status in self.REDIRECT_STATUSES:
return self.headers.get('location')
return False
def release_conn(self):
if not self._pool or not self._connection:
return
self._pool._put_conn(self._connection)
self._connection = None
@property
def data(self):
        # For backwards-compat with urllib3 0.4 and earlier.
if self._body:
return self._body
if self._fp:
return self.read(cache_content=True)
def tell(self):
"""
Obtain the number of bytes pulled over the wire so far. May differ from
        the amount of content returned by :meth:`HTTPResponse.read` if bytes
are encoded on the wire (e.g, compressed).
"""
return self._fp_bytes_read
def read(self, amt=None, decode_content=None, cache_content=False):
"""
Similar to :meth:`httplib.HTTPResponse.read`, but with two additional
parameters: ``decode_content`` and ``cache_content``.
:param amt:
How much of the content to read. If specified, caching is skipped
because it doesn't make sense to cache partial content as the full
response.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
:param cache_content:
If True, will save the returned data such that the same result is
returned despite of the state of the underlying file object. This
is useful if you want the ``.data`` property to continue working
after having ``.read()`` the file object. (Overridden if ``amt`` is
set.)
"""
# Note: content-encoding value should be case-insensitive, per RFC 2616
# Section 3.5
content_encoding = self.headers.get('content-encoding', '').lower()
if self._decoder is None:
if content_encoding in self.CONTENT_DECODERS:
self._decoder = _get_decoder(content_encoding)
if decode_content is None:
decode_content = self.decode_content
if self._fp is None:
return
flush_decoder = False
try:
if amt is None:
# cStringIO doesn't like amt=None
data = self._fp.read()
flush_decoder = True
else:
cache_content = False
data = self._fp.read(amt)
if amt != 0 and not data: # Platform-specific: Buggy versions of Python.
# Close the connection when no data is returned
#
# This is redundant to what httplib/http.client _should_
# already do. However, versions of python released before
# December 15, 2012 (http://bugs.python.org/issue16298) do not
# properly close the connection in all cases. There is no harm
# in redundantly calling close.
self._fp.close()
flush_decoder = True
self._fp_bytes_read += len(data)
try:
if decode_content and self._decoder:
data = self._decoder.decompress(data)
except (IOError, zlib.error) as e:
raise DecodeError(
"Received response with content-encoding: %s, but "
"failed to decode it." % content_encoding,
e)
if flush_decoder and decode_content and self._decoder:
buf = self._decoder.decompress(binary_type())
data += buf + self._decoder.flush()
if cache_content:
self._body = data
return data
finally:
if self._original_response and self._original_response.isclosed():
self.release_conn()
def stream(self, amt=2**16, decode_content=None):
"""
A generator wrapper for the read() method. A call will block until
``amt`` bytes have been read from the connection or until the
connection is closed.
:param amt:
How much of the content to read. The generator will return up to
            this much data per iteration, but may return less. This is particularly
likely when using compressed data. However, the empty string will
never be returned.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
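        Example (illustrative; ``response`` is an :class:`HTTPResponse`
        and ``process`` stands in for any consumer)::

            for chunk in response.stream(2**10):
                process(chunk)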
"""
while not is_fp_closed(self._fp):
data = self.read(amt=amt, decode_content=decode_content)
if data:
yield data
@classmethod
def from_httplib(ResponseCls, r, **response_kw):
"""
Given an :class:`httplib.HTTPResponse` instance ``r``, return a
corresponding :class:`urllib3.response.HTTPResponse` object.
Remaining parameters are passed to the HTTPResponse constructor, along
with ``original_response=r``.
"""
# Normalize headers between different versions of Python
headers = {}
for k, v in r.getheaders():
# Python 3: Header keys are returned capitalised
k = k.lower()
has_value = headers.get(k)
if has_value: # Python 3: Repeating header keys are unmerged.
v = ', '.join([has_value, v])
headers[k] = v
# HTTPResponse objects in Python 3 don't have a .strict attribute
strict = getattr(r, 'strict', 0)
return ResponseCls(body=r,
headers=headers,
status=r.status,
version=r.version,
reason=r.reason,
strict=strict,
original_response=r,
**response_kw)
# Backwards-compatibility methods for httplib.HTTPResponse
def getheaders(self):
return self.headers
def getheader(self, name, default=None):
return self.headers.get(name, default)
# Overrides from io.IOBase
def close(self):
if not self.closed:
self._fp.close()
@property
def closed(self):
if self._fp is None:
return True
elif hasattr(self._fp, 'closed'):
return self._fp.closed
elif hasattr(self._fp, 'isclosed'): # Python 2
return self._fp.isclosed()
else:
return True
def fileno(self):
if self._fp is None:
raise IOError("HTTPResponse has no file to get a fileno from")
elif hasattr(self._fp, "fileno"):
return self._fp.fileno()
else:
raise IOError("The file-like object this HTTPResponse is wrapped "
"around has no file descriptor")
def flush(self):
if self._fp is not None and hasattr(self._fp, 'flush'):
return self._fp.flush()
def readable(self):
return True
| gpl-3.0 |
gmr/infoblox | infoblox/record.py | 1 | 15975 | """
Base Record Object
"""
import logging
from infoblox import exceptions
from infoblox import mapping
LOGGER = logging.getLogger(__name__)
class Record(mapping.Mapping):
"""This object is extended by specific Infoblox record types and implements
the core API behavior of a record class. Attributes that map to other
infoblox records will be instances of those record types.
:param infoblox.Session session: The infoblox session object
:param str reference_id: The infoblox _ref value for the record
    :param dict kwargs: Key-value pairs; when passed in, if a key matches
        an attribute of the record, the value will be assigned.
"""
view = 'default'
_ref = None
_repr_keys = ['_ref']
_return_ignore = ['view']
_save_ignore = []
_search_by = []
_session = None
_supports = []
_wapi_type = 'record'
def __init__(self, session, reference_id=None, **kwargs):
"""Create a new instance of the Record passing in the Infoblox
session object and the reference id for the record.
"""
super(Record, self).__init__(**kwargs)
self._session = session
self._ref = reference_id
self._search_values = self._build_search_values(kwargs)
if self._ref or self._search_values:
self.fetch()
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__,
' '.join(['%s=%s' % (key, getattr(self, key))
for key in self._repr_keys]))
def delete(self):
"""Remove the item from the infoblox server.
:rtype: bool
:raises: AssertionError
:raises: ValueError
:raises: infoblox.exceptions.ProtocolError
"""
if not self._ref:
raise ValueError('Object has no reference id for deletion')
        if 'delete' not in self._supports:
            raise AssertionError('Can not delete this object type')
response = self._session.delete(self._path)
if response.status_code == 200:
self._ref = None
self.clear()
return True
try:
error = response.json()
raise exceptions.ProtocolError(error['text'])
except ValueError:
raise exceptions.ProtocolError(response.content)
def fetch(self):
"""Attempt to fetch the object from the Infoblox device. If successful
the object will be updated and the method will return True.
:rtype: bool
:raises: infoblox.exceptions.ProtocolError
"""
LOGGER.debug('Fetching %s, %s', self._path, self._search_values)
response = self._session.get(self._path, self._search_values,
{'_return_fields': self._return_fields})
if response.status_code == 200:
values = response.json()
self._assign(values)
return bool(values)
elif response.status_code >= 400:
try:
error = response.json()
raise exceptions.ProtocolError(error['text'])
except ValueError:
raise exceptions.ProtocolError(response.content)
return False
def reference_id(self):
"""Return a read-only handle for the reference_id of this object.
"""
return str(self._ref)
def save(self):
"""Update the infoblox with new values for the specified object, or add
        the values if it's a new object altogether.
:raises: AssertionError
:raises: infoblox.exceptions.ProtocolError
"""
if 'save' not in self._supports:
raise AssertionError('Can not save this object type')
values = {}
for key in [key for key in self.keys() if key not in self._save_ignore]:
if not getattr(self, key) and getattr(self, key) != False:
continue
if isinstance(getattr(self, key, None), list):
value = list()
for item in getattr(self, key):
if isinstance(item, dict):
value.append(item)
elif hasattr(item, '_save_as'):
value.append(item._save_as())
elif hasattr(item, '_ref') and getattr(item, '_ref'):
value.append(getattr(item, '_ref'))
else:
LOGGER.warning('Cant assign %r', item)
values[key] = value
elif getattr(self, key, None):
values[key] = getattr(self, key)
if not self._ref:
response = self._session.post(self._path, values)
else:
values['_ref'] = self._ref
response = self._session.put(self._path, values)
LOGGER.debug('Response: %r, %r', response.status_code, response.content)
if 200 <= response.status_code <= 201:
self.fetch()
return True
else:
try:
error = response.json()
raise exceptions.ProtocolError(error['text'])
except ValueError:
raise exceptions.ProtocolError(response.content)
def _assign(self, values):
"""Assign the values passed as either a dict or list to the object if
the key for each value matches an available attribute on the object.
:param dict values: The values to assign
"""
LOGGER.debug('Assigning values: %r', values)
if not values:
return
keys = self.keys()
if not self._ref:
keys.append('_ref')
if isinstance(values, dict):
for key in keys:
if values.get(key):
if isinstance(values.get(key), list):
items = list()
for item in values[key]:
if isinstance(item, dict):
if '_ref' in item:
obj_class = get_class(item['_ref'])
if obj_class:
items.append(obj_class(self._session,
**item))
else:
items.append(item)
setattr(self, key, items)
else:
setattr(self, key, values[key])
elif isinstance(values, list):
self._assign(values[0])
else:
LOGGER.critical('Unhandled return type: %r', values)
def _build_search_values(self, kwargs):
"""Build the search criteria dictionary. It will first try and build
the values from already set attributes on the object, falling back
to the passed in kwargs.
:param dict kwargs: Values to build the dict from
:rtype: dict
"""
criteria = {}
for key in self._search_by:
if getattr(self, key, None):
criteria[key] = getattr(self, key)
elif key in kwargs and kwargs.get(key):
criteria[key] = kwargs.get(key)
return criteria
@property
def _path(self):
return self._ref if self._ref else self._wapi_type
@property
def _return_fields(self):
return ','.join([key for key in self.keys()
if key not in self._return_ignore])
class Host(Record):
"""Implements the host record type.
Example::
session = infoblox.Session(infoblox_host,
infoblox_user,
infoblox_password)
host = infoblox.Host(session, name='foo.bar.net')
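        host.add_ipv4addr('1.2.3.4')
        host.save()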
"""
aliases = []
comment = None
configure_for_dns = True
disable = False
dns_aliases = []
dns_name = None
extattrs = None
ipv4addrs = []
ipv6addrs = []
name = None
rrset_order = 'cyclic'
ttl = None
use_ttl = False
zone = None
_repr_keys = ['name', 'ipv4addrs', 'ipv6addrs']
_save_ignore = ['dns_name', 'host', 'zone']
_search_by = ['name', 'ipv4addr', 'ipv6addr', 'mac']
_supports = ['delete', 'save']
_wapi_type = 'record:host'
def __init__(self, session, reference_id=None, name=None, **kwargs):
"""Create a new instance of a Host object. If a reference_id or valid
search criteria are passed in, the object will attempt to load the
values for the host from the Infoblox device.
        When creating a new host or adding an IP address, use the
        Host.add_ipv4addr and Host.add_ipv6addr methods::
host.add_ipv4addr('1.2.3.4')
Valid search criteria: name, ipv4addr, ipv6addr, mac
:param infobox.Session session: The established session object
:param str reference_id: The Infoblox reference id for the host
:param str host: The host's FQDN
:param dict kwargs: Optional keyword arguments
"""
self.name = name
super(Host, self).__init__(session, reference_id, **kwargs)
def add_ipv4addr(self, ipv4addr):
"""Add an IPv4 address to the host.
:param str ipv4addr: The IP address to add.
:raises: ValueError
"""
for addr in self.ipv4addrs:
if ((isinstance(addr, dict) and addr['ipv4addr'] == ipv4addr) or
(isinstance(addr, HostIPv4) and addr.ipv4addr == ipv4addr)):
raise ValueError('Already exists')
self.ipv4addrs.append({'ipv4addr': ipv4addr})
def remove_ipv4addr(self, ipv4addr):
"""Remove an IPv4 address from the host.
:param str ipv4addr: The IP address to remove
"""
for addr in self.ipv4addrs:
if ((isinstance(addr, dict) and addr['ipv4addr'] == ipv4addr) or
(isinstance(addr, HostIPv4) and addr.ipv4addr == ipv4addr)):
self.ipv4addrs.remove(addr)
break
def add_ipv6addr(self, ipv6addr):
"""Add an IPv6 address to the host.
:param str ipv6addr: The IP address to add.
:raises: ValueError
"""
for addr in self.ipv6addrs:
if ((isinstance(addr, dict) and addr['ipv6addr'] == ipv6addr) or
                    (isinstance(addr, HostIPv6) and addr.ipv6addr == ipv6addr)):
raise ValueError('Already exists')
self.ipv6addrs.append({'ipv6addr': ipv6addr})
def remove_ipv6addr(self, ipv6addr):
"""Remove an IPv6 address from the host.
:param str ipv6addr: The IP address to remove
"""
for addr in self.ipv6addrs:
if ((isinstance(addr, dict) and addr['ipv6addr'] == ipv6addr) or
                    (isinstance(addr, HostIPv6) and addr.ipv6addr == ipv6addr)):
self.ipv6addrs.remove(addr)
break
class HostIPv4(Record):
"""Implements the host_ipv4addr record type.
"""
bootfile = None
bootserver = None
configure_for_dhcp = None
deny_bootp = None
discovered_data = None
enable_pxe_lease_time = None
host = None
ignore_client_requested_options = None
ipv4addr = None
last_queried = None
mac = None
match_client = None
network = None
nextserver = None
options = None
pxe_lease_time = None
use_bootfile = None
use_bootserver = None
use_deny_bootp = None
use_for_ea_inheritance = None
use_ignore_client_requested_options = None
use_nextserver = None
use_options = None
use_pxe_lease_time = None
_repr_keys = ['ipv4addr']
_search_by = ['ipv4addr']
_wapi_type = 'record:host_ipv4addr'
def __init__(self, session, reference_id=None, ipv4addr=None, **kwargs):
"""Create a new instance of a HostIPv4 object. If a reference_id or
valid search criteria are passed in, the object will attempt to load
the values for the host_ipv4addr from the Infoblox device.
Valid search criteria: ipv4addr
:param infobox.Session session: The established session object
:param str reference_id: The Infoblox reference id for the host
:param str ipv4addr: The ipv4 address
:param dict kwargs: Optional keyword arguments
"""
self.ipv4addr = str(ipv4addr)
super(HostIPv4, self).__init__(session, reference_id, **kwargs)
def _save_as(self):
return {'ipv4addr': self.ipv4addr}
class HostIPv6(Record):
"""Implements the host_ipv6addr record type.
"""
address_type = None
configure_for_dhcp = True
discovered_data = None
domain_name = None
domain_name_servers = []
duid = None
host = None
ipv6addr = None
ipv6bits = None
ipv6prefix_bits = None
match_client = None
options = None
preferred_lifetime = 27000
use_domain_name = False
use_domain_name_servers = False
use_for_ea_inheritance = False
use_options = False
use_valid_lifetime = False
valid_lifetime = 43200
_repr_keys = ['ipv6addr', 'ipv6bits', 'ipv6prefix_bits']
_save_ignore = ['host']
_search_by = ['ipv6addr']
_wapi_type = 'record:host_ipv6addr'
def __init__(self, session, reference_id=None, ipv6addr=None,
ipv6bits=None, ipv6prefix_bits=None, **kwargs):
"""Create a new instance of a HostIPv6 object. If a reference_id or
valid search criteria are passed in, the object will attempt to load
the values for the host_ipv6addr from the Infoblox device.
Valid search criteria: ipv6addr
:param infobox.Session session: The established session object
:param str reference_id: The Infoblox reference id for the host
:param str ipv6addr: The ipv6 address
:param str ipv6bits: The ipv6 address bit count
:param str ipv6prefix_bits: The ipv6 address prefix bit count
:param dict kwargs: Optional keyword arguments
"""
self.ipv6addr = str(ipv6addr)
self.ipv6bits = str(ipv6bits)
self.ipv6prefix_bits = str(ipv6prefix_bits)
super(HostIPv6, self).__init__(session, reference_id, **kwargs)
def _save_as(self):
return {'ipv6addr': self.ipv6addr,
'ipv6bits': self.ipv6bits,
'ipv6prefix_bits': self.ipv6prefix_bits}
class IPv4Address(Record):
"""Implements the ipv4address record type.
"""
dhcp_client_identifier = None
extattrs = None
fingerprint = None
ip_address = None
is_conflict = None
lease_state = None
mac_address = None
names = None
network = None
network_view = None
objects = None
status = None
types = None
usage = None
username = None
_repr_keys = ['ip_address']
_search_by = ['ip_address']
_supports = ['fetch', 'put']
    _wapi_type = 'ipv4address'
def __init__(self, session, reference_id=None, ipv4addr=None, **kwargs):
"""Create a new instance of a HostIPv4 object. If a reference_id or
valid search criteria are passed in, the object will attempt to load
the values for the host_ipv4addr from the Infoblox device.
Valid search criteria: ipv4addr
:param infobox.Session session: The established session object
:param str reference_id: The Infoblox reference id for the host
:param str ipv4addr: The ipv4 address
:param dict kwargs: Optional keyword arguments
"""
self.ipv4addr = str(ipv4addr)
super(IPv4Address, self).__init__(session, reference_id, **kwargs)
def get_class(reference):
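    # e.g., get_class('record:host/ZG5z...:foo.example.com/default')
    # returns Host (illustrative reference string).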
class_name = reference.split('/')[0].split(':')[1]
LOGGER.debug('Class: %s', class_name)
return CLASS_MAP.get(class_name)
CLASS_MAP = {'host': Host,
'host_ipv4addr': HostIPv4,
'host_ipv6addr': HostIPv6,
'ipv4address': IPv4Address}
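# A minimal, hypothetical usage sketch (the reference string and the session
# object are illustrative assumptions inferred from get_class()'s parsing
# logic and the docstrings above, not taken from Infoblox documentation):
#
#   ref = 'record:host/ZG5zLmhvc3Q:www.example.com/default'
#   cls = get_class(ref)  # 'record:host' -> 'host' -> Host
#   obj = cls(session, reference_id=ref)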
| bsd-3-clause |
jalexvig/tensorflow | tensorflow/python/kernel_tests/decode_compressed_op_test.py | 29 | 2683 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DecodeRaw op from parsing_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import zlib
from six import BytesIO
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import test
class DecodeCompressedOpTest(test.TestCase):
def _compress(self, bytes_in, compression_type):
if not compression_type:
return bytes_in
elif compression_type == "ZLIB":
return zlib.compress(bytes_in)
else:
out = BytesIO()
with gzip.GzipFile(fileobj=out, mode="wb") as f:
f.write(bytes_in)
return out.getvalue()
def testDecompress(self):
for compression_type in ["ZLIB", "GZIP", ""]:
with self.test_session():
in_bytes = array_ops.placeholder(dtypes.string, shape=[2])
decompressed = parsing_ops.decode_compressed(
in_bytes, compression_type=compression_type)
self.assertEqual([2], decompressed.get_shape().as_list())
result = decompressed.eval(
feed_dict={in_bytes: [self._compress(b"AaAA", compression_type),
self._compress(b"bBbb", compression_type)]})
self.assertAllEqual([b"AaAA", b"bBbb"], result)
def testDecompressWithRaw(self):
for compression_type in ["ZLIB", "GZIP", ""]:
with self.test_session():
in_bytes = array_ops.placeholder(dtypes.string, shape=[None])
decompressed = parsing_ops.decode_compressed(
in_bytes, compression_type=compression_type)
decode = parsing_ops.decode_raw(decompressed, out_type=dtypes.int16)
result = decode.eval(
feed_dict={in_bytes: [self._compress(b"AaBC", compression_type)]})
self.assertAllEqual(
[[ord("A") + ord("a") * 256, ord("B") + ord("C") * 256]], result)
if __name__ == "__main__":
test.main()
| apache-2.0 |
sahana/Turkey | modules/templates/DRK/layouts.py | 5 | 10012 | # -*- coding: utf-8 -*-
from gluon import *
from gluon.storage import Storage
from s3 import *
from s3theme import NAV, SECTION
# =============================================================================
class S3MainMenuLayout(S3NavigationItem):
""" Custom Main Menu Layout """
@staticmethod
def layout(item):
""" Custom Layout Method """
# Manage flags: hide any disabled/unauthorized items
if not item.authorized:
item.enabled = False
item.visible = False
elif item.enabled is None or item.enabled:
item.enabled = True
item.visible = True
if item.enabled and item.visible:
items = item.render_components()
if item.parent is not None:
classes = []
if item.parent.parent is None:
# Item at the top-level?
toplevel = True
if item.opts.right:
classes.append("menu-right")
else:
toplevel = False
if item.components:
classes.append("has-dropdown not-click")
if item.selected:
classes.append("active")
_class = " ".join(classes)
# Menu item with Dropdown
if item.get_first(enabled=True):
_href = item.url()
return LI(A(item.label,
_href=_href,
_id=item.attr._id
),
UL(items,
_class="dropdown"
),
_class=_class,
)
else:
# Menu item without Drop-Down
if toplevel:
item_url = item.url()
if item_url == URL(c="default", f="index"):
classes.append("menu-home")
if item.selected:
classes.append("active")
_class = " ".join(classes)
return LI(A(item.label,
_href=item_url,
_id=item.attr._id,
),
_class=_class,
)
else:
# Submenu item
if isinstance(item.label, dict):
if "name" in item.label:
label = item.label["name"]
else:
return None
else:
label = item.label
link = A(label, _href=item.url(), _id=item.attr._id)
return LI(link)
else:
# Main menu
right = []
left = []
for item in items:
if "menu-right" in item["_class"]:
item.remove_class("menu-right")
right.append(item)
else:
left.append(item)
right.reverse()
if current.response.s3.rtl:
right, left = left, right
return NAV(UL(LI(A(" ",
_href=URL(c="default", f="index"),
),
_class="name"
),
LI(A(SPAN(current.T("Menu"))),
_class="toggle-topbar menu-icon",
),
_class="title-area",
),
SECTION(UL(right, _class="right"),
UL(left, _class="left"),
_class="top-bar-section",
),
_class = "top-bar",
data = {"topbar": " "},
)
else:
return None
# ---------------------------------------------------------------------
@staticmethod
def checkbox_item(item):
""" Render special active items """
name = item.label
link = item.url()
_id = name["id"]
if "name" in name:
_name = name["name"]
else:
_name = ""
if "value" in name:
_value = name["value"]
else:
_value = False
if "request_type" in name:
_request_type = name["request_type"]
else:
_request_type = "ajax"
if link:
if _request_type == "ajax":
_onchange='''var val=$('#%s:checked').length;$.getS3('%s'+'?val='+val,null,false,null,false,false)''' % \
(_id, link)
else:
# Just load the page. Use this if the changed menu
# item should alter the contents of the page, and
# it's simpler just to load it.
_onchange="location.href='%s'" % link
else:
_onchange=None
return LI(A(INPUT(_type="checkbox",
_id=_id,
_onchange=_onchange,
value=_value,
),
"%s" % _name,
_nowrap="nowrap",
),
_class="menu-toggle",
)
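# Hedged usage sketch: only the dict keys ("id", "name", "value",
# "request_type") come from the parsing above; the values shown here are
# illustrative assumptions. A checkbox entry is declared elsewhere with a
# dict label such as
#   {"id": "show_obsolete", "name": "Show obsolete", "value": True,
#    "request_type": "ajax"}
# and is then rendered through checkbox_item().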
# =============================================================================
class S3PersonalMenuLayout(S3NavigationItem):
@staticmethod
def layout(item):
if item.parent is None:
# The menu
items = item.render_components()
if items:
return TAG["ul"](items, _class="sub-nav personal-menu")
else:
return "" # menu is empty
else:
# A menu item
if item.enabled and item.authorized:
return TAG["li"](A(item.label, _href=item.url()))
else:
return None
# -----------------------------------------------------------------------------
# Shortcut
MP = S3PersonalMenuLayout
# =============================================================================
class S3AboutMenuLayout(S3NavigationItem):
@staticmethod
def layout(item):
if item.parent is None:
# The menu
items = item.render_components()
if items:
return TAG["ul"](items, _class="sub-nav about-menu left")
else:
return "" # menu is empty
else:
# A menu item
if item.enabled and item.authorized:
return TAG["li"](A(item.label, _href=item.url()))
else:
return None
# -----------------------------------------------------------------------------
# Shortcut
MA = S3AboutMenuLayout
# =============================================================================
class S3LanguageMenuLayout(S3NavigationItem):
@staticmethod
def layout(item):
""" Language menu layout
options for each entry:
- lang_code: the language code
- lang_name: the language name
option for the menu
- current_language: code of the current language
"""
if item.enabled:
if item.components:
# The language menu itself
current_language = current.T.accepted_language
items = item.render_components()
select = SELECT(items, value=current_language,
_name="_language",
# @ToDo T:
_title="Language Selection",
_onchange="S3.reloadWithQueryStringVars({'_language':$(this).val()});")
form = FORM(select, _class="language-selector",
_name="_language",
_action="",
_method="get")
return form
else:
# A language entry
return OPTION(item.opts.lang_name,
_value=item.opts.lang_code)
else:
return None
# -------------------------------------------------------------------------
def check_enabled(self):
""" Check whether the language menu is enabled """
if current.deployment_settings.get_L10n_display_toolbar():
return True
else:
return False
# -----------------------------------------------------------------------------
# Shortcut
ML = S3LanguageMenuLayout
# =============================================================================
class S3OrgMenuLayout(S3NavigationItem):
""" Layout for the organisation-specific menu """
@staticmethod
def layout(item):
name = "Deutsches Rotes Kreuz"
logo = IMG(_src = "/%s/static/themes/DRK/img/logo_small.png" %
current.request.application,
_alt = "Deutsches Rotes Kreuz",
_width=40,
)
# Note: render using current.menu.org.render()[0] + current.menu.org.render()[1]
return (name, logo)
# -----------------------------------------------------------------------------
# Shortcut
OM = S3OrgMenuLayout
# END =========================================================================
| mit |
prakxys/flask | Work/Trivia - Module 5/env/Lib/site-packages/pip/_vendor/requests/packages/chardet/compat.py | 2943 | 1157 | ######################## BEGIN LICENSE BLOCK ########################
# Contributor(s):
# Ian Cordasco - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
if sys.version_info < (3, 0):
base_str = (str, unicode)
else:
base_str = (bytes, str)
def wrap_ord(a):
if sys.version_info < (3, 0) and isinstance(a, base_str):
return ord(a)
else:
return a
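# Example: on Python 2, iterating a str yields one-character strings, so
# wrap_ord('A') returns 65; on Python 3, iterating a bytes object already
# yields ints, so wrap_ord(65) simply returns 65 unchanged.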
| apache-2.0 |
jbouse-debian/paramiko | tests/test_kex.py | 2 | 22512 | # Copyright (C) 2003-2009 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
Some unit tests for the key exchange protocols.
"""
from binascii import hexlify, unhexlify
import os
import unittest
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import ec
import paramiko.util
from paramiko.kex_group1 import KexGroup1
from paramiko.kex_gex import KexGex, KexGexSHA256
from paramiko import Message
from paramiko.common import byte_chr
from paramiko.kex_ecdh_nist import KexNistp256
def dummy_urandom(n):
return byte_chr(0xcc) * n
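# Together with dummy_urandom above, the next helper removes all randomness
# from the ECDH exchange (see setUp/tearDown below), so each test can assert
# against fixed, precomputed hashes, shared secrets, and packet bytes.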
def dummy_generate_key_pair(obj):
private_key_value = 94761803665136558137557783047955027733968423115106677159790289642479432803037
public_key_numbers = "042bdab212fa8ba1b7c843301682a4db424d307246c7e1e6083c41d9ca7b098bf30b3d63e2ec6278488c135360456cc054b3444ecc45998c08894cbc1370f5f989"
public_key_numbers_obj = ec.EllipticCurvePublicNumbers.from_encoded_point(ec.SECP256R1(), unhexlify(public_key_numbers))
obj.P = ec.EllipticCurvePrivateNumbers(private_value=private_key_value, public_numbers=public_key_numbers_obj).private_key(default_backend())
if obj.transport.server_mode:
obj.Q_S = ec.EllipticCurvePublicNumbers.from_encoded_point(ec.SECP256R1(), unhexlify(public_key_numbers)).public_key(default_backend())
return
obj.Q_C = ec.EllipticCurvePublicNumbers.from_encoded_point(ec.SECP256R1(), unhexlify(public_key_numbers)).public_key(default_backend())
class FakeKey (object):
def __str__(self):
return 'fake-key'
def asbytes(self):
return b'fake-key'
def sign_ssh_data(self, H):
return b'fake-sig'
class FakeModulusPack (object):
P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF
G = 2
def get_modulus(self, min, ask, max):
return self.G, self.P
class FakeTransport(object):
local_version = 'SSH-2.0-paramiko_1.0'
remote_version = 'SSH-2.0-lame'
local_kex_init = 'local-kex-init'
remote_kex_init = 'remote-kex-init'
def _send_message(self, m):
self._message = m
def _expect_packet(self, *t):
self._expect = t
def _set_K_H(self, K, H):
self._K = K
self._H = H
def _verify_key(self, host_key, sig):
self._verify = (host_key, sig)
def _activate_outbound(self):
self._activated = True
def _log(self, level, s):
pass
def get_server_key(self):
return FakeKey()
def _get_modulus_pack(self):
return FakeModulusPack()
class KexTest (unittest.TestCase):
K = 14730343317708716439807310032871972459448364195094179797249681733965528989482751523943515690110179031004049109375612685505881911274101441415545039654102474376472240501616988799699744135291070488314748284283496055223852115360852283821334858541043710301057312858051901453919067023103730011648890038847384890504
def setUp(self):
self._original_urandom = os.urandom
os.urandom = dummy_urandom
self._original_generate_key_pair = KexNistp256._generate_key_pair
KexNistp256._generate_key_pair = dummy_generate_key_pair
def tearDown(self):
os.urandom = self._original_urandom
KexNistp256._generate_key_pair = self._original_generate_key_pair
def test_1_group1_client(self):
transport = FakeTransport()
transport.server_mode = False
kex = KexGroup1(transport)
kex.start_kex()
x = b'1E000000807E2DDB1743F3487D6545F04F1C8476092FB912B013626AB5BCEB764257D88BBA64243B9F348DF7B41B8C814A995E00299913503456983FFB9178D3CD79EB6D55522418A8ABF65375872E55938AB99A84A0B5FC8A1ECC66A7C3766E7E0F80B7CE2C9225FC2DD683F4764244B72963BBB383F529DCF0C5D17740B8A2ADBE9208D4'
self.assertEqual(x, hexlify(transport._message.asbytes()).upper())
self.assertEqual((paramiko.kex_group1._MSG_KEXDH_REPLY,), transport._expect)
# fake "reply"
msg = Message()
msg.add_string('fake-host-key')
msg.add_mpint(69)
msg.add_string('fake-sig')
msg.rewind()
kex.parse_next(paramiko.kex_group1._MSG_KEXDH_REPLY, msg)
H = b'03079780F3D3AD0B3C6DB30C8D21685F367A86D2'
self.assertEqual(self.K, transport._K)
self.assertEqual(H, hexlify(transport._H).upper())
self.assertEqual((b'fake-host-key', b'fake-sig'), transport._verify)
self.assertTrue(transport._activated)
def test_2_group1_server(self):
transport = FakeTransport()
transport.server_mode = True
kex = KexGroup1(transport)
kex.start_kex()
self.assertEqual((paramiko.kex_group1._MSG_KEXDH_INIT,), transport._expect)
msg = Message()
msg.add_mpint(69)
msg.rewind()
kex.parse_next(paramiko.kex_group1._MSG_KEXDH_INIT, msg)
H = b'B16BF34DD10945EDE84E9C1EF24A14BFDC843389'
x = b'1F0000000866616B652D6B6579000000807E2DDB1743F3487D6545F04F1C8476092FB912B013626AB5BCEB764257D88BBA64243B9F348DF7B41B8C814A995E00299913503456983FFB9178D3CD79EB6D55522418A8ABF65375872E55938AB99A84A0B5FC8A1ECC66A7C3766E7E0F80B7CE2C9225FC2DD683F4764244B72963BBB383F529DCF0C5D17740B8A2ADBE9208D40000000866616B652D736967'
self.assertEqual(self.K, transport._K)
self.assertEqual(H, hexlify(transport._H).upper())
self.assertEqual(x, hexlify(transport._message.asbytes()).upper())
self.assertTrue(transport._activated)
def test_3_gex_client(self):
transport = FakeTransport()
transport.server_mode = False
kex = KexGex(transport)
kex.start_kex()
x = b'22000004000000080000002000'
self.assertEqual(x, hexlify(transport._message.asbytes()).upper())
self.assertEqual((paramiko.kex_gex._MSG_KEXDH_GEX_GROUP,), transport._expect)
msg = Message()
msg.add_mpint(FakeModulusPack.P)
msg.add_mpint(FakeModulusPack.G)
msg.rewind()
kex.parse_next(paramiko.kex_gex._MSG_KEXDH_GEX_GROUP, msg)
x = b'20000000807E2DDB1743F3487D6545F04F1C8476092FB912B013626AB5BCEB764257D88BBA64243B9F348DF7B41B8C814A995E00299913503456983FFB9178D3CD79EB6D55522418A8ABF65375872E55938AB99A84A0B5FC8A1ECC66A7C3766E7E0F80B7CE2C9225FC2DD683F4764244B72963BBB383F529DCF0C5D17740B8A2ADBE9208D4'
self.assertEqual(x, hexlify(transport._message.asbytes()).upper())
self.assertEqual((paramiko.kex_gex._MSG_KEXDH_GEX_REPLY,), transport._expect)
msg = Message()
msg.add_string('fake-host-key')
msg.add_mpint(69)
msg.add_string('fake-sig')
msg.rewind()
kex.parse_next(paramiko.kex_gex._MSG_KEXDH_GEX_REPLY, msg)
H = b'A265563F2FA87F1A89BF007EE90D58BE2E4A4BD0'
self.assertEqual(self.K, transport._K)
self.assertEqual(H, hexlify(transport._H).upper())
self.assertEqual((b'fake-host-key', b'fake-sig'), transport._verify)
self.assertTrue(transport._activated)
def test_4_gex_old_client(self):
transport = FakeTransport()
transport.server_mode = False
kex = KexGex(transport)
kex.start_kex(_test_old_style=True)
x = b'1E00000800'
self.assertEqual(x, hexlify(transport._message.asbytes()).upper())
self.assertEqual((paramiko.kex_gex._MSG_KEXDH_GEX_GROUP,), transport._expect)
msg = Message()
msg.add_mpint(FakeModulusPack.P)
msg.add_mpint(FakeModulusPack.G)
msg.rewind()
kex.parse_next(paramiko.kex_gex._MSG_KEXDH_GEX_GROUP, msg)
x = b'20000000807E2DDB1743F3487D6545F04F1C8476092FB912B013626AB5BCEB764257D88BBA64243B9F348DF7B41B8C814A995E00299913503456983FFB9178D3CD79EB6D55522418A8ABF65375872E55938AB99A84A0B5FC8A1ECC66A7C3766E7E0F80B7CE2C9225FC2DD683F4764244B72963BBB383F529DCF0C5D17740B8A2ADBE9208D4'
self.assertEqual(x, hexlify(transport._message.asbytes()).upper())
self.assertEqual((paramiko.kex_gex._MSG_KEXDH_GEX_REPLY,), transport._expect)
msg = Message()
msg.add_string('fake-host-key')
msg.add_mpint(69)
msg.add_string('fake-sig')
msg.rewind()
kex.parse_next(paramiko.kex_gex._MSG_KEXDH_GEX_REPLY, msg)
H = b'807F87B269EF7AC5EC7E75676808776A27D5864C'
self.assertEqual(self.K, transport._K)
self.assertEqual(H, hexlify(transport._H).upper())
self.assertEqual((b'fake-host-key', b'fake-sig'), transport._verify)
self.assertTrue(transport._activated)
def test_5_gex_server(self):
transport = FakeTransport()
transport.server_mode = True
kex = KexGex(transport)
kex.start_kex()
self.assertEqual((paramiko.kex_gex._MSG_KEXDH_GEX_REQUEST, paramiko.kex_gex._MSG_KEXDH_GEX_REQUEST_OLD), transport._expect)
msg = Message()
msg.add_int(1024)
msg.add_int(2048)
msg.add_int(4096)
msg.rewind()
kex.parse_next(paramiko.kex_gex._MSG_KEXDH_GEX_REQUEST, msg)
x = b'1F0000008100FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF0000000102'
self.assertEqual(x, hexlify(transport._message.asbytes()).upper())
self.assertEqual((paramiko.kex_gex._MSG_KEXDH_GEX_INIT,), transport._expect)
msg = Message()
msg.add_mpint(12345)
msg.rewind()
kex.parse_next(paramiko.kex_gex._MSG_KEXDH_GEX_INIT, msg)
K = 67592995013596137876033460028393339951879041140378510871612128162185209509220726296697886624612526735888348020498716482757677848959420073720160491114319163078862905400020959196386947926388406687288901564192071077389283980347784184487280885335302632305026248574716290537036069329724382811853044654824945750581
H = b'CE754197C21BF3452863B4F44D0B3951F12516EF'
x = b'210000000866616B652D6B6579000000807E2DDB1743F3487D6545F04F1C8476092FB912B013626AB5BCEB764257D88BBA64243B9F348DF7B41B8C814A995E00299913503456983FFB9178D3CD79EB6D55522418A8ABF65375872E55938AB99A84A0B5FC8A1ECC66A7C3766E7E0F80B7CE2C9225FC2DD683F4764244B72963BBB383F529DCF0C5D17740B8A2ADBE9208D40000000866616B652D736967'
self.assertEqual(K, transport._K)
self.assertEqual(H, hexlify(transport._H).upper())
self.assertEqual(x, hexlify(transport._message.asbytes()).upper())
self.assertTrue(transport._activated)
def test_6_gex_server_with_old_client(self):
transport = FakeTransport()
transport.server_mode = True
kex = KexGex(transport)
kex.start_kex()
self.assertEqual((paramiko.kex_gex._MSG_KEXDH_GEX_REQUEST, paramiko.kex_gex._MSG_KEXDH_GEX_REQUEST_OLD), transport._expect)
msg = Message()
msg.add_int(2048)
msg.rewind()
kex.parse_next(paramiko.kex_gex._MSG_KEXDH_GEX_REQUEST_OLD, msg)
x = b'1F0000008100FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF0000000102'
self.assertEqual(x, hexlify(transport._message.asbytes()).upper())
self.assertEqual((paramiko.kex_gex._MSG_KEXDH_GEX_INIT,), transport._expect)
msg = Message()
msg.add_mpint(12345)
msg.rewind()
kex.parse_next(paramiko.kex_gex._MSG_KEXDH_GEX_INIT, msg)
K = 67592995013596137876033460028393339951879041140378510871612128162185209509220726296697886624612526735888348020498716482757677848959420073720160491114319163078862905400020959196386947926388406687288901564192071077389283980347784184487280885335302632305026248574716290537036069329724382811853044654824945750581
H = b'B41A06B2E59043CEFC1AE16EC31F1E2D12EC455B'
x = b'210000000866616B652D6B6579000000807E2DDB1743F3487D6545F04F1C8476092FB912B013626AB5BCEB764257D88BBA64243B9F348DF7B41B8C814A995E00299913503456983FFB9178D3CD79EB6D55522418A8ABF65375872E55938AB99A84A0B5FC8A1ECC66A7C3766E7E0F80B7CE2C9225FC2DD683F4764244B72963BBB383F529DCF0C5D17740B8A2ADBE9208D40000000866616B652D736967'
self.assertEqual(K, transport._K)
self.assertEqual(H, hexlify(transport._H).upper())
self.assertEqual(x, hexlify(transport._message.asbytes()).upper())
self.assertTrue(transport._activated)
def test_7_gex_sha256_client(self):
transport = FakeTransport()
transport.server_mode = False
kex = KexGexSHA256(transport)
kex.start_kex()
x = b'22000004000000080000002000'
self.assertEqual(x, hexlify(transport._message.asbytes()).upper())
self.assertEqual((paramiko.kex_gex._MSG_KEXDH_GEX_GROUP,), transport._expect)
msg = Message()
msg.add_mpint(FakeModulusPack.P)
msg.add_mpint(FakeModulusPack.G)
msg.rewind()
kex.parse_next(paramiko.kex_gex._MSG_KEXDH_GEX_GROUP, msg)
x = b'20000000807E2DDB1743F3487D6545F04F1C8476092FB912B013626AB5BCEB764257D88BBA64243B9F348DF7B41B8C814A995E00299913503456983FFB9178D3CD79EB6D55522418A8ABF65375872E55938AB99A84A0B5FC8A1ECC66A7C3766E7E0F80B7CE2C9225FC2DD683F4764244B72963BBB383F529DCF0C5D17740B8A2ADBE9208D4'
self.assertEqual(x, hexlify(transport._message.asbytes()).upper())
self.assertEqual((paramiko.kex_gex._MSG_KEXDH_GEX_REPLY,), transport._expect)
msg = Message()
msg.add_string('fake-host-key')
msg.add_mpint(69)
msg.add_string('fake-sig')
msg.rewind()
kex.parse_next(paramiko.kex_gex._MSG_KEXDH_GEX_REPLY, msg)
H = b'AD1A9365A67B4496F05594AD1BF656E3CDA0851289A4C1AFF549FEAE50896DF4'
self.assertEqual(self.K, transport._K)
self.assertEqual(H, hexlify(transport._H).upper())
self.assertEqual((b'fake-host-key', b'fake-sig'), transport._verify)
self.assertTrue(transport._activated)
def test_8_gex_sha256_old_client(self):
transport = FakeTransport()
transport.server_mode = False
kex = KexGexSHA256(transport)
kex.start_kex(_test_old_style=True)
x = b'1E00000800'
self.assertEqual(x, hexlify(transport._message.asbytes()).upper())
self.assertEqual((paramiko.kex_gex._MSG_KEXDH_GEX_GROUP,), transport._expect)
msg = Message()
msg.add_mpint(FakeModulusPack.P)
msg.add_mpint(FakeModulusPack.G)
msg.rewind()
kex.parse_next(paramiko.kex_gex._MSG_KEXDH_GEX_GROUP, msg)
x = b'20000000807E2DDB1743F3487D6545F04F1C8476092FB912B013626AB5BCEB764257D88BBA64243B9F348DF7B41B8C814A995E00299913503456983FFB9178D3CD79EB6D55522418A8ABF65375872E55938AB99A84A0B5FC8A1ECC66A7C3766E7E0F80B7CE2C9225FC2DD683F4764244B72963BBB383F529DCF0C5D17740B8A2ADBE9208D4'
self.assertEqual(x, hexlify(transport._message.asbytes()).upper())
self.assertEqual((paramiko.kex_gex._MSG_KEXDH_GEX_REPLY,), transport._expect)
msg = Message()
msg.add_string('fake-host-key')
msg.add_mpint(69)
msg.add_string('fake-sig')
msg.rewind()
kex.parse_next(paramiko.kex_gex._MSG_KEXDH_GEX_REPLY, msg)
H = b'518386608B15891AE5237DEE08DCADDE76A0BCEFCE7F6DB3AD66BC41D256DFE5'
self.assertEqual(self.K, transport._K)
self.assertEqual(H, hexlify(transport._H).upper())
self.assertEqual((b'fake-host-key', b'fake-sig'), transport._verify)
self.assertTrue(transport._activated)
def test_9_gex_sha256_server(self):
transport = FakeTransport()
transport.server_mode = True
kex = KexGexSHA256(transport)
kex.start_kex()
self.assertEqual((paramiko.kex_gex._MSG_KEXDH_GEX_REQUEST, paramiko.kex_gex._MSG_KEXDH_GEX_REQUEST_OLD), transport._expect)
msg = Message()
msg.add_int(1024)
msg.add_int(2048)
msg.add_int(4096)
msg.rewind()
kex.parse_next(paramiko.kex_gex._MSG_KEXDH_GEX_REQUEST, msg)
x = b'1F0000008100FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF0000000102'
self.assertEqual(x, hexlify(transport._message.asbytes()).upper())
self.assertEqual((paramiko.kex_gex._MSG_KEXDH_GEX_INIT,), transport._expect)
msg = Message()
msg.add_mpint(12345)
msg.rewind()
kex.parse_next(paramiko.kex_gex._MSG_KEXDH_GEX_INIT, msg)
K = 67592995013596137876033460028393339951879041140378510871612128162185209509220726296697886624612526735888348020498716482757677848959420073720160491114319163078862905400020959196386947926388406687288901564192071077389283980347784184487280885335302632305026248574716290537036069329724382811853044654824945750581
H = b'CCAC0497CF0ABA1DBF55E1A3995D17F4CC31824B0E8D95CDF8A06F169D050D80'
x = b'210000000866616B652D6B6579000000807E2DDB1743F3487D6545F04F1C8476092FB912B013626AB5BCEB764257D88BBA64243B9F348DF7B41B8C814A995E00299913503456983FFB9178D3CD79EB6D55522418A8ABF65375872E55938AB99A84A0B5FC8A1ECC66A7C3766E7E0F80B7CE2C9225FC2DD683F4764244B72963BBB383F529DCF0C5D17740B8A2ADBE9208D40000000866616B652D736967'
self.assertEqual(K, transport._K)
self.assertEqual(H, hexlify(transport._H).upper())
self.assertEqual(x, hexlify(transport._message.asbytes()).upper())
self.assertTrue(transport._activated)
def test_10_gex_sha256_server_with_old_client(self):
transport = FakeTransport()
transport.server_mode = True
kex = KexGexSHA256(transport)
kex.start_kex()
self.assertEqual((paramiko.kex_gex._MSG_KEXDH_GEX_REQUEST, paramiko.kex_gex._MSG_KEXDH_GEX_REQUEST_OLD), transport._expect)
msg = Message()
msg.add_int(2048)
msg.rewind()
kex.parse_next(paramiko.kex_gex._MSG_KEXDH_GEX_REQUEST_OLD, msg)
x = b'1F0000008100FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF0000000102'
self.assertEqual(x, hexlify(transport._message.asbytes()).upper())
self.assertEqual((paramiko.kex_gex._MSG_KEXDH_GEX_INIT,), transport._expect)
msg = Message()
msg.add_mpint(12345)
msg.rewind()
kex.parse_next(paramiko.kex_gex._MSG_KEXDH_GEX_INIT, msg)
K = 67592995013596137876033460028393339951879041140378510871612128162185209509220726296697886624612526735888348020498716482757677848959420073720160491114319163078862905400020959196386947926388406687288901564192071077389283980347784184487280885335302632305026248574716290537036069329724382811853044654824945750581
H = b'3DDD2AD840AD095E397BA4D0573972DC60F6461FD38A187CACA6615A5BC8ADBB'
x = b'210000000866616B652D6B6579000000807E2DDB1743F3487D6545F04F1C8476092FB912B013626AB5BCEB764257D88BBA64243B9F348DF7B41B8C814A995E00299913503456983FFB9178D3CD79EB6D55522418A8ABF65375872E55938AB99A84A0B5FC8A1ECC66A7C3766E7E0F80B7CE2C9225FC2DD683F4764244B72963BBB383F529DCF0C5D17740B8A2ADBE9208D40000000866616B652D736967'
self.assertEqual(K, transport._K)
self.assertEqual(H, hexlify(transport._H).upper())
self.assertEqual(x, hexlify(transport._message.asbytes()).upper())
self.assertTrue(transport._activated)
def test_11_kex_nistp256_client(self):
K = 91610929826364598472338906427792435253694642563583721654249504912114314269754
transport = FakeTransport()
transport.server_mode = False
kex = KexNistp256(transport)
kex.start_kex()
self.assertEqual((paramiko.kex_ecdh_nist._MSG_KEXECDH_REPLY,), transport._expect)
#fake reply
msg = Message()
msg.add_string('fake-host-key')
Q_S = unhexlify("043ae159594ba062efa121480e9ef136203fa9ec6b6e1f8723a321c16e62b945f573f3b822258cbcd094b9fa1c125cbfe5f043280893e66863cc0cb4dccbe70210")
msg.add_string(Q_S)
msg.add_string('fake-sig')
msg.rewind()
kex.parse_next(paramiko.kex_ecdh_nist._MSG_KEXECDH_REPLY, msg)
H = b'BAF7CE243A836037EB5D2221420F35C02B9AB6C957FE3BDE3369307B9612570A'
self.assertEqual(K, kex.transport._K)
self.assertEqual(H, hexlify(transport._H).upper())
self.assertEqual((b'fake-host-key', b'fake-sig'), transport._verify)
self.assertTrue(transport._activated)
def test_12_kex_nistp256_server(self):
K = 91610929826364598472338906427792435253694642563583721654249504912114314269754
transport = FakeTransport()
transport.server_mode = True
kex = KexNistp256(transport)
kex.start_kex()
self.assertEqual((paramiko.kex_ecdh_nist._MSG_KEXECDH_INIT,), transport._expect)
#fake init
msg = Message()
Q_C = unhexlify("043ae159594ba062efa121480e9ef136203fa9ec6b6e1f8723a321c16e62b945f573f3b822258cbcd094b9fa1c125cbfe5f043280893e66863cc0cb4dccbe70210")
H = b'2EF4957AFD530DD3F05DBEABF68D724FACC060974DA9704F2AEE4C3DE861E7CA'
msg.add_string(Q_C)
msg.rewind()
kex.parse_next(paramiko.kex_ecdh_nist._MSG_KEXECDH_INIT, msg)
self.assertEqual(K, transport._K)
self.assertTrue(transport._activated)
self.assertEqual(H, hexlify(transport._H).upper())
| lgpl-2.1 |
ryfeus/lambda-packs | Skimage_numpy/source/numpy/polynomial/chebyshev.py | 30 | 62880 | """
Objects for dealing with Chebyshev series.
This module provides a number of objects (mostly functions) useful for
dealing with Chebyshev series, including a `Chebyshev` class that
encapsulates the usual arithmetic operations. (General information
on how this module represents and works with such polynomials is in the
docstring for its "parent" sub-package, `numpy.polynomial`).
Constants
---------
- `chebdomain` -- Chebyshev series default domain, [-1,1].
- `chebzero` -- (Coefficients of the) Chebyshev series that evaluates
identically to 0.
- `chebone` -- (Coefficients of the) Chebyshev series that evaluates
identically to 1.
- `chebx` -- (Coefficients of the) Chebyshev series for the identity map,
``f(x) = x``.
Arithmetic
----------
- `chebadd` -- add two Chebyshev series.
- `chebsub` -- subtract one Chebyshev series from another.
- `chebmul` -- multiply two Chebyshev series.
- `chebdiv` -- divide one Chebyshev series by another.
- `chebpow` -- raise a Chebyshev series to a positive integer power
- `chebval` -- evaluate a Chebyshev series at given points.
- `chebval2d` -- evaluate a 2D Chebyshev series at given points.
- `chebval3d` -- evaluate a 3D Chebyshev series at given points.
- `chebgrid2d` -- evaluate a 2D Chebyshev series on a Cartesian product.
- `chebgrid3d` -- evaluate a 3D Chebyshev series on a Cartesian product.
Calculus
--------
- `chebder` -- differentiate a Chebyshev series.
- `chebint` -- integrate a Chebyshev series.
Misc Functions
--------------
- `chebfromroots` -- create a Chebyshev series with specified roots.
- `chebroots` -- find the roots of a Chebyshev series.
- `chebvander` -- Vandermonde-like matrix for Chebyshev polynomials.
- `chebvander2d` -- Vandermonde-like matrix for 2D power series.
- `chebvander3d` -- Vandermonde-like matrix for 3D power series.
- `chebgauss` -- Gauss-Chebyshev quadrature, points and weights.
- `chebweight` -- Chebyshev weight function.
- `chebcompanion` -- symmetrized companion matrix in Chebyshev form.
- `chebfit` -- least-squares fit returning a Chebyshev series.
- `chebpts1` -- Chebyshev points of the first kind.
- `chebpts2` -- Chebyshev points of the second kind.
- `chebtrim` -- trim leading coefficients from a Chebyshev series.
- `chebline` -- Chebyshev series representing given straight line.
- `cheb2poly` -- convert a Chebyshev series to a polynomial.
- `poly2cheb` -- convert a polynomial to a Chebyshev series.
Classes
-------
- `Chebyshev` -- A Chebyshev series class.
See also
--------
`numpy.polynomial`
Notes
-----
The implementations of multiplication, division, integration, and
differentiation use the algebraic identities [1]_:
.. math ::
T_n(x) = \\frac{z^n + z^{-n}}{2} \\\\
z\\frac{dx}{dz} = \\frac{z - z^{-1}}{2}.
where
.. math :: x = \\frac{z + z^{-1}}{2}.
These identities allow a Chebyshev series to be expressed as a finite,
symmetric Laurent series. In this module, this sort of Laurent series
is referred to as a "z-series."
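For example, the identity ``T_1(x) = x`` has Chebyshev coefficients
``[0, 1]`` and the symmetric z-series ``[.5, 0, .5]``, matching
:math:`x = (z + z^{-1})/2` above.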
References
----------
.. [1] A. T. Benjamin, et al., "Combinatorial Trigonometry with Chebyshev
Polynomials," *Journal of Statistical Planning and Inference 14*, 2008
(preprint: http://www.math.hmc.edu/~benjamin/papers/CombTrig.pdf, pg. 4)
"""
from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
import numpy.linalg as la
from . import polyutils as pu
from ._polybase import ABCPolyBase
__all__ = [
'chebzero', 'chebone', 'chebx', 'chebdomain', 'chebline', 'chebadd',
'chebsub', 'chebmulx', 'chebmul', 'chebdiv', 'chebpow', 'chebval',
'chebder', 'chebint', 'cheb2poly', 'poly2cheb', 'chebfromroots',
'chebvander', 'chebfit', 'chebtrim', 'chebroots', 'chebpts1',
'chebpts2', 'Chebyshev', 'chebval2d', 'chebval3d', 'chebgrid2d',
'chebgrid3d', 'chebvander2d', 'chebvander3d', 'chebcompanion',
'chebgauss', 'chebweight']
chebtrim = pu.trimcoef
#
# A collection of functions for manipulating z-series. These are private
# functions and do minimal error checking.
#
def _cseries_to_zseries(c):
"""Covert Chebyshev series to z-series.
Covert a Chebyshev series to the equivalent z-series. The result is
never an empty array. The dtype of the return is the same as that of
the input. No checks are run on the arguments as this routine is for
internal use.
Parameters
----------
c : 1-D ndarray
Chebyshev coefficients, ordered from low to high
Returns
-------
zs : 1-D ndarray
Odd length symmetric z-series, ordered from low to high.
"""
n = c.size
zs = np.zeros(2*n-1, dtype=c.dtype)
zs[n-1:] = c/2
return zs + zs[::-1]
def _zseries_to_cseries(zs):
"""Covert z-series to a Chebyshev series.
Covert a z series to the equivalent Chebyshev series. The result is
never an empty array. The dtype of the return is the same as that of
the input. No checks are run on the arguments as this routine is for
internal use.
Parameters
----------
zs : 1-D ndarray
Odd length symmetric z-series, ordered from low to high.
Returns
-------
c : 1-D ndarray
Chebyshev coefficients, ordered from low to high.
"""
n = (zs.size + 1)//2
c = zs[n-1:].copy()
c[1:n] *= 2
return c
def _zseries_mul(z1, z2):
"""Multiply two z-series.
Multiply two z-series to produce a z-series.
Parameters
----------
z1, z2 : 1-D ndarray
The arrays must be 1-D but this is not checked.
Returns
-------
product : 1-D ndarray
The product z-series.
Notes
-----
This is simply convolution. If symmetric/anti-symmetric z-series are
denoted by S/A then the following rules apply:
S*S, A*A -> S
S*A, A*S -> A
"""
return np.convolve(z1, z2)
def _zseries_div(z1, z2):
"""Divide the first z-series by the second.
Divide `z1` by `z2` and return the quotient and remainder as z-series.
Warning: this implementation only applies when both z1 and z2 have the
same symmetry, which is sufficient for present purposes.
Parameters
----------
z1, z2 : 1-D ndarray
The arrays must be 1-D and have the same symmetry, but this is not
checked.
Returns
-------
(quotient, remainder) : 1-D ndarrays
Quotient and remainder as z-series.
Notes
-----
This is not the same as polynomial division on account of the desired form
of the remainder. If symmetric/anti-symmetric z-series are denoted by S/A
then the following rules apply:
S/S -> S,S
A/A -> S,A
The restriction to types of the same symmetry could be fixed but seems like
unneeded generality. There is no natural form for the remainder in the case
where there is no symmetry.
"""
z1 = z1.copy()
z2 = z2.copy()
len1 = len(z1)
len2 = len(z2)
if len2 == 1:
z1 /= z2
return z1, z1[:1]*0
elif len1 < len2:
return z1[:1]*0, z1
else:
dlen = len1 - len2
scl = z2[0]
z2 /= scl
quo = np.empty(dlen + 1, dtype=z1.dtype)
i = 0
j = dlen
while i < j:
r = z1[i]
quo[i] = z1[i]
quo[dlen - i] = r
tmp = r*z2
z1[i:i+len2] -= tmp
z1[j:j+len2] -= tmp
i += 1
j -= 1
r = z1[i]
quo[i] = r
tmp = r*z2
z1[i:i+len2] -= tmp
quo /= scl
rem = z1[i+1:i-1+len2].copy()
return quo, rem
def _zseries_der(zs):
"""Differentiate a z-series.
The derivative is with respect to x, not z. This is achieved using the
chain rule and the value of dx/dz given in the module notes.
Parameters
----------
zs : z-series
The z-series to differentiate.
Returns
-------
derivative : z-series
The derivative
Notes
-----
The zseries for x (ns) has been multiplied by two in order to avoid
using floats that are incompatible with Decimal and likely other
specialized scalar types. This scaling has been compensated by
multiplying the value of zs by two also so that the two cancels in the
division.
"""
n = len(zs)//2
ns = np.array([-1, 0, 1], dtype=zs.dtype)
zs *= np.arange(-n, n+1)*2
d, r = _zseries_div(zs, ns)
return d
def _zseries_int(zs):
"""Integrate a z-series.
The integral is with respect to x, not z. This is achieved by a change
of variable using dx/dz given in the module notes.
Parameters
----------
zs : z-series
The z-series to integrate
Returns
-------
integral : z-series
The indefinite integral
Notes
-----
The zseries for x (ns) has been multiplied by two in order to avoid
using floats that are incompatible with Decimal and likely other
specialized scalar types. This scaling has been compensated by
dividing the resulting zs by two.
"""
n = 1 + len(zs)//2
ns = np.array([-1, 0, 1], dtype=zs.dtype)
zs = _zseries_mul(zs, ns)
div = np.arange(-n, n+1)*2
zs[:n] /= div[:n]
zs[n+1:] /= div[n+1:]
zs[n] = 0
return zs
#
# Chebyshev series functions
#
def poly2cheb(pol):
"""
Convert a polynomial to a Chebyshev series.
Convert an array representing the coefficients of a polynomial (relative
to the "standard" basis) ordered from lowest degree to highest, to an
array of the coefficients of the equivalent Chebyshev series, ordered
from lowest to highest degree.
Parameters
----------
pol : array_like
1-D array containing the polynomial coefficients
Returns
-------
c : ndarray
1-D array containing the coefficients of the equivalent Chebyshev
series.
See Also
--------
cheb2poly
Notes
-----
The easy way to do conversions between polynomial basis sets
is to use the convert method of a class instance.
Examples
--------
>>> from numpy import polynomial as P
>>> p = P.Polynomial(range(4))
>>> p
Polynomial([ 0., 1., 2., 3.], [-1., 1.])
>>> c = p.convert(kind=P.Chebyshev)
>>> c
Chebyshev([ 1. , 3.25, 1. , 0.75], [-1., 1.])
>>> P.poly2cheb(range(4))
array([ 1. , 3.25, 1. , 0.75])
"""
[pol] = pu.as_series([pol])
deg = len(pol) - 1
res = 0
for i in range(deg, -1, -1):
res = chebadd(chebmulx(res), pol[i])
return res
def cheb2poly(c):
"""
Convert a Chebyshev series to a polynomial.
Convert an array representing the coefficients of a Chebyshev series,
ordered from lowest degree to highest, to an array of the coefficients
of the equivalent polynomial (relative to the "standard" basis) ordered
from lowest to highest degree.
Parameters
----------
c : array_like
1-D array containing the Chebyshev series coefficients, ordered
from lowest order term to highest.
Returns
-------
pol : ndarray
1-D array containing the coefficients of the equivalent polynomial
(relative to the "standard" basis) ordered from lowest order term
to highest.
See Also
--------
poly2cheb
Notes
-----
The easy way to do conversions between polynomial basis sets
is to use the convert method of a class instance.
Examples
--------
>>> from numpy import polynomial as P
>>> c = P.Chebyshev(range(4))
>>> c
Chebyshev([ 0., 1., 2., 3.], [-1., 1.])
>>> p = c.convert(kind=P.Polynomial)
>>> p
Polynomial([ -2., -8., 4., 12.], [-1., 1.])
>>> P.cheb2poly(range(4))
array([ -2., -8., 4., 12.])
"""
from .polynomial import polyadd, polysub, polymulx
[c] = pu.as_series([c])
n = len(c)
if n < 3:
return c
else:
c0 = c[-2]
c1 = c[-1]
# i is the current degree of c1
for i in range(n - 1, 1, -1):
tmp = c0
c0 = polysub(c[i - 2], c1)
c1 = polyadd(tmp, polymulx(c1)*2)
return polyadd(c0, polymulx(c1))
#
# These constant arrays are of integer type so as to be compatible
# with the widest range of other types, such as Decimal.
#
# Chebyshev default domain.
chebdomain = np.array([-1, 1])
# Chebyshev coefficients representing zero.
chebzero = np.array([0])
# Chebyshev coefficients representing one.
chebone = np.array([1])
# Chebyshev coefficients representing the identity x.
chebx = np.array([0, 1])
def chebline(off, scl):
"""
Chebyshev series whose graph is a straight line.
Parameters
----------
off, scl : scalars
The specified line is given by ``off + scl*x``.
Returns
-------
y : ndarray
This module's representation of the Chebyshev series for
``off + scl*x``.
See Also
--------
polyline
Examples
--------
>>> import numpy.polynomial.chebyshev as C
>>> C.chebline(3,2)
array([3, 2])
>>> C.chebval(-3, C.chebline(3,2)) # should be -3
-3.0
"""
if scl != 0:
return np.array([off, scl])
else:
return np.array([off])
def chebfromroots(roots):
"""
Generate a Chebyshev series with given roots.
The function returns the coefficients of the polynomial
.. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n),
in Chebyshev form, where the `r_n` are the roots specified in `roots`.
If a zero has multiplicity n, then it must appear in `roots` n times.
For instance, if 2 is a root of multiplicity three and 3 is a root of
multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The
roots can appear in any order.
If the returned coefficients are `c`, then
.. math:: p(x) = c_0 + c_1 * T_1(x) + ... + c_n * T_n(x)
The coefficient of the last term is not generally 1 for monic
polynomials in Chebyshev form.
Parameters
----------
roots : array_like
Sequence containing the roots.
Returns
-------
out : ndarray
1-D array of coefficients. If all roots are real then `out` is a
real array, if some of the roots are complex, then `out` is complex
even if all the coefficients in the result are real (see Examples
below).
See Also
--------
polyfromroots, legfromroots, lagfromroots, hermfromroots,
hermefromroots.
Examples
--------
>>> import numpy.polynomial.chebyshev as C
>>> C.chebfromroots((-1,0,1)) # x^3 - x relative to the standard basis
array([ 0. , -0.25, 0. , 0.25])
>>> j = complex(0,1)
>>> C.chebfromroots((-j,j)) # x^2 + 1 relative to the standard basis
array([ 1.5+0.j, 0.0+0.j, 0.5+0.j])
"""
if len(roots) == 0:
return np.ones(1)
else:
[roots] = pu.as_series([roots], trim=False)
roots.sort()
p = [chebline(-r, 1) for r in roots]
n = len(p)
while n > 1:
m, r = divmod(n, 2)
tmp = [chebmul(p[i], p[i+m]) for i in range(m)]
if r:
tmp[0] = chebmul(tmp[0], p[-1])
p = tmp
n = m
return p[0]
def chebadd(c1, c2):
"""
Add one Chebyshev series to another.
Returns the sum of two Chebyshev series `c1` + `c2`. The arguments
are sequences of coefficients ordered from lowest order term to
highest, i.e., [1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Chebyshev series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Array representing the Chebyshev series of their sum.
See Also
--------
chebsub, chebmul, chebdiv, chebpow
Notes
-----
Unlike multiplication, division, etc., the sum of two Chebyshev series
is a Chebyshev series (without having to "reproject" the result onto
the basis set) so addition, just like that of "standard" polynomials,
is simply "component-wise."
Examples
--------
>>> from numpy.polynomial import chebyshev as C
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> C.chebadd(c1,c2)
array([ 4., 4., 4.])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c1[:c2.size] += c2
ret = c1
else:
c2[:c1.size] += c1
ret = c2
return pu.trimseq(ret)
def chebsub(c1, c2):
"""
Subtract one Chebyshev series from another.
Returns the difference of two Chebyshev series `c1` - `c2`. The
sequences of coefficients are from lowest order term to highest, i.e.,
[1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Chebyshev series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Of Chebyshev series coefficients representing their difference.
See Also
--------
chebadd, chebmul, chebdiv, chebpow
Notes
-----
Unlike multiplication, division, etc., the difference of two Chebyshev
series is a Chebyshev series (without having to "reproject" the result
onto the basis set) so subtraction, just like that of "standard"
polynomials, is simply "component-wise."
Examples
--------
>>> from numpy.polynomial import chebyshev as C
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> C.chebsub(c1,c2)
array([-2., 0., 2.])
>>> C.chebsub(c2,c1) # -C.chebsub(c1,c2)
array([ 2., 0., -2.])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c1[:c2.size] -= c2
ret = c1
else:
c2 = -c2
c2[:c1.size] += c1
ret = c2
return pu.trimseq(ret)
def chebmulx(c):
"""Multiply a Chebyshev series by x.
Multiply the polynomial `c` by x, where x is the independent
variable.
Parameters
----------
c : array_like
1-D array of Chebyshev series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Array representing the result of the multiplication.
Notes
-----
.. versionadded:: 1.5.0
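Examples
--------
A quick hand-checked illustration, multiplying ``T_0 + 2*T_1 + 3*T_2``
by ``x``:
>>> from numpy.polynomial import chebyshev as C
>>> C.chebmulx((1, 2, 3))
array([ 1. ,  2.5,  1. ,  1.5])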
"""
# c is a trimmed copy
[c] = pu.as_series([c])
# The zero series needs special treatment
if len(c) == 1 and c[0] == 0:
return c
prd = np.empty(len(c) + 1, dtype=c.dtype)
prd[0] = c[0]*0
prd[1] = c[0]
if len(c) > 1:
tmp = c[1:]/2
prd[2:] = tmp
prd[0:-2] += tmp
return prd
def chebmul(c1, c2):
"""
Multiply one Chebyshev series by another.
Returns the product of two Chebyshev series `c1` * `c2`. The arguments
are sequences of coefficients, from lowest order "term" to highest,
e.g., [1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Chebyshev series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Of Chebyshev series coefficients representing their product.
See Also
--------
chebadd, chebsub, chebdiv, chebpow
Notes
-----
In general, the (polynomial) product of two C-series results in terms
that are not in the Chebyshev polynomial basis set. Thus, to express
the product as a C-series, it is typically necessary to "reproject"
the product onto said basis set, which typically produces
"unintuitive live" (but correct) results; see Examples section below.
Examples
--------
>>> from numpy.polynomial import chebyshev as C
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> C.chebmul(c1,c2) # multiplication requires "reprojection"
array([ 6.5, 12. , 12. , 4. , 1.5])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
z1 = _cseries_to_zseries(c1)
z2 = _cseries_to_zseries(c2)
prd = _zseries_mul(z1, z2)
ret = _zseries_to_cseries(prd)
return pu.trimseq(ret)
def chebdiv(c1, c2):
"""
Divide one Chebyshev series by another.
Returns the quotient-with-remainder of two Chebyshev series
`c1` / `c2`. The arguments are sequences of coefficients from lowest
order "term" to highest, e.g., [1,2,3] represents the series
``T_0 + 2*T_1 + 3*T_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Chebyshev series coefficients ordered from low to
high.
Returns
-------
[quo, rem] : ndarrays
Of Chebyshev series coefficients representing the quotient and
remainder.
See Also
--------
chebadd, chebsub, chebmul, chebpow
Notes
-----
In general, the (polynomial) division of one C-series by another
results in quotient and remainder terms that are not in the Chebyshev
polynomial basis set. Thus, to express these results as C-series, it
is typically necessary to "reproject" the results onto said basis
set, which typically produces "unintuitive" (but correct) results;
see Examples section below.
Examples
--------
>>> from numpy.polynomial import chebyshev as C
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> C.chebdiv(c1,c2) # quotient "intuitive," remainder not
(array([ 3.]), array([-8., -4.]))
>>> c2 = (0,1,2,3)
>>> C.chebdiv(c2,c1) # neither "intuitive"
(array([ 0., 2.]), array([-2., -4.]))
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if c2[-1] == 0:
raise ZeroDivisionError()
lc1 = len(c1)
lc2 = len(c2)
if lc1 < lc2:
return c1[:1]*0, c1
elif lc2 == 1:
return c1/c2[-1], c1[:1]*0
else:
z1 = _cseries_to_zseries(c1)
z2 = _cseries_to_zseries(c2)
quo, rem = _zseries_div(z1, z2)
quo = pu.trimseq(_zseries_to_cseries(quo))
rem = pu.trimseq(_zseries_to_cseries(rem))
return quo, rem
def chebpow(c, pow, maxpower=16):
"""Raise a Chebyshev series to a power.
Returns the Chebyshev series `c` raised to the power `pow`. The
argument `c` is a sequence of coefficients ordered from low to high.
i.e., [1,2,3] is the series ``T_0 + 2*T_1 + 3*T_2.``
Parameters
----------
c : array_like
1-D array of Chebyshev series coefficients ordered from low to
high.
pow : integer
Power to which the series will be raised
maxpower : integer, optional
Maximum power allowed. This is mainly to limit growth of the series
to unmanageable size. Default is 16
Returns
-------
coef : ndarray
Chebyshev series of power.
See Also
--------
chebadd, chebsub, chebmul, chebdiv
Examples
--------
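A quick hand-checked illustration, squaring ``T_0 + 2*T_1 + 3*T_2``
(the coefficients below were verified against chebmul):
>>> from numpy.polynomial import chebyshev as C
>>> C.chebpow((1, 2, 3), 2)
array([  7.5,  10. ,   8. ,   6. ,   4.5])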
"""
# c is a trimmed copy
[c] = pu.as_series([c])
power = int(pow)
if power != pow or power < 0:
raise ValueError("Power must be a non-negative integer.")
elif maxpower is not None and power > maxpower:
raise ValueError("Power is too large")
elif power == 0:
return np.array([1], dtype=c.dtype)
elif power == 1:
return c
else:
# This can be made more efficient by using powers of two
# in the usual way.
zs = _cseries_to_zseries(c)
prd = zs
for i in range(2, power + 1):
prd = np.convolve(prd, zs)
return _zseries_to_cseries(prd)
def chebder(c, m=1, scl=1, axis=0):
"""
Differentiate a Chebyshev series.
Returns the Chebyshev series coefficients `c` differentiated `m` times
along `axis`. At each iteration the result is multiplied by `scl` (the
scaling factor is for use in a linear change of variable). The argument
`c` is an array of coefficients from low to high degree along each
axis, e.g., [1,2,3] represents the series ``1*T_0 + 2*T_1 + 3*T_2``
while [[1,2],[1,2]] represents ``1*T_0(x)*T_0(y) + 1*T_1(x)*T_0(y) +
2*T_0(x)*T_1(y) + 2*T_1(x)*T_1(y)`` if axis=0 is ``x`` and axis=1 is
``y``.
Parameters
----------
c : array_like
Array of Chebyshev series coefficients. If c is multidimensional
the different axis correspond to different variables with the
degree in each axis given by the corresponding index.
m : int, optional
Number of derivatives taken, must be non-negative. (Default: 1)
scl : scalar, optional
Each differentiation is multiplied by `scl`. The end result is
multiplication by ``scl**m``. This is for use in a linear change of
variable. (Default: 1)
axis : int, optional
Axis over which the derivative is taken. (Default: 0).
.. versionadded:: 1.7.0
Returns
-------
der : ndarray
Chebyshev series of the derivative.
See Also
--------
chebint
Notes
-----
In general, the result of differentiating a C-series needs to be
"reprojected" onto the C-series basis set. Thus, typically, the
result of this function is "unintuitive," albeit correct; see Examples
section below.
Examples
--------
>>> from numpy.polynomial import chebyshev as C
>>> c = (1,2,3,4)
>>> C.chebder(c)
array([ 14., 12., 24.])
>>> C.chebder(c,3)
array([ 96.])
>>> C.chebder(c,scl=-1)
array([-14., -12., -24.])
>>> C.chebder(c,2,-1)
array([ 12., 96.])
"""
c = np.array(c, ndmin=1, copy=1)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
cnt, iaxis = [int(t) for t in [m, axis]]
if cnt != m:
raise ValueError("The order of derivation must be integer")
if cnt < 0:
raise ValueError("The order of derivation must be non-negative")
if iaxis != axis:
raise ValueError("The axis must be integer")
if not -c.ndim <= iaxis < c.ndim:
raise ValueError("The axis is out of range")
if iaxis < 0:
iaxis += c.ndim
if cnt == 0:
return c
c = np.rollaxis(c, iaxis)
n = len(c)
if cnt >= n:
c = c[:1]*0
else:
for i in range(cnt):
n = n - 1
c *= scl
der = np.empty((n,) + c.shape[1:], dtype=c.dtype)
for j in range(n, 2, -1):
der[j - 1] = (2*j)*c[j]
c[j - 2] += (j*c[j])/(j - 2)
if n > 1:
der[1] = 4*c[2]
der[0] = c[1]
c = der
c = np.rollaxis(c, 0, iaxis + 1)
return c
def chebint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
"""
Integrate a Chebyshev series.
Returns the Chebyshev series coefficients `c` integrated `m` times from
`lbnd` along `axis`. At each iteration the resulting series is
**multiplied** by `scl` and an integration constant, `k`, is added.
The scaling factor is for use in a linear change of variable. ("Buyer
beware": note that, depending on what one is doing, one may want `scl`
to be the reciprocal of what one might expect; for more information,
see the Notes section below.) The argument `c` is an array of
coefficients from low to high degree along each axis, e.g., [1,2,3]
represents the series ``T_0 + 2*T_1 + 3*T_2`` while [[1,2],[1,2]]
represents ``1*T_0(x)*T_0(y) + 1*T_1(x)*T_0(y) + 2*T_0(x)*T_1(y) +
2*T_1(x)*T_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``.
Parameters
----------
c : array_like
Array of Chebyshev series coefficients. If c is multidimensional
the different axis correspond to different variables with the
degree in each axis given by the corresponding index.
m : int, optional
Order of integration, must be positive. (Default: 1)
k : {[], list, scalar}, optional
Integration constant(s). The value of the first integral at zero
is the first value in the list, the value of the second integral
at zero is the second value, etc. If ``k == []`` (the default),
all constants are set to zero. If ``m == 1``, a single scalar can
be given instead of a list.
lbnd : scalar, optional
The lower bound of the integral. (Default: 0)
scl : scalar, optional
Following each integration the result is *multiplied* by `scl`
before the integration constant is added. (Default: 1)
axis : int, optional
Axis over which the integral is taken. (Default: 0).
.. versionadded:: 1.7.0
Returns
-------
S : ndarray
C-series coefficients of the integral.
Raises
------
ValueError
If ``m < 1``, ``len(k) > m``, ``np.isscalar(lbnd) == False``, or
``np.isscalar(scl) == False``.
See Also
--------
chebder
Notes
-----
Note that the result of each integration is *multiplied* by `scl`.
Why is this important to note? Say one is making a linear change of
variable :math:`u = ax + b` in an integral relative to `x`. Then
:math:`dx = du/a`, so one will need to set `scl` equal to
:math:`1/a` - perhaps not what one would have first thought.
Also note that, in general, the result of integrating a C-series needs
to be "reprojected" onto the C-series basis set. Thus, typically,
the result of this function is "unintuitive," albeit correct; see
Examples section below.
Examples
--------
>>> from numpy.polynomial import chebyshev as C
>>> c = (1,2,3)
>>> C.chebint(c)
array([ 0.5, -0.5, 0.5, 0.5])
>>> C.chebint(c,3)
array([ 0.03125 , -0.1875 , 0.04166667, -0.05208333, 0.01041667,
0.00625 ])
>>> C.chebint(c, k=3)
array([ 3.5, -0.5, 0.5, 0.5])
>>> C.chebint(c,lbnd=-2)
array([ 8.5, -0.5, 0.5, 0.5])
>>> C.chebint(c,scl=-2)
array([-1., 1., -1., -1.])
"""
c = np.array(c, ndmin=1, copy=1)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if not np.iterable(k):
k = [k]
cnt, iaxis = [int(t) for t in [m, axis]]
if cnt != m:
raise ValueError("The order of integration must be integer")
if cnt < 0:
raise ValueError("The order of integration must be non-negative")
if len(k) > cnt:
raise ValueError("Too many integration constants")
if iaxis != axis:
raise ValueError("The axis must be integer")
if not -c.ndim <= iaxis < c.ndim:
raise ValueError("The axis is out of range")
if iaxis < 0:
iaxis += c.ndim
if cnt == 0:
return c
c = np.rollaxis(c, iaxis)
k = list(k) + [0]*(cnt - len(k))
for i in range(cnt):
n = len(c)
c *= scl
if n == 1 and np.all(c[0] == 0):
c[0] += k[i]
else:
tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype)
tmp[0] = c[0]*0
tmp[1] = c[0]
if n > 1:
tmp[2] = c[1]/4
for j in range(2, n):
tmp[j + 1] = c[j]/(2*(j + 1))
tmp[j - 1] -= c[j]/(2*(j - 1))
tmp[0] += k[i] - chebval(lbnd, tmp)
c = tmp
c = np.rollaxis(c, 0, iaxis + 1)
return c
def chebval(x, c, tensor=True):
"""
Evaluate a Chebyshev series at points x.
If `c` is of length `n + 1`, this function returns the value:
.. math:: p(x) = c_0 * T_0(x) + c_1 * T_1(x) + ... + c_n * T_n(x)
The parameter `x` is converted to an array only if it is a tuple or a
list, otherwise it is treated as a scalar. In either case, either `x`
or its elements must support multiplication and addition both with
themselves and with the elements of `c`.
If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If
`c` is multidimensional, then the shape of the result depends on the
value of `tensor`. If `tensor` is true the shape will be c.shape[1:] +
x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that
scalars have shape ().
Trailing zeros in the coefficients will be used in the evaluation, so
they should be avoided if efficiency is a concern.
Parameters
----------
x : array_like, compatible object
If `x` is a list or tuple, it is converted to an ndarray, otherwise
it is left unchanged and treated as a scalar. In either case, `x`
or its elements must support addition and multiplication with
themselves and with the elements of `c`.
c : array_like
Array of coefficients ordered so that the coefficients for terms of
degree n are contained in c[n]. If `c` is multidimensional the
remaining indices enumerate multiple polynomials. In the two
dimensional case the coefficients may be thought of as stored in
the columns of `c`.
tensor : boolean, optional
If True, the shape of the coefficient array is extended with ones
on the right, one for each dimension of `x`. Scalars have dimension 0
for this action. The result is that every column of coefficients in
`c` is evaluated for every element of `x`. If False, `x` is broadcast
over the columns of `c` for the evaluation. This keyword is useful
when `c` is multidimensional. The default value is True.
.. versionadded:: 1.7.0
Returns
-------
values : ndarray, algebra_like
The shape of the return value is described above.
See Also
--------
chebval2d, chebgrid2d, chebval3d, chebgrid3d
Notes
-----
The evaluation uses Clenshaw recursion, aka synthetic division.
Examples
--------
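(A minimal sketch, added for illustration: the series
``1*T_0 + 2*T_1 + 3*T_2`` evaluated at ``x = 0.5``, where
``T_2(0.5) = 2*0.25 - 1 = -0.5``.)
>>> from numpy.polynomial.chebyshev import chebval
>>> chebval(0.5, (1, 2, 3))
0.5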
"""
c = np.array(c, ndmin=1, copy=1)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if isinstance(x, (tuple, list)):
x = np.asarray(x)
if isinstance(x, np.ndarray) and tensor:
c = c.reshape(c.shape + (1,)*x.ndim)
if len(c) == 1:
c0 = c[0]
c1 = 0
elif len(c) == 2:
c0 = c[0]
c1 = c[1]
else:
x2 = 2*x
c0 = c[-2]
c1 = c[-1]
for i in range(3, len(c) + 1):
tmp = c0
c0 = c[-i] - c1
c1 = tmp + c1*x2
return c0 + c1*x
def chebval2d(x, y, c):
"""
Evaluate a 2-D Chebyshev series at points (x, y).
This function returns the values:
.. math:: p(x,y) = \\sum_{i,j} c_{i,j} * T_i(x) * T_j(y)
The parameters `x` and `y` are converted to arrays only if they are
tuples or lists, otherwise they are treated as scalars and they
must have the same shape after conversion. In either case, either `x`
and `y` or their elements must support multiplication and addition both
with themselves and with the elements of `c`.
If `c` is a 1-D array a one is implicitly appended to its shape to make
it 2-D. The shape of the result will be c.shape[2:] + x.shape.
Parameters
----------
x, y : array_like, compatible objects
The two dimensional series is evaluated at the points `(x, y)`,
where `x` and `y` must have the same shape. If `x` or `y` is a list
or tuple, it is first converted to an ndarray, otherwise it is left
unchanged and, if it isn't an ndarray, it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term
of multi-degree i,j is contained in ``c[i,j]``. If `c` has
dimension greater than 2 the remaining indices enumerate multiple
sets of coefficients.
Returns
-------
values : ndarray, compatible object
The values of the two dimensional Chebyshev series at points formed
from pairs of corresponding values from `x` and `y`.
See Also
--------
chebval, chebgrid2d, chebval3d, chebgrid3d
Notes
-----
.. versionadded::1.7.0
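Examples
--------
(A sketch, added for illustration: ``c = [[0, 0], [0, 1]]`` is the
series ``T_1(x)*T_1(y) = x*y``, evaluated here at ``(0.5, 0.5)``.)
>>> from numpy.polynomial.chebyshev import chebval2d
>>> chebval2d(0.5, 0.5, [[0, 0], [0, 1]])
0.25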
"""
try:
x, y = np.array((x, y), copy=0)
except Exception:
raise ValueError('x, y are incompatible')
c = chebval(x, c)
c = chebval(y, c, tensor=False)
return c
def chebgrid2d(x, y, c):
"""
Evaluate a 2-D Chebyshev series on the Cartesian product of x and y.
This function returns the values:
.. math:: p(a,b) = \\sum_{i,j} c_{i,j} * T_i(a) * T_j(b),
where the points `(a, b)` consist of all pairs formed by taking
`a` from `x` and `b` from `y`. The resulting points form a grid with
`x` in the first dimension and `y` in the second.
The parameters `x` and `y` are converted to arrays only if they are
tuples or lists, otherwise they are treated as scalars. In either
case, either `x` and `y` or their elements must support multiplication
and addition both with themselves and with the elements of `c`.
If `c` has fewer than two dimensions, ones are implicitly appended to
its shape to make it 2-D. The shape of the result will be c.shape[2:] +
x.shape + y.shape.
Parameters
----------
x, y : array_like, compatible objects
The two dimensional series is evaluated at the points in the
Cartesian product of `x` and `y`. If `x` or `y` is a list or
tuple, it is first converted to an ndarray, otherwise it is left
unchanged and, if it isn't an ndarray, it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term of
multi-degree i,j is contained in `c[i,j]`. If `c` has dimension
greater than two the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
The values of the two dimensional Chebyshev series at points in the
Cartesian product of `x` and `y`.
See Also
--------
chebval, chebval2d, chebval3d, chebgrid3d
Notes
-----
.. versionadded::1.7.0
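Examples
--------
(A sketch, added for illustration: with ``c = [[0, 0], [0, 1]]``, i.e.
``T_1(a)*T_1(b) = a*b``, the grid values are the outer product of the
two point lists.)
>>> import numpy as np
>>> from numpy.polynomial.chebyshev import chebgrid2d
>>> chebgrid2d([0, 1], [0, 0.5, 1], [[0, 0], [0, 1]]).shape
(2, 3)
>>> np.allclose(chebgrid2d([0, 1], [0, 0.5, 1], [[0, 0], [0, 1]]),
... np.multiply.outer([0., 1.], [0., 0.5, 1.]))
True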
"""
c = chebval(x, c)
c = chebval(y, c)
return c
def chebval3d(x, y, z, c):
"""
Evaluate a 3-D Chebyshev series at points (x, y, z).
This function returns the values:
.. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * T_i(x) * T_j(y) * T_k(z)
The parameters `x`, `y`, and `z` are converted to arrays only if
they are tuples or lists, otherwise they are treated as scalars and
they must have the same shape after conversion. In either case, either
`x`, `y`, and `z` or their elements must support multiplication and
addition both with themselves and with the elements of `c`.
If `c` has fewer than 3 dimensions, ones are implicitly appended to its
shape to make it 3-D. The shape of the result will be c.shape[3:] +
x.shape.
Parameters
----------
x, y, z : array_like, compatible object
The three dimensional series is evaluated at the points
`(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If
any of `x`, `y`, or `z` is a list or tuple, it is first converted
to an ndarray, otherwise it is left unchanged and, if it isn't an
ndarray, it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term of
multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
greater than 3 the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
The values of the multidimensional polynomial on points formed with
triples of corresponding values from `x`, `y`, and `z`.
See Also
--------
chebval, chebval2d, chebgrid2d, chebgrid3d
Notes
-----
.. versionadded::1.7.0
"""
try:
x, y, z = np.array((x, y, z), copy=0)
except Exception:
raise ValueError('x, y, z are incompatible')
c = chebval(x, c)
c = chebval(y, c, tensor=False)
c = chebval(z, c, tensor=False)
return c
def chebgrid3d(x, y, z, c):
"""
Evaluate a 3-D Chebyshev series on the Cartesian product of x, y, and z.
This function returns the values:
.. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * T_i(a) * T_j(b) * T_k(c)
where the points `(a, b, c)` consist of all triples formed by taking
`a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form
a grid with `x` in the first dimension, `y` in the second, and `z` in
the third.
The parameters `x`, `y`, and `z` are converted to arrays only if they
are tuples or lists, otherwise they are treated as scalars. In
either case, either `x`, `y`, and `z` or their elements must support
multiplication and addition both with themselves and with the elements
of `c`.
If `c` has fewer than three dimensions, ones are implicitly appended to
its shape to make it 3-D. The shape of the result will be c.shape[3:] +
x.shape + y.shape + z.shape.
Parameters
----------
x, y, z : array_like, compatible objects
The three dimensional series is evaluated at the points in the
Cartesian product of `x`, `y`, and `z`. If `x`,`y`, or `z` is a
list or tuple, it is first converted to an ndarray, otherwise it is
left unchanged and, if it isn't an ndarray, it is treated as a
scalar.
c : array_like
Array of coefficients ordered so that the coefficients for terms of
degree i,j,k are contained in ``c[i,j,k]``. If `c` has dimension
greater than three the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
The values of the three dimensional Chebyshev series at points in the
Cartesian product of `x`, `y`, and `z`.
See Also
--------
chebval, chebval2d, chebgrid2d, chebval3d
Notes
-----
.. versionadded::1.7.0
"""
c = chebval(x, c)
c = chebval(y, c)
c = chebval(z, c)
return c
def chebvander(x, deg):
"""Pseudo-Vandermonde matrix of given degree.
Returns the pseudo-Vandermonde matrix of degree `deg` and sample points
`x`. The pseudo-Vandermonde matrix is defined by
.. math:: V[..., i] = T_i(x),
where `0 <= i <= deg`. The leading indices of `V` index the elements of
`x` and the last index is the degree of the Chebyshev polynomial.
If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the
matrix ``V = chebvander(x, n)``, then ``np.dot(V, c)`` and
``chebval(x, c)`` are the same up to roundoff. This equivalence is
useful both for least squares fitting and for the evaluation of a large
number of Chebyshev series of the same degree and sample points.
Parameters
----------
x : array_like
Array of points. The dtype is converted to float64 or complex128
depending on whether any of the elements are complex. If `x` is
scalar it is converted to a 1-D array.
deg : int
Degree of the resulting matrix.
Returns
-------
vander : ndarray
The pseudo Vandermonde matrix. The shape of the returned matrix is
``x.shape + (deg + 1,)``, where the last index is the degree of the
corresponding Chebyshev polynomial. The dtype will be the same as
the converted `x`.
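Examples
--------
(A sketch of the ``np.dot(V, c)`` equivalence described above; the
sample points are chosen so the columns are easy to check by hand.)
>>> import numpy as np
>>> from numpy.polynomial.chebyshev import chebvander, chebval
>>> x = np.array([-1., 0., 1.])
>>> V = chebvander(x, 2) # columns are T_0(x), T_1(x), T_2(x)
>>> np.allclose(np.dot(V, (1, 2, 3)), chebval(x, (1, 2, 3)))
True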
"""
ideg = int(deg)
if ideg != deg:
raise ValueError("deg must be integer")
if ideg < 0:
raise ValueError("deg must be non-negative")
x = np.array(x, copy=0, ndmin=1) + 0.0
dims = (ideg + 1,) + x.shape
dtyp = x.dtype
v = np.empty(dims, dtype=dtyp)
# Use forward recursion to generate the entries.
v[0] = x*0 + 1
if ideg > 0:
x2 = 2*x
v[1] = x
for i in range(2, ideg + 1):
v[i] = v[i-1]*x2 - v[i-2]
return np.rollaxis(v, 0, v.ndim)
def chebvander2d(x, y, deg):
"""Pseudo-Vandermonde matrix of given degrees.
Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
points `(x, y)`. The pseudo-Vandermonde matrix is defined by
.. math:: V[..., (deg[1] + 1)*i + j] = T_i(x) * T_j(y),
where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of
`V` index the points `(x, y)` and the last index encodes the degrees of
the Chebyshev polynomials.
If ``V = chebvander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
correspond to the elements of a 2-D coefficient array `c` of shape
(xdeg + 1, ydeg + 1) in the order
.. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...
and ``np.dot(V, c.flat)`` and ``chebval2d(x, y, c)`` will be the same
up to roundoff. This equivalence is useful both for least squares
fitting and for the evaluation of a large number of 2-D Chebyshev
series of the same degrees and sample points.
Parameters
----------
x, y : array_like
Arrays of point coordinates, all of the same shape. The dtypes
will be converted to either float64 or complex128 depending on
whether any of the elements are complex. Scalars are converted to
1-D arrays.
deg : list of ints
List of maximum degrees of the form [x_deg, y_deg].
Returns
-------
vander2d : ndarray
The shape of the returned matrix is ``x.shape + (order,)``, where
:math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same
as the converted `x` and `y`.
See Also
--------
chebvander, chebvander3d, chebval2d, chebval3d
Notes
-----
.. versionadded::1.7.0
"""
ideg = [int(d) for d in deg]
is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
if is_valid != [1, 1]:
raise ValueError("degrees must be non-negative integers")
degx, degy = ideg
x, y = np.array((x, y), copy=0) + 0.0
vx = chebvander(x, degx)
vy = chebvander(y, degy)
v = vx[..., None]*vy[..., None,:]
return v.reshape(v.shape[:-2] + (-1,))
def chebvander3d(x, y, z, deg):
"""Pseudo-Vandermonde matrix of given degrees.
Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`,
then The pseudo-Vandermonde matrix is defined by
.. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = T_i(x)*T_j(y)*T_k(z),
where `0 <= i <= l`, `0 <= j <= m`, and `0 <= k <= n`. The leading
indices of `V` index the points `(x, y, z)` and the last index encodes
the degrees of the Chebyshev polynomials.
If ``V = chebvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
of `V` correspond to the elements of a 3-D coefficient array `c` of
shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order
.. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...
and ``np.dot(V, c.flat)`` and ``chebval3d(x, y, z, c)`` will be the
same up to roundoff. This equivalence is useful both for least squares
fitting and for the evaluation of a large number of 3-D Chebyshev
series of the same degrees and sample points.
Parameters
----------
x, y, z : array_like
Arrays of point coordinates, all of the same shape. The dtypes will
be converted to either float64 or complex128 depending on whether
any of the elements are complex. Scalars are converted to 1-D
arrays.
deg : list of ints
List of maximum degrees of the form [x_deg, y_deg, z_deg].
Returns
-------
vander3d : ndarray
The shape of the returned matrix is ``x.shape + (order,)``, where
:math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will
be the same as the converted `x`, `y`, and `z`.
See Also
--------
chebvander, chebvander2d, chebval2d, chebval3d
Notes
-----
.. versionadded::1.7.0
"""
ideg = [int(d) for d in deg]
is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
if is_valid != [1, 1, 1]:
raise ValueError("degrees must be non-negative integers")
degx, degy, degz = ideg
x, y, z = np.array((x, y, z), copy=0) + 0.0
vx = chebvander(x, degx)
vy = chebvander(y, degy)
vz = chebvander(z, degz)
v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:]
return v.reshape(v.shape[:-3] + (-1,))
def chebfit(x, y, deg, rcond=None, full=False, w=None):
"""
Least squares fit of Chebyshev series to data.
Return the coefficients of a Chebyshev series of degree `deg` that is the
least squares fit to the data values `y` given at points `x`. If `y` is
1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple
fits are done, one for each column of `y`, and the resulting
coefficients are stored in the corresponding columns of a 2-D array.
The fitted polynomial(s) are in the form
.. math:: p(x) = c_0 + c_1 * T_1(x) + ... + c_n * T_n(x),
where `n` is `deg`.
Parameters
----------
x : array_like, shape (M,)
x-coordinates of the M sample points ``(x[i], y[i])``.
y : array_like, shape (M,) or (M, K)
y-coordinates of the sample points. Several data sets of sample
points sharing the same x-coordinates can be fitted at once by
passing in a 2D-array that contains one dataset per column.
deg : int or 1-D array_like
Degree(s) of the fitting polynomials. If `deg` is a single integer
all terms up to and including the `deg`'th term are included in the
fit. For Numpy versions >= 1.11 a list of integers specifying the
degrees of the terms to include may be used instead.
rcond : float, optional
Relative condition number of the fit. Singular values smaller than
this relative to the largest singular value will be ignored. The
default value is len(x)*eps, where eps is the relative precision of
the float type, about 2e-16 in most cases.
full : bool, optional
Switch determining nature of return value. When it is False (the
default) just the coefficients are returned, when True diagnostic
information from the singular value decomposition is also returned.
w : array_like, shape (`M`,), optional
Weights. If not None, the contribution of each point
``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the
weights are chosen so that the errors of the products ``w[i]*y[i]``
all have the same variance. The default value is None.
.. versionadded:: 1.5.0
Returns
-------
coef : ndarray, shape (deg + 1,) or (deg + 1, K)
Chebyshev coefficients ordered from low to high. If `y` was 2-D,
the coefficients for the data in column k of `y` are in column
`k`.
[residuals, rank, singular_values, rcond] : list
These values are only returned if `full` = True
resid -- sum of squared residuals of the least squares fit
rank -- the numerical rank of the scaled Vandermonde matrix
sv -- singular values of the scaled Vandermonde matrix
rcond -- value of `rcond`.
For more details, see `linalg.lstsq`.
Warns
-----
RankWarning
The rank of the coefficient matrix in the least-squares fit is
deficient. The warning is only raised if `full` = False. The
warnings can be turned off by
>>> import warnings
>>> warnings.simplefilter('ignore', RankWarning)
See Also
--------
polyfit, legfit, lagfit, hermfit, hermefit
chebval : Evaluates a Chebyshev series.
chebvander : Vandermonde matrix of Chebyshev series.
chebweight : Chebyshev weight function.
linalg.lstsq : Computes a least-squares fit from the matrix.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
-----
The solution is the coefficients of the Chebyshev series `p` that
minimizes the sum of the weighted squared errors
.. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2,
where :math:`w_j` are the weights. This problem is solved by setting up
as the (typically) overdetermined matrix equation
.. math:: V(x) * c = w * y,
where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the
coefficients to be solved for, `w` are the weights, and `y` are the
observed values. This equation is then solved using the singular value
decomposition of `V`.
If some of the singular values of `V` are so small that they are
neglected, then a `RankWarning` will be issued. This means that the
coefficient values may be poorly determined. Using a lower order fit
will usually get rid of the warning. The `rcond` parameter can also be
set to a value smaller than its default, but the resulting fit may be
spurious and have large contributions from roundoff error.
Fits using Chebyshev series are usually better conditioned than fits
using power series, but much can depend on the distribution of the
sample points and the smoothness of the data. If the quality of the fit
is inadequate splines may be a good alternative.
References
----------
.. [1] Wikipedia, "Curve fitting",
http://en.wikipedia.org/wiki/Curve_fitting
Examples
--------
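(A sketch, added for illustration: recovering the series ``(1, 2, 3)``
from exact samples of it.)
>>> import numpy as np
>>> from numpy.polynomial import chebyshev as C
>>> x = np.linspace(-1, 1, 11)
>>> y = C.chebval(x, (1, 2, 3))
>>> np.allclose(C.chebfit(x, y, 2), (1, 2, 3))
True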
"""
x = np.asarray(x) + 0.0
y = np.asarray(y) + 0.0
deg = np.asarray(deg)
# check arguments.
if deg.ndim > 1 or deg.dtype.kind not in 'iu' or deg.size == 0:
raise TypeError("deg must be an int or non-empty 1-D array of int")
if deg.min() < 0:
raise ValueError("expected deg >= 0")
if x.ndim != 1:
raise TypeError("expected 1D vector for x")
if x.size == 0:
raise TypeError("expected non-empty vector for x")
if y.ndim < 1 or y.ndim > 2:
raise TypeError("expected 1D or 2D array for y")
if len(x) != len(y):
raise TypeError("expected x and y to have same length")
if deg.ndim == 0:
lmax = deg
order = lmax + 1
van = chebvander(x, lmax)
else:
deg = np.sort(deg)
lmax = deg[-1]
order = len(deg)
van = chebvander(x, lmax)[:, deg]
# set up the least squares matrices in transposed form
lhs = van.T
rhs = y.T
if w is not None:
w = np.asarray(w) + 0.0
if w.ndim != 1:
raise TypeError("expected 1D vector for w")
if len(x) != len(w):
raise TypeError("expected x and w to have same length")
# apply weights. Don't use inplace operations as they
# can cause problems with NA.
lhs = lhs * w
rhs = rhs * w
# set rcond
if rcond is None:
rcond = len(x)*np.finfo(x.dtype).eps
# Determine the norms of the design matrix columns.
if issubclass(lhs.dtype.type, np.complexfloating):
scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1))
else:
scl = np.sqrt(np.square(lhs).sum(1))
scl[scl == 0] = 1
# Solve the least squares problem.
c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond)
c = (c.T/scl).T
# Expand c to include non-fitted coefficients which are set to zero
if deg.ndim > 0:
if c.ndim == 2:
cc = np.zeros((lmax + 1, c.shape[1]), dtype=c.dtype)
else:
cc = np.zeros(lmax + 1, dtype=c.dtype)
cc[deg] = c
c = cc
# warn on rank reduction
if rank != order and not full:
msg = "The fit may be poorly conditioned"
warnings.warn(msg, pu.RankWarning)
if full:
return c, [resids, rank, s, rcond]
else:
return c
def chebcompanion(c):
"""Return the scaled companion matrix of c.
The basis polynomials are scaled so that the companion matrix is
symmetric when `c` is a Chebyshev basis polynomial. This provides
better eigenvalue estimates than the unscaled case and for basis
polynomials the eigenvalues are guaranteed to be real if
`numpy.linalg.eigvalsh` is used to obtain them.
Parameters
----------
c : array_like
1-D array of Chebyshev series coefficients ordered from low to high
degree.
Returns
-------
mat : ndarray
Scaled companion matrix of dimensions (deg, deg).
Notes
-----
.. versionadded::1.7.0
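Examples
--------
(A sketch, added for illustration: for the pure basis polynomial
``T_3`` the scaled companion matrix is symmetric, as promised above,
so ``numpy.linalg.eigvalsh`` may be used on it.)
>>> import numpy as np
>>> from numpy.polynomial.chebyshev import chebcompanion
>>> m = chebcompanion([0, 0, 0, 1])
>>> np.allclose(m, m.T)
True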
"""
# c is a trimmed copy
[c] = pu.as_series([c])
if len(c) < 2:
raise ValueError('Series must have maximum degree of at least 1.')
if len(c) == 2:
return np.array([[-c[0]/c[1]]])
n = len(c) - 1
mat = np.zeros((n, n), dtype=c.dtype)
scl = np.array([1.] + [np.sqrt(.5)]*(n-1))
top = mat.reshape(-1)[1::n+1]
bot = mat.reshape(-1)[n::n+1]
top[0] = np.sqrt(.5)
top[1:] = 1/2
bot[...] = top
mat[:, -1] -= (c[:-1]/c[-1])*(scl/scl[-1])*.5
return mat
def chebroots(c):
"""
Compute the roots of a Chebyshev series.
Return the roots (a.k.a. "zeros") of the polynomial
.. math:: p(x) = \\sum_i c[i] * T_i(x).
Parameters
----------
c : 1-D array_like
1-D array of coefficients.
Returns
-------
out : ndarray
Array of the roots of the series. If all the roots are real,
then `out` is also real, otherwise it is complex.
See Also
--------
polyroots, legroots, lagroots, hermroots, hermeroots
Notes
-----
The root estimates are obtained as the eigenvalues of the companion
matrix. Roots far from the origin of the complex plane may have large
errors due to the numerical instability of the series for such
values. Roots with multiplicity greater than 1 will also show larger
errors as the value of the series near such points is relatively
insensitive to errors in the roots. Isolated roots near the origin can
be improved by a few iterations of Newton's method.
The Chebyshev series basis polynomials aren't powers of `x` so the
results of this function may seem unintuitive.
Examples
--------
>>> import numpy.polynomial.chebyshev as cheb
>>> cheb.chebroots((-1, 1,-1, 1)) # T3 - T2 + T1 - T0 has real roots
array([ -5.00000000e-01, 2.60860684e-17, 1.00000000e+00])
"""
# c is a trimmed copy
[c] = pu.as_series([c])
if len(c) < 2:
return np.array([], dtype=c.dtype)
if len(c) == 2:
return np.array([-c[0]/c[1]])
m = chebcompanion(c)
r = la.eigvals(m)
r.sort()
return r
def chebgauss(deg):
"""
Gauss-Chebyshev quadrature.
Computes the sample points and weights for Gauss-Chebyshev quadrature.
These sample points and weights will correctly integrate polynomials of
degree :math:`2*deg - 1` or less over the interval :math:`[-1, 1]` with
the weight function :math:`f(x) = 1/\sqrt{1 - x^2}`.
Parameters
----------
deg : int
Number of sample points and weights. It must be >= 1.
Returns
-------
x : ndarray
1-D ndarray containing the sample points.
y : ndarray
1-D ndarray containing the weights.
Notes
-----
.. versionadded:: 1.7.0
The results have only been tested up to degree 100; higher degrees may
be problematic. For Gauss-Chebyshev there are closed form solutions for
the sample points and weights. If n = `deg`, then
.. math:: x_i = \cos(\pi (2 i - 1) / (2 n))
.. math:: w_i = \pi / n
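Examples
--------
(A sketch of the closed forms above for ``n = 2``.)
>>> import numpy as np
>>> from numpy.polynomial.chebyshev import chebgauss
>>> x, w = chebgauss(2)
>>> np.allclose(x, np.cos(np.pi*np.array([1, 3])/4))
True
>>> np.allclose(w, np.pi/2)
True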
"""
ideg = int(deg)
if ideg != deg or ideg < 1:
raise ValueError("deg must be a non-negative integer")
x = np.cos(np.pi * np.arange(1, 2*ideg, 2) / (2.0*ideg))
w = np.ones(ideg)*(np.pi/ideg)
return x, w
def chebweight(x):
"""
The weight function of the Chebyshev polynomials.
The weight function is :math:`1/\sqrt{1 - x^2}` and the interval of
integration is :math:`[-1, 1]`. The Chebyshev polynomials are
orthogonal, but not normalized, with respect to this weight function.
Parameters
----------
x : array_like
Values at which the weight function will be computed.
Returns
-------
w : ndarray
The weight function at `x`.
Notes
-----
.. versionadded:: 1.7.0
"""
w = 1./(np.sqrt(1. + x) * np.sqrt(1. - x))
return w
def chebpts1(npts):
"""
Chebyshev points of the first kind.
The Chebyshev points of the first kind are the points ``cos(x)``,
where ``x = [pi*(k + .5)/npts for k in range(npts)]``.
Parameters
----------
npts : int
Number of sample points desired.
Returns
-------
pts : ndarray
The Chebyshev points of the first kind.
See Also
--------
chebpts2
Notes
-----
.. versionadded:: 1.5.0
"""
_npts = int(npts)
if _npts != npts:
raise ValueError("npts must be integer")
if _npts < 1:
raise ValueError("npts must be >= 1")
x = np.linspace(-np.pi, 0, _npts, endpoint=False) + np.pi/(2*_npts)
return np.cos(x)
def chebpts2(npts):
"""
Chebyshev points of the second kind.
The Chebyshev points of the second kind are the points ``cos(x)``,
where ``x = [pi*k/(npts - 1) for k in range(npts)]``.
Parameters
----------
npts : int
Number of sample points desired.
Returns
-------
pts : ndarray
The Chebyshev points of the second kind.
Notes
-----
.. versionadded:: 1.5.0
"""
_npts = int(npts)
if _npts != npts:
raise ValueError("npts must be integer")
if _npts < 2:
raise ValueError("npts must be >= 2")
x = np.linspace(-np.pi, 0, _npts)
return np.cos(x)
#
# Chebyshev series class
#
class Chebyshev(ABCPolyBase):
"""A Chebyshev series class.
The Chebyshev class provides the standard Python numerical methods
'+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the
methods listed below.
Parameters
----------
coef : array_like
Chebyshev coefficients in order of increasing degree, i.e.,
``(1, 2, 3)`` gives ``1*T_0(x) + 2*T_1(x) + 3*T_2(x)``.
domain : (2,) array_like, optional
Domain to use. The interval ``[domain[0], domain[1]]`` is mapped
to the interval ``[window[0], window[1]]`` by shifting and scaling.
The default value is [-1, 1].
window : (2,) array_like, optional
Window, see `domain` for its use. The default value is [-1, 1].
.. versionadded:: 1.6.0
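Examples
--------
(A brief sketch, added for illustration, of the arithmetic the class
provides; coefficients are stored as floats.)
>>> from numpy.polynomial import Chebyshev
>>> p = Chebyshev([1, 2, 3])
>>> (p + p).coef
array([ 2., 4., 6.])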
"""
# Virtual Functions
_add = staticmethod(chebadd)
_sub = staticmethod(chebsub)
_mul = staticmethod(chebmul)
_div = staticmethod(chebdiv)
_pow = staticmethod(chebpow)
_val = staticmethod(chebval)
_int = staticmethod(chebint)
_der = staticmethod(chebder)
_fit = staticmethod(chebfit)
_line = staticmethod(chebline)
_roots = staticmethod(chebroots)
_fromroots = staticmethod(chebfromroots)
# Virtual properties
nickname = 'cheb'
domain = np.array(chebdomain)
window = np.array(chebdomain)
| mit |
josjevv/django-cms | cms/cache/__init__.py | 46 | 2928 | # -*- coding: utf-8 -*-
import re
from cms.utils import get_cms_setting
CMS_PAGE_CACHE_VERSION_KEY = get_cms_setting("CACHE_PREFIX") + 'CMS_PAGE_CACHE_VERSION'
def _get_cache_version():
"""
Returns the current page cache version, explicitly setting one if not
defined.
"""
from django.core.cache import cache
version = cache.get(CMS_PAGE_CACHE_VERSION_KEY)
if version:
return version
else:
_set_cache_version(1)
return 1
def _set_cache_version(version):
"""
Set the cache version to the specified value.
"""
from django.core.cache import cache
cache.set(
CMS_PAGE_CACHE_VERSION_KEY,
version,
get_cms_setting('CACHE_DURATIONS')['content']
)
def invalidate_cms_page_cache():
"""
Invalidates the CMS PAGE CACHE.
"""
#
# NOTE: We're using a cache versioning strategy for invalidating the page
# cache when necessary. Instead of wiping all the old entries, we simply
# increment the version number rendering all previous entries
# inaccessible and left to expire naturally.
#
# ALSO NOTE: According to the Django documentation, a timeout value of
# `None' (in version 1.6+) is supposed to mean "cache forever", however,
# this is actually implemented as only slightly less than 30 days in
# some backends (memcached, in particular). In older Djangos, `None' means
# "use default value". To avoid issues arising from different Django
# versions and cache backend implementations, we will explicitly set the
# lifespan of the CMS_PAGE_CACHE_VERSION entry to whatever is set in
# settings.CACHE_DURATIONS['content']. This allows users to adjust as
# necessary for their backend.
#
# To prevent writing cache entries that will live longer than our version
# key, we will always re-write the current version number into the cache
# just after we write any new cache entries, thus ensuring that the
# version number will always outlive any entries written against that
# version. This is a cheap operation.
#
# If there are no new cache writes before the version key expires, it's
# perfectly OK, since any previous entries cached against that version
# will have also expired, so, it'd be pointless to try to access them
# anyway.
#
version = _get_cache_version()
_set_cache_version(version + 1)
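# Illustrative sketch (a hypothetical helper, not part of this module): one
# way to realize the strategy described above is to pass the current version
# to Django's ``cache.set`` and then immediately re-write the version key so
# that it outlives the entry just written.
def _example_versioned_set(key, value):
    from django.core.cache import cache
    version = _get_cache_version()
    cache.set(key, value,
              get_cms_setting('CACHE_DURATIONS')['content'],
              version=version)
    # Cheap re-write keeping the version key alive longer than the entry.
    _set_cache_version(version)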
CLEAN_KEY_PATTERN = re.compile(r'[^a-zA-Z0-9_-]')
def _clean_key(key):
return CLEAN_KEY_PATTERN.sub('-', key)
def _get_cache_key(name, page_lookup, lang, site_id):
from cms.models import Page
if isinstance(page_lookup, Page):
page_key = str(page_lookup.pk)
else:
page_key = str(page_lookup)
page_key = _clean_key(page_key)
return get_cms_setting('CACHE_PREFIX') + name + '__page_lookup:' + page_key + '_site:' + str(site_id) + '_lang:' + str(lang)
| bsd-3-clause |
lixiangning888/whole_project | modules/signatures_orignal/modifies_uac_notify.py | 2 | 1067 | # Copyright (C) 2015 Kevin Ross
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
from lib.cuckoo.common.abstracts import Signature
class ModifiesUACNotify(Signature):
name = "modify_uac_prompt"
description = "Attempts to modify UAC prompt behavior"
severity = 3
categories = ["stealth"]
authors = ["Kevin Ross"]
minimum = "1.2"
def run(self):
reg_indicators = [
".*\\\\SOFTWARE\\\\(Wow6432Node\\\\)?Microsoft\\\\Windows\\\\CurrentVersion\\\\Policies\\\\System\\\\ConsentPromptBehaviorAdmin$",
".*\\\\SOFTWARE\\\\(Wow6432Node\\\\)?Microsoft\\\\Windows\\\\CurrentVersion\\\\Policies\\\\System\\\\ConsentPromptBehaviorUser$",
".*\\\\SOFTWARE\\\\(Wow6432Node\\\\)?Microsoft\\\\Windows\\\\CurrentVersion\\\\Policies\\\\System\\\\PromptOnSecureDesktop$",
]
for indicator in reg_indicators:
if self.check_write_key(pattern=indicator, regex=True):
return True
return False
| lgpl-3.0 |
blacklin/kbengine | kbe/res/scripts/common/Lib/test/_test_multiprocessing.py | 67 | 120261 | #
# Unit tests for the multiprocessing package
#
import unittest
import queue as pyqueue
import time
import io
import itertools
import sys
import os
import gc
import errno
import signal
import array
import socket
import random
import logging
import struct
import operator
import test.support
import test.script_helper
# Skip tests if _multiprocessing wasn't built.
_multiprocessing = test.support.import_module('_multiprocessing')
# Skip tests if sem_open implementation is broken.
test.support.import_module('multiprocessing.synchronize')
# import threading after _multiprocessing to raise a more relevant error
# message: "No module named _multiprocessing". _multiprocessing is not compiled
# without thread support.
import threading
import multiprocessing.dummy
import multiprocessing.connection
import multiprocessing.managers
import multiprocessing.heap
import multiprocessing.pool
from multiprocessing import util
try:
from multiprocessing import reduction
HAS_REDUCTION = reduction.HAVE_SEND_HANDLE
except ImportError:
HAS_REDUCTION = False
try:
from multiprocessing.sharedctypes import Value, copy
HAS_SHAREDCTYPES = True
except ImportError:
HAS_SHAREDCTYPES = False
try:
import msvcrt
except ImportError:
msvcrt = None
#
#
#
def latin(s):
return s.encode('latin')
#
# Constants
#
LOG_LEVEL = util.SUBWARNING
#LOG_LEVEL = logging.DEBUG
DELTA = 0.1
CHECK_TIMINGS = False # making true makes tests take a lot longer
# and can sometimes cause some non-serious
# failures because some calls block a bit
# longer than expected
if CHECK_TIMINGS:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4
else:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1
HAVE_GETVALUE = not getattr(_multiprocessing,
'HAVE_BROKEN_SEM_GETVALUE', False)
WIN32 = (sys.platform == "win32")
from multiprocessing.connection import wait
def wait_for_handle(handle, timeout):
if timeout is not None and timeout < 0.0:
timeout = None
return wait([handle], timeout)
try:
MAXFD = os.sysconf("SC_OPEN_MAX")
except Exception:
MAXFD = 256
# To speed up tests when using the forkserver, we can preload these:
PRELOAD = ['__main__', 'test.test_multiprocessing_forkserver']
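# A sketch (assumption: the platform offers the 'forkserver' start method,
# e.g. Unix) of how the PRELOAD list above is applied so the modules are
# imported once in the fork server and inherited by every worker:
#
# ctx = multiprocessing.get_context('forkserver')
# ctx.set_forkserver_preload(PRELOAD)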
#
# Some tests require ctypes
#
try:
from ctypes import Structure, c_int, c_double
except ImportError:
Structure = object
c_int = c_double = None
def check_enough_semaphores():
"""Check that the system supports enough semaphores to run the test."""
# minimum number of semaphores available according to POSIX
nsems_min = 256
try:
nsems = os.sysconf("SC_SEM_NSEMS_MAX")
except (AttributeError, ValueError):
# sysconf not available or setting not available
return
if nsems == -1 or nsems >= nsems_min:
return
raise unittest.SkipTest("The OS doesn't support enough semaphores "
"to run the test (required: %d)." % nsems_min)
#
# Creates a wrapper for a function which records the time it takes to finish
#
class TimingWrapper(object):
def __init__(self, func):
self.func = func
self.elapsed = None
def __call__(self, *args, **kwds):
t = time.time()
try:
return self.func(*args, **kwds)
finally:
self.elapsed = time.time() - t
#
# Base class for test cases
#
class BaseTestCase(object):
ALLOWED_TYPES = ('processes', 'manager', 'threads')
def assertTimingAlmostEqual(self, a, b):
if CHECK_TIMINGS:
self.assertAlmostEqual(a, b, 1)
def assertReturnsIfImplemented(self, value, func, *args):
try:
res = func(*args)
except NotImplementedError:
pass
else:
return self.assertEqual(value, res)
# For the sanity of Windows users, rather than crashing or freezing in
# multiple ways.
def __reduce__(self, *args):
raise NotImplementedError("shouldn't try to pickle a test case")
__reduce_ex__ = __reduce__
#
# Return the value of a semaphore
#
def get_value(self):
try:
return self.get_value()
except AttributeError:
try:
return self._Semaphore__value
except AttributeError:
try:
return self._value
except AttributeError:
raise NotImplementedError
#
# Testcases
#
class _TestProcess(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def test_current(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
current = self.current_process()
authkey = current.authkey
self.assertTrue(current.is_alive())
self.assertTrue(not current.daemon)
self.assertIsInstance(authkey, bytes)
self.assertTrue(len(authkey) > 0)
self.assertEqual(current.ident, os.getpid())
self.assertEqual(current.exitcode, None)
def test_daemon_argument(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# By default uses the current process's daemon flag.
proc0 = self.Process(target=self._test)
self.assertEqual(proc0.daemon, self.current_process().daemon)
proc1 = self.Process(target=self._test, daemon=True)
self.assertTrue(proc1.daemon)
proc2 = self.Process(target=self._test, daemon=False)
self.assertFalse(proc2.daemon)
@classmethod
def _test(cls, q, *args, **kwds):
current = cls.current_process()
q.put(args)
q.put(kwds)
q.put(current.name)
if cls.TYPE != 'threads':
q.put(bytes(current.authkey))
q.put(current.pid)
def test_process(self):
q = self.Queue(1)
e = self.Event()
args = (q, 1, 2)
kwargs = {'hello':23, 'bye':2.54}
name = 'SomeProcess'
p = self.Process(
target=self._test, args=args, kwargs=kwargs, name=name
)
p.daemon = True
current = self.current_process()
if self.TYPE != 'threads':
self.assertEqual(p.authkey, current.authkey)
self.assertEqual(p.is_alive(), False)
self.assertEqual(p.daemon, True)
self.assertNotIn(p, self.active_children())
self.assertTrue(type(self.active_children()) is list)
self.assertEqual(p.exitcode, None)
p.start()
self.assertEqual(p.exitcode, None)
self.assertEqual(p.is_alive(), True)
self.assertIn(p, self.active_children())
self.assertEqual(q.get(), args[1:])
self.assertEqual(q.get(), kwargs)
self.assertEqual(q.get(), p.name)
if self.TYPE != 'threads':
self.assertEqual(q.get(), current.authkey)
self.assertEqual(q.get(), p.pid)
p.join()
self.assertEqual(p.exitcode, 0)
self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
@classmethod
def _test_terminate(cls):
time.sleep(100)
def test_terminate(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
p = self.Process(target=self._test_terminate)
p.daemon = True
p.start()
self.assertEqual(p.is_alive(), True)
self.assertIn(p, self.active_children())
self.assertEqual(p.exitcode, None)
join = TimingWrapper(p.join)
self.assertEqual(join(0), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), True)
self.assertEqual(join(-1), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), True)
# XXX maybe terminating too soon causes the problems on Gentoo...
time.sleep(1)
p.terminate()
if hasattr(signal, 'alarm'):
# On the Gentoo buildbot waitpid() often seems to block forever.
# We use alarm() to interrupt it if it blocks for too long.
def handler(*args):
raise RuntimeError('join took too long: %s' % p)
old_handler = signal.signal(signal.SIGALRM, handler)
try:
signal.alarm(10)
self.assertEqual(join(), None)
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, old_handler)
else:
self.assertEqual(join(), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
p.join()
# XXX sometimes get p.exitcode == 0 on Windows ...
#self.assertEqual(p.exitcode, -signal.SIGTERM)
def test_cpu_count(self):
try:
cpus = multiprocessing.cpu_count()
except NotImplementedError:
cpus = 1
self.assertTrue(type(cpus) is int)
self.assertTrue(cpus >= 1)
def test_active_children(self):
self.assertEqual(type(self.active_children()), list)
p = self.Process(target=time.sleep, args=(DELTA,))
self.assertNotIn(p, self.active_children())
p.daemon = True
p.start()
self.assertIn(p, self.active_children())
p.join()
self.assertNotIn(p, self.active_children())
@classmethod
def _test_recursion(cls, wconn, id):
wconn.send(id)
if len(id) < 2:
for i in range(2):
p = cls.Process(
target=cls._test_recursion, args=(wconn, id+[i])
)
p.start()
p.join()
def test_recursion(self):
rconn, wconn = self.Pipe(duplex=False)
self._test_recursion(wconn, [])
time.sleep(DELTA)
result = []
while rconn.poll():
result.append(rconn.recv())
expected = [
[],
[0],
[0, 0],
[0, 1],
[1],
[1, 0],
[1, 1]
]
self.assertEqual(result, expected)
@classmethod
def _test_sentinel(cls, event):
event.wait(10.0)
def test_sentinel(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
event = self.Event()
p = self.Process(target=self._test_sentinel, args=(event,))
with self.assertRaises(ValueError):
p.sentinel
p.start()
self.addCleanup(p.join)
sentinel = p.sentinel
self.assertIsInstance(sentinel, int)
self.assertFalse(wait_for_handle(sentinel, timeout=0.0))
event.set()
p.join()
self.assertTrue(wait_for_handle(sentinel, timeout=1))
#
#
#
class _UpperCaser(multiprocessing.Process):
def __init__(self):
multiprocessing.Process.__init__(self)
self.child_conn, self.parent_conn = multiprocessing.Pipe()
def run(self):
self.parent_conn.close()
for s in iter(self.child_conn.recv, None):
self.child_conn.send(s.upper())
self.child_conn.close()
def submit(self, s):
assert type(s) is str
self.parent_conn.send(s)
return self.parent_conn.recv()
def stop(self):
self.parent_conn.send(None)
self.parent_conn.close()
self.child_conn.close()
class _TestSubclassingProcess(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_subclassing(self):
uppercaser = _UpperCaser()
uppercaser.daemon = True
uppercaser.start()
self.assertEqual(uppercaser.submit('hello'), 'HELLO')
self.assertEqual(uppercaser.submit('world'), 'WORLD')
uppercaser.stop()
uppercaser.join()
def test_stderr_flush(self):
# sys.stderr is flushed at process shutdown (issue #13812)
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
testfn = test.support.TESTFN
self.addCleanup(test.support.unlink, testfn)
proc = self.Process(target=self._test_stderr_flush, args=(testfn,))
proc.start()
proc.join()
with open(testfn, 'r') as f:
err = f.read()
# The whole traceback was printed
self.assertIn("ZeroDivisionError", err)
self.assertIn("test_multiprocessing.py", err)
self.assertIn("1/0 # MARKER", err)
@classmethod
def _test_stderr_flush(cls, testfn):
sys.stderr = open(testfn, 'w')
1/0 # MARKER
@classmethod
def _test_sys_exit(cls, reason, testfn):
sys.stderr = open(testfn, 'w')
sys.exit(reason)
def test_sys_exit(self):
# See Issue 13854
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
testfn = test.support.TESTFN
self.addCleanup(test.support.unlink, testfn)
for reason, code in (([1, 2, 3], 1), ('ignore this', 1)):
p = self.Process(target=self._test_sys_exit, args=(reason, testfn))
p.daemon = True
p.start()
p.join(5)
self.assertEqual(p.exitcode, code)
with open(testfn, 'r') as f:
self.assertEqual(f.read().rstrip(), str(reason))
for reason in (True, False, 8):
p = self.Process(target=sys.exit, args=(reason,))
p.daemon = True
p.start()
p.join(5)
self.assertEqual(p.exitcode, reason)
#
#
#
def queue_empty(q):
if hasattr(q, 'empty'):
return q.empty()
else:
return q.qsize() == 0
def queue_full(q, maxsize):
if hasattr(q, 'full'):
return q.full()
else:
return q.qsize() == maxsize
class _TestQueue(BaseTestCase):
@classmethod
def _test_put(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
for i in range(6):
queue.get()
parent_can_continue.set()
def test_put(self):
MAXSIZE = 6
queue = self.Queue(maxsize=MAXSIZE)
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_put,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
queue.put(1)
queue.put(2, True)
queue.put(3, True, None)
queue.put(4, False)
queue.put(5, False, None)
queue.put_nowait(6)
# the values may be in buffer but not yet in pipe so sleep a bit
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
self.assertEqual(queue_full(queue, MAXSIZE), True)
put = TimingWrapper(queue.put)
put_nowait = TimingWrapper(queue.put_nowait)
self.assertRaises(pyqueue.Full, put, 7, False)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, False, None)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put_nowait, 7)
self.assertTimingAlmostEqual(put_nowait.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, True, TIMEOUT1)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1)
self.assertRaises(pyqueue.Full, put, 7, False, TIMEOUT2)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, True, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3)
child_can_start.set()
parent_can_continue.wait()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
proc.join()
@classmethod
def _test_get(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
#queue.put(1)
queue.put(2)
queue.put(3)
queue.put(4)
queue.put(5)
parent_can_continue.set()
def test_get(self):
queue = self.Queue()
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_get,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
child_can_start.set()
parent_can_continue.wait()
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
# Hangs unexpectedly, remove for now
#self.assertEqual(queue.get(), 1)
self.assertEqual(queue.get(True, None), 2)
self.assertEqual(queue.get(True), 3)
self.assertEqual(queue.get(timeout=1), 4)
self.assertEqual(queue.get_nowait(), 5)
self.assertEqual(queue_empty(queue), True)
get = TimingWrapper(queue.get)
get_nowait = TimingWrapper(queue.get_nowait)
self.assertRaises(pyqueue.Empty, get, False)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, False, None)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get_nowait)
self.assertTimingAlmostEqual(get_nowait.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, True, TIMEOUT1)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
self.assertRaises(pyqueue.Empty, get, False, TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3)
proc.join()
@classmethod
def _test_fork(cls, queue):
for i in range(10, 20):
queue.put(i)
# note that at this point the items may only be buffered, so the
# process cannot shutdown until the feeder thread has finished
# pushing items onto the pipe.
def test_fork(self):
# Old versions of Queue would fail to create a new feeder
# thread for a forked process if the original process had its
# own feeder thread. This test checks that this no longer
# happens.
queue = self.Queue()
# put items on queue so that main process starts a feeder thread
for i in range(10):
queue.put(i)
# wait to make sure thread starts before we fork a new process
time.sleep(DELTA)
# fork process
p = self.Process(target=self._test_fork, args=(queue,))
p.daemon = True
p.start()
# check that all expected items are in the queue
for i in range(20):
self.assertEqual(queue.get(), i)
self.assertRaises(pyqueue.Empty, queue.get, False)
p.join()
def test_qsize(self):
q = self.Queue()
try:
self.assertEqual(q.qsize(), 0)
except NotImplementedError:
self.skipTest('qsize method not implemented')
q.put(1)
self.assertEqual(q.qsize(), 1)
q.put(5)
self.assertEqual(q.qsize(), 2)
q.get()
self.assertEqual(q.qsize(), 1)
q.get()
self.assertEqual(q.qsize(), 0)
@classmethod
def _test_task_done(cls, q):
for obj in iter(q.get, None):
time.sleep(DELTA)
q.task_done()
def test_task_done(self):
queue = self.JoinableQueue()
workers = [self.Process(target=self._test_task_done, args=(queue,))
for i in range(4)]
for p in workers:
p.daemon = True
p.start()
for i in range(10):
queue.put(i)
queue.join()
for p in workers:
queue.put(None)
for p in workers:
p.join()
def test_timeout(self):
q = multiprocessing.Queue()
start = time.time()
self.assertRaises(pyqueue.Empty, q.get, True, 0.2)
delta = time.time() - start
self.assertGreaterEqual(delta, 0.18)
#
#
#
class _TestLock(BaseTestCase):
def test_lock(self):
lock = self.Lock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(False), False)
self.assertEqual(lock.release(), None)
self.assertRaises((ValueError, threading.ThreadError), lock.release)
def test_rlock(self):
lock = self.RLock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertRaises((AssertionError, RuntimeError), lock.release)
def test_lock_context(self):
with self.Lock():
pass
class _TestSemaphore(BaseTestCase):
def _test_semaphore(self, sem):
self.assertReturnsIfImplemented(2, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.acquire(False), False)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(2, get_value, sem)
def test_semaphore(self):
sem = self.Semaphore(2)
self._test_semaphore(sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(3, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(4, get_value, sem)
def test_bounded_semaphore(self):
sem = self.BoundedSemaphore(2)
self._test_semaphore(sem)
# Currently fails on OS/X
#if HAVE_GETVALUE:
# self.assertRaises(ValueError, sem.release)
# self.assertReturnsIfImplemented(2, get_value, sem)
def test_timeout(self):
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sem = self.Semaphore(0)
acquire = TimingWrapper(sem.acquire)
self.assertEqual(acquire(False), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, None), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, TIMEOUT1), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0)
self.assertEqual(acquire(True, TIMEOUT2), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2)
self.assertEqual(acquire(timeout=TIMEOUT3), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3)
class _TestCondition(BaseTestCase):
@classmethod
def f(cls, cond, sleeping, woken, timeout=None):
cond.acquire()
sleeping.release()
cond.wait(timeout)
woken.release()
cond.release()
def check_invariant(self, cond):
# this is only supposed to succeed when there are no sleepers
if self.TYPE == 'processes':
try:
sleepers = (cond._sleeping_count.get_value() -
cond._woken_count.get_value())
self.assertEqual(sleepers, 0)
self.assertEqual(cond._wait_semaphore.get_value(), 0)
except NotImplementedError:
pass
def test_notify(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
p = threading.Thread(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
# wait for both children to start sleeping
sleeping.acquire()
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake up one process/thread
cond.acquire()
cond.notify()
cond.release()
# check one process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(1, get_value, woken)
# wake up another
cond.acquire()
cond.notify()
cond.release()
# check other has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(2, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
p.join()
def test_notify_all(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
# start some threads/processes which will timeout
for i in range(3):
p = self.Process(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
p.daemon = True
p.start()
t = threading.Thread(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
t.daemon = True
t.start()
# wait for them all to sleep
for i in range(6):
sleeping.acquire()
# check they have all timed out
for i in range(6):
woken.acquire()
self.assertReturnsIfImplemented(0, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
# start some more threads/processes
for i in range(3):
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
t.daemon = True
t.start()
# wait for them to all sleep
for i in range(6):
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake them all up
cond.acquire()
cond.notify_all()
cond.release()
# check they have all woken
for i in range(10):
try:
if get_value(woken) == 6:
break
except NotImplementedError:
break
time.sleep(DELTA)
self.assertReturnsIfImplemented(6, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
def test_timeout(self):
cond = self.Condition()
wait = TimingWrapper(cond.wait)
cond.acquire()
res = wait(TIMEOUT1)
cond.release()
self.assertEqual(res, False)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
@classmethod
def _test_waitfor_f(cls, cond, state):
with cond:
state.value = 0
cond.notify()
result = cond.wait_for(lambda : state.value==4)
if not result or state.value != 4:
sys.exit(1)
@unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
def test_waitfor(self):
# based on test in test/lock_tests.py
cond = self.Condition()
state = self.Value('i', -1)
p = self.Process(target=self._test_waitfor_f, args=(cond, state))
p.daemon = True
p.start()
with cond:
result = cond.wait_for(lambda : state.value==0)
self.assertTrue(result)
self.assertEqual(state.value, 0)
for i in range(4):
time.sleep(0.01)
with cond:
state.value += 1
cond.notify()
p.join(5)
self.assertFalse(p.is_alive())
self.assertEqual(p.exitcode, 0)
@classmethod
def _test_waitfor_timeout_f(cls, cond, state, success, sem):
sem.release()
with cond:
expected = 0.1
dt = time.time()
result = cond.wait_for(lambda : state.value==4, timeout=expected)
dt = time.time() - dt
# borrow logic in assertTimeout() from test/lock_tests.py
if not result and expected * 0.6 < dt < expected * 10.0:
success.value = True
@unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
def test_waitfor_timeout(self):
# based on test in test/lock_tests.py
cond = self.Condition()
state = self.Value('i', 0)
success = self.Value('i', False)
sem = self.Semaphore(0)
p = self.Process(target=self._test_waitfor_timeout_f,
args=(cond, state, success, sem))
p.daemon = True
p.start()
self.assertTrue(sem.acquire(timeout=10))
# Only increment 3 times, so state == 4 is never reached.
for i in range(3):
time.sleep(0.01)
with cond:
state.value += 1
cond.notify()
p.join(5)
self.assertTrue(success.value)
@classmethod
def _test_wait_result(cls, c, pid):
with c:
c.notify()
time.sleep(1)
if pid is not None:
os.kill(pid, signal.SIGINT)
def test_wait_result(self):
if isinstance(self, ProcessesMixin) and sys.platform != 'win32':
pid = os.getpid()
else:
pid = None
c = self.Condition()
with c:
self.assertFalse(c.wait(0))
self.assertFalse(c.wait(0.1))
p = self.Process(target=self._test_wait_result, args=(c, pid))
p.start()
self.assertTrue(c.wait(10))
if pid is not None:
self.assertRaises(KeyboardInterrupt, c.wait, 10)
p.join()
class _TestEvent(BaseTestCase):
@classmethod
def _test_event(cls, event):
time.sleep(TIMEOUT2)
event.set()
def test_event(self):
event = self.Event()
wait = TimingWrapper(event.wait)
# Removed temporarily, due to API shear, this does not
# work with threading._Event objects. is_set == isSet
self.assertEqual(event.is_set(), False)
# Removed, threading.Event.wait() will return the value of the __flag
# instead of None. API Shear with the semaphore backed mp.Event
self.assertEqual(wait(0.0), False)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), False)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
event.set()
# See note above on the API differences
self.assertEqual(event.is_set(), True)
self.assertEqual(wait(), True)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), True)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
# self.assertEqual(event.is_set(), True)
event.clear()
#self.assertEqual(event.is_set(), False)
p = self.Process(target=self._test_event, args=(event,))
p.daemon = True
p.start()
self.assertEqual(wait(), True)
#
# Tests for Barrier - adapted from tests in test/lock_tests.py
#
# Many of the tests for threading.Barrier use a list as an atomic
# counter: a value is appended to increment the counter, and the
# length of the list gives the value. We use the class DummyList
# for the same purpose.
class _DummyList(object):
def __init__(self):
wrapper = multiprocessing.heap.BufferWrapper(struct.calcsize('i'))
lock = multiprocessing.Lock()
self.__setstate__((wrapper, lock))
self._lengthbuf[0] = 0
def __setstate__(self, state):
(self._wrapper, self._lock) = state
self._lengthbuf = self._wrapper.create_memoryview().cast('i')
def __getstate__(self):
return (self._wrapper, self._lock)
def append(self, _):
with self._lock:
self._lengthbuf[0] += 1
def __len__(self):
with self._lock:
return self._lengthbuf[0]
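# Illustrative sketch (not part of the test suite; the _sketch_* helpers are
# hypothetical additions): a plain Python list mutated in a child process is
# invisible to the parent, which is why the barrier tests below use the
# shared-memory _DummyList defined above.  A minimal equivalent counter can
# be built from multiprocessing.Value:
def _sketch_counter_bump(counter):
    # increment under the Value's built-in lock
    with counter.get_lock():
        counter.value += 1

def _sketch_shared_counter(n=4):
    import multiprocessing
    counter = multiprocessing.Value('i', 0)   # shared int in anonymous memory
    procs = [multiprocessing.Process(target=_sketch_counter_bump,
                                     args=(counter,))
             for _ in range(n)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
    return counter.value                      # n: all increments are visible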
def _wait():
# A crude wait/yield function not relying on synchronization primitives.
time.sleep(0.01)
class Bunch(object):
"""
A bunch of workers (threads or processes, depending on the namespace).
"""
def __init__(self, namespace, f, args, n, wait_before_exit=False):
"""
Construct a bunch of `n` workers running the same function `f`.
If `wait_before_exit` is True, the workers won't terminate until
do_finish() is called.
"""
self.f = f
self.args = args
self.n = n
self.started = namespace.DummyList()
self.finished = namespace.DummyList()
self._can_exit = namespace.Event()
if not wait_before_exit:
self._can_exit.set()
for i in range(n):
p = namespace.Process(target=self.task)
p.daemon = True
p.start()
def task(self):
pid = os.getpid()
self.started.append(pid)
try:
self.f(*self.args)
finally:
self.finished.append(pid)
self._can_exit.wait(30)
assert self._can_exit.is_set()
def wait_for_started(self):
while len(self.started) < self.n:
_wait()
def wait_for_finished(self):
while len(self.finished) < self.n:
_wait()
def do_finish(self):
self._can_exit.set()
class AppendTrue(object):
def __init__(self, obj):
self.obj = obj
def __call__(self):
self.obj.append(True)
class _TestBarrier(BaseTestCase):
"""
Tests for Barrier objects.
"""
N = 5
defaultTimeout = 30.0 # XXX Slow Windows buildbots need generous timeout
def setUp(self):
self.barrier = self.Barrier(self.N, timeout=self.defaultTimeout)
def tearDown(self):
self.barrier.abort()
self.barrier = None
def DummyList(self):
if self.TYPE == 'threads':
return []
elif self.TYPE == 'manager':
return self.manager.list()
else:
return _DummyList()
def run_threads(self, f, args):
b = Bunch(self, f, args, self.N-1)
f(*args)
b.wait_for_finished()
@classmethod
def multipass(cls, barrier, results, n):
m = barrier.parties
assert m == cls.N
for i in range(n):
results[0].append(True)
assert len(results[1]) == i * m
barrier.wait()
results[1].append(True)
assert len(results[0]) == (i + 1) * m
barrier.wait()
try:
assert barrier.n_waiting == 0
except NotImplementedError:
pass
assert not barrier.broken
def test_barrier(self, passes=1):
"""
Test that a barrier is passed in lockstep
"""
results = [self.DummyList(), self.DummyList()]
self.run_threads(self.multipass, (self.barrier, results, passes))
def test_barrier_10(self):
"""
Test that a barrier works for 10 consecutive runs
"""
return self.test_barrier(10)
@classmethod
def _test_wait_return_f(cls, barrier, queue):
res = barrier.wait()
queue.put(res)
def test_wait_return(self):
"""
test the return value from barrier.wait
"""
queue = self.Queue()
self.run_threads(self._test_wait_return_f, (self.barrier, queue))
results = [queue.get() for i in range(self.N)]
self.assertEqual(results.count(0), 1)
@classmethod
def _test_action_f(cls, barrier, results):
barrier.wait()
if len(results) != 1:
raise RuntimeError
def test_action(self):
"""
Test the 'action' callback
"""
results = self.DummyList()
barrier = self.Barrier(self.N, action=AppendTrue(results))
self.run_threads(self._test_action_f, (barrier, results))
self.assertEqual(len(results), 1)
@classmethod
def _test_abort_f(cls, barrier, results1, results2):
try:
i = barrier.wait()
if i == cls.N//2:
raise RuntimeError
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
except RuntimeError:
barrier.abort()
def test_abort(self):
"""
Test that an abort will put the barrier in a broken state
"""
results1 = self.DummyList()
results2 = self.DummyList()
self.run_threads(self._test_abort_f,
(self.barrier, results1, results2))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertTrue(self.barrier.broken)
@classmethod
def _test_reset_f(cls, barrier, results1, results2, results3):
i = barrier.wait()
if i == cls.N//2:
# Wait until the other threads are all in the barrier.
while barrier.n_waiting < cls.N-1:
time.sleep(0.001)
barrier.reset()
else:
try:
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
# Now, pass the barrier again
barrier.wait()
results3.append(True)
def test_reset(self):
"""
Test that a 'reset' on a barrier frees the waiting threads
"""
results1 = self.DummyList()
results2 = self.DummyList()
results3 = self.DummyList()
self.run_threads(self._test_reset_f,
(self.barrier, results1, results2, results3))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertEqual(len(results3), self.N)
@classmethod
def _test_abort_and_reset_f(cls, barrier, barrier2,
results1, results2, results3):
try:
i = barrier.wait()
if i == cls.N//2:
raise RuntimeError
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
except RuntimeError:
barrier.abort()
# Synchronize and reset the barrier. Must synchronize first so
# that everyone has left it when we reset, and after so that no
# one enters it before the reset.
if barrier2.wait() == cls.N//2:
barrier.reset()
barrier2.wait()
barrier.wait()
results3.append(True)
def test_abort_and_reset(self):
"""
Test that a barrier can be reset after being broken.
"""
results1 = self.DummyList()
results2 = self.DummyList()
results3 = self.DummyList()
barrier2 = self.Barrier(self.N)
self.run_threads(self._test_abort_and_reset_f,
(self.barrier, barrier2, results1, results2, results3))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertEqual(len(results3), self.N)
@classmethod
def _test_timeout_f(cls, barrier, results):
i = barrier.wait()
if i == cls.N//2:
# One thread is late!
time.sleep(1.0)
try:
barrier.wait(0.5)
except threading.BrokenBarrierError:
results.append(True)
def test_timeout(self):
"""
Test wait(timeout)
"""
results = self.DummyList()
self.run_threads(self._test_timeout_f, (self.barrier, results))
self.assertEqual(len(results), self.barrier.parties)
@classmethod
def _test_default_timeout_f(cls, barrier, results):
i = barrier.wait(cls.defaultTimeout)
if i == cls.N//2:
# One thread is later than the default timeout
time.sleep(1.0)
try:
barrier.wait()
except threading.BrokenBarrierError:
results.append(True)
def test_default_timeout(self):
"""
Test the barrier's default timeout
"""
barrier = self.Barrier(self.N, timeout=0.5)
results = self.DummyList()
self.run_threads(self._test_default_timeout_f, (barrier, results))
self.assertEqual(len(results), barrier.parties)
def test_single_thread(self):
b = self.Barrier(1)
b.wait()
b.wait()
@classmethod
def _test_thousand_f(cls, barrier, passes, conn, lock):
for i in range(passes):
barrier.wait()
with lock:
conn.send(i)
def test_thousand(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
passes = 1000
lock = self.Lock()
conn, child_conn = self.Pipe(False)
for j in range(self.N):
p = self.Process(target=self._test_thousand_f,
args=(self.barrier, passes, child_conn, lock))
p.start()
for i in range(passes):
for j in range(self.N):
self.assertEqual(conn.recv(), i)
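# Illustrative sketch (not part of the test suite): minimal Barrier usage.
# All parties block in wait() and are released together; exactly one waiter
# receives return value 0, which test_wait_return above relies on to elect
# a single "leader".
def _sketch_barrier_party(barrier, results):
    if barrier.wait() == 0:        # indices 0..parties-1, each handed out once
        results.append('leader')

def _sketch_barrier_demo(n=3):
    import multiprocessing
    barrier = multiprocessing.Barrier(n)
    with multiprocessing.Manager() as mgr:
        results = mgr.list()
        procs = [multiprocessing.Process(target=_sketch_barrier_party,
                                         args=(barrier, results))
                 for _ in range(n)]
        for p in procs:
            p.start()
        for p in procs:
            p.join()
        return list(results)       # ['leader'] -- one winner per cycle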
#
#
#
class _TestValue(BaseTestCase):
ALLOWED_TYPES = ('processes',)
codes_values = [
('i', 4343, 24234),
('d', 3.625, -4.25),
('h', -232, 234),
('c', latin('x'), latin('y'))
]
def setUp(self):
if not HAS_SHAREDCTYPES:
self.skipTest("requires multiprocessing.sharedctypes")
@classmethod
def _test(cls, values):
for sv, cv in zip(values, cls.codes_values):
sv.value = cv[2]
def test_value(self, raw=False):
if raw:
values = [self.RawValue(code, value)
for code, value, _ in self.codes_values]
else:
values = [self.Value(code, value)
for code, value, _ in self.codes_values]
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[1])
proc = self.Process(target=self._test, args=(values,))
proc.daemon = True
proc.start()
proc.join()
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[2])
def test_rawvalue(self):
self.test_value(raw=True)
def test_getobj_getlock(self):
val1 = self.Value('i', 5)
lock1 = val1.get_lock()
obj1 = val1.get_obj()
val2 = self.Value('i', 5, lock=None)
lock2 = val2.get_lock()
obj2 = val2.get_obj()
lock = self.Lock()
val3 = self.Value('i', 5, lock=lock)
lock3 = val3.get_lock()
obj3 = val3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.Value('i', 5, lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError, self.Value, 'i', 5, lock='navalue')
arr5 = self.RawValue('i', 5)
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
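# Illustrative sketch (not part of the test suite) of the four lock flavours
# exercised by test_getobj_getlock above:
def _sketch_value_locks():
    import multiprocessing
    v1 = multiprocessing.Value('i', 5)               # lock=True: fresh RLock
    v2 = multiprocessing.Value('i', 5, lock=None)    # same as lock=True
    outer = multiprocessing.Lock()
    v3 = multiprocessing.Value('i', 5, lock=outer)   # caller-supplied lock
    assert v3.get_lock() is outer
    v4 = multiprocessing.Value('i', 5, lock=False)   # raw, unsynchronized:
    assert not hasattr(v4, 'get_lock')               # no wrapper at all
    return v1, v2, v3, v4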
class _TestArray(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def f(cls, seq):
for i in range(1, len(seq)):
seq[i] += seq[i-1]
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_array(self, raw=False):
seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831]
if raw:
arr = self.RawArray('i', seq)
else:
arr = self.Array('i', seq)
self.assertEqual(len(arr), len(seq))
self.assertEqual(arr[3], seq[3])
self.assertEqual(list(arr[2:7]), list(seq[2:7]))
arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4])
self.assertEqual(list(arr[:]), seq)
self.f(seq)
p = self.Process(target=self.f, args=(arr,))
p.daemon = True
p.start()
p.join()
self.assertEqual(list(arr[:]), seq)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_array_from_size(self):
size = 10
# Test for zeroing (see issue #11675).
# The repetition below strengthens the test by increasing the chances
# of previously allocated non-zero memory being used for the new array
# on the 2nd and 3rd loops.
for _ in range(3):
arr = self.Array('i', size)
self.assertEqual(len(arr), size)
self.assertEqual(list(arr), [0] * size)
arr[:] = range(10)
self.assertEqual(list(arr), list(range(10)))
del arr
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_rawarray(self):
self.test_array(raw=True)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_getobj_getlock_obj(self):
arr1 = self.Array('i', list(range(10)))
lock1 = arr1.get_lock()
obj1 = arr1.get_obj()
arr2 = self.Array('i', list(range(10)), lock=None)
lock2 = arr2.get_lock()
obj2 = arr2.get_obj()
lock = self.Lock()
arr3 = self.Array('i', list(range(10)), lock=lock)
lock3 = arr3.get_lock()
obj3 = arr3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.Array('i', range(10), lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError,
self.Array, 'i', range(10), lock='notalock')
arr5 = self.RawArray('i', range(10))
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
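# Illustrative sketch (not part of the test suite): an Array built from a
# size is zero-initialised (the issue #11675 behaviour checked above) and
# supports sequence-style slicing:
def _sketch_array_basics():
    import multiprocessing
    arr = multiprocessing.Array('i', 5)     # size -> [0, 0, 0, 0, 0]
    arr[1:4] = [10, 20, 30]                 # slice assignment, length-checked
    return list(arr)                        # [0, 10, 20, 30, 0]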
#
#
#
class _TestContainers(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_list(self):
a = self.list(list(range(10)))
self.assertEqual(a[:], list(range(10)))
b = self.list()
self.assertEqual(b[:], [])
b.extend(list(range(5)))
self.assertEqual(b[:], list(range(5)))
self.assertEqual(b[2], 2)
self.assertEqual(b[2:10], [2,3,4])
b *= 2
self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4])
self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6])
self.assertEqual(a[:], list(range(10)))
d = [a, b]
e = self.list(d)
self.assertEqual(
e[:],
[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]]
)
f = self.list([a])
a.append('hello')
self.assertEqual(f[:], [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello']])
def test_dict(self):
d = self.dict()
indices = list(range(65, 70))
for i in indices:
d[i] = chr(i)
self.assertEqual(d.copy(), dict((i, chr(i)) for i in indices))
self.assertEqual(sorted(d.keys()), indices)
self.assertEqual(sorted(d.values()), [chr(i) for i in indices])
self.assertEqual(sorted(d.items()), [(i, chr(i)) for i in indices])
def test_namespace(self):
n = self.Namespace()
n.name = 'Bob'
n.job = 'Builder'
n._hidden = 'hidden'
self.assertEqual((n.name, n.job), ('Bob', 'Builder'))
del n.job
self.assertEqual(str(n), "Namespace(name='Bob')")
self.assertTrue(hasattr(n, 'name'))
self.assertTrue(not hasattr(n, 'job'))
#
#
#
def sqr(x, wait=0.0):
time.sleep(wait)
return x*x
def mul(x, y):
return x*y
class _TestPool(BaseTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.pool = cls.Pool(4)
@classmethod
def tearDownClass(cls):
cls.pool.terminate()
cls.pool.join()
cls.pool = None
super().tearDownClass()
def test_apply(self):
papply = self.pool.apply
self.assertEqual(papply(sqr, (5,)), sqr(5))
self.assertEqual(papply(sqr, (), {'x':3}), sqr(x=3))
def test_map(self):
pmap = self.pool.map
self.assertEqual(pmap(sqr, list(range(10))), list(map(sqr, list(range(10)))))
self.assertEqual(pmap(sqr, list(range(100)), chunksize=20),
list(map(sqr, list(range(100)))))
def test_starmap(self):
psmap = self.pool.starmap
tuples = list(zip(range(10), range(9,-1, -1)))
self.assertEqual(psmap(mul, tuples),
list(itertools.starmap(mul, tuples)))
tuples = list(zip(range(100), range(99,-1, -1)))
self.assertEqual(psmap(mul, tuples, chunksize=20),
list(itertools.starmap(mul, tuples)))
def test_starmap_async(self):
tuples = list(zip(range(100), range(99,-1, -1)))
self.assertEqual(self.pool.starmap_async(mul, tuples).get(),
list(itertools.starmap(mul, tuples)))
def test_map_async(self):
self.assertEqual(self.pool.map_async(sqr, list(range(10))).get(),
list(map(sqr, list(range(10)))))
def test_map_async_callbacks(self):
call_args = self.manager.list() if self.TYPE == 'manager' else []
self.pool.map_async(int, ['1'],
callback=call_args.append,
error_callback=call_args.append).wait()
self.assertEqual(1, len(call_args))
self.assertEqual([1], call_args[0])
self.pool.map_async(int, ['a'],
callback=call_args.append,
error_callback=call_args.append).wait()
self.assertEqual(2, len(call_args))
self.assertIsInstance(call_args[1], ValueError)
def test_map_unpicklable(self):
# Issue #19425 -- failure to pickle should not cause a hang
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
class A(object):
def __reduce__(self):
raise RuntimeError('cannot pickle')
with self.assertRaises(RuntimeError):
self.pool.map(sqr, [A()]*10)
def test_map_chunksize(self):
try:
self.pool.map_async(sqr, [], chunksize=1).get(timeout=TIMEOUT1)
except multiprocessing.TimeoutError:
self.fail("pool.map_async with chunksize stalled on null list")
def test_async(self):
res = self.pool.apply_async(sqr, (7, TIMEOUT1,))
get = TimingWrapper(res.get)
self.assertEqual(get(), 49)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
def test_async_timeout(self):
res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 1.0))
get = TimingWrapper(res.get)
self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2)
def test_imap(self):
it = self.pool.imap(sqr, list(range(10)))
self.assertEqual(list(it), list(map(sqr, list(range(10)))))
it = self.pool.imap(sqr, list(range(10)))
for i in range(10):
self.assertEqual(next(it), i*i)
self.assertRaises(StopIteration, it.__next__)
it = self.pool.imap(sqr, list(range(1000)), chunksize=100)
for i in range(1000):
self.assertEqual(next(it), i*i)
self.assertRaises(StopIteration, it.__next__)
def test_imap_unordered(self):
it = self.pool.imap_unordered(sqr, list(range(1000)))
self.assertEqual(sorted(it), list(map(sqr, list(range(1000)))))
it = self.pool.imap_unordered(sqr, list(range(1000)), chunksize=53)
self.assertEqual(sorted(it), list(map(sqr, list(range(1000)))))
def test_make_pool(self):
self.assertRaises(ValueError, multiprocessing.Pool, -1)
self.assertRaises(ValueError, multiprocessing.Pool, 0)
p = multiprocessing.Pool(3)
self.assertEqual(3, len(p._pool))
p.close()
p.join()
def test_terminate(self):
result = self.pool.map_async(
time.sleep, [0.1 for i in range(10000)], chunksize=1
)
self.pool.terminate()
join = TimingWrapper(self.pool.join)
join()
self.assertLess(join.elapsed, 0.5)
def test_empty_iterable(self):
# See Issue 12157
p = self.Pool(1)
self.assertEqual(p.map(sqr, []), [])
self.assertEqual(list(p.imap(sqr, [])), [])
self.assertEqual(list(p.imap_unordered(sqr, [])), [])
self.assertEqual(p.map_async(sqr, []).get(), [])
p.close()
p.join()
def test_context(self):
if self.TYPE == 'processes':
L = list(range(10))
expected = [sqr(i) for i in L]
with multiprocessing.Pool(2) as p:
r = p.map_async(sqr, L)
self.assertEqual(r.get(), expected)
self.assertRaises(ValueError, p.map_async, sqr, L)
@classmethod
def _test_traceback(cls):
raise RuntimeError(123) # some comment
def test_traceback(self):
# We want to ensure that the traceback from the child process is
# contained in the traceback raised in the main process.
if self.TYPE == 'processes':
with self.Pool(1) as p:
try:
p.apply(self._test_traceback)
except Exception as e:
exc = e
else:
raise AssertionError('expected RuntimeError')
self.assertIs(type(exc), RuntimeError)
self.assertEqual(exc.args, (123,))
cause = exc.__cause__
self.assertIs(type(cause), multiprocessing.pool.RemoteTraceback)
self.assertIn('raise RuntimeError(123) # some comment', cause.tb)
with test.support.captured_stderr() as f1:
try:
raise exc
except RuntimeError:
sys.excepthook(*sys.exc_info())
self.assertIn('raise RuntimeError(123) # some comment',
f1.getvalue())
@classmethod
def _test_wrapped_exception(cls):
raise RuntimeError('foo')
def test_wrapped_exception(self):
# Issue #20980: Should not wrap exception when using thread pool
with self.Pool(1) as p:
with self.assertRaises(RuntimeError):
p.apply(self._test_wrapped_exception)
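# Illustrative sketch (not part of the test suite): the context-manager and
# async-result Pool patterns exercised above.  sqr() must live at module
# level so that worker processes can import it.
def _sketch_pool_usage():
    import multiprocessing
    with multiprocessing.Pool(2) as pool:
        squares = pool.map(sqr, range(5))       # [0, 1, 4, 9, 16]
        res = pool.apply_async(sqr, (7,))
        seven_squared = res.get(timeout=10)     # 49, or TimeoutError
    # the with-block terminates the pool; further use raises ValueError
    return squares, seven_squared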
def raising():
raise KeyError("key")
def unpickleable_result():
return lambda: 42
class _TestPoolWorkerErrors(BaseTestCase):
ALLOWED_TYPES = ('processes', )
def test_async_error_callback(self):
p = multiprocessing.Pool(2)
scratchpad = [None]
def errback(exc):
scratchpad[0] = exc
res = p.apply_async(raising, error_callback=errback)
self.assertRaises(KeyError, res.get)
self.assertTrue(scratchpad[0])
self.assertIsInstance(scratchpad[0], KeyError)
p.close()
p.join()
def test_unpickleable_result(self):
from multiprocessing.pool import MaybeEncodingError
p = multiprocessing.Pool(2)
# Make sure we don't lose pool processes because of encoding errors.
for iteration in range(20):
scratchpad = [None]
def errback(exc):
scratchpad[0] = exc
res = p.apply_async(unpickleable_result, error_callback=errback)
self.assertRaises(MaybeEncodingError, res.get)
wrapped = scratchpad[0]
self.assertTrue(wrapped)
self.assertIsInstance(scratchpad[0], MaybeEncodingError)
self.assertIsNotNone(wrapped.exc)
self.assertIsNotNone(wrapped.value)
p.close()
p.join()
class _TestPoolWorkerLifetime(BaseTestCase):
ALLOWED_TYPES = ('processes', )
def test_pool_worker_lifetime(self):
p = multiprocessing.Pool(3, maxtasksperchild=10)
self.assertEqual(3, len(p._pool))
origworkerpids = [w.pid for w in p._pool]
# Run many tasks so each worker gets replaced (hopefully)
results = []
for i in range(100):
results.append(p.apply_async(sqr, (i, )))
# Fetch the results and verify we got the right answers,
# also ensuring all the tasks have completed.
for (j, res) in enumerate(results):
self.assertEqual(res.get(), sqr(j))
# Refill the pool
p._repopulate_pool()
# Wait until all workers are alive
# (countdown * DELTA = 5 seconds max startup process time)
countdown = 50
while countdown and not all(w.is_alive() for w in p._pool):
countdown -= 1
time.sleep(DELTA)
finalworkerpids = [w.pid for w in p._pool]
# All pids should be assigned. See issue #7805.
self.assertNotIn(None, origworkerpids)
self.assertNotIn(None, finalworkerpids)
# Finally, check that the worker pids have changed
self.assertNotEqual(sorted(origworkerpids), sorted(finalworkerpids))
p.close()
p.join()
def test_pool_worker_lifetime_early_close(self):
# Issue #10332: closing a pool whose workers have limited lifetimes
# before all the tasks completed would make join() hang.
p = multiprocessing.Pool(3, maxtasksperchild=1)
results = []
for i in range(6):
results.append(p.apply_async(sqr, (i, 0.3)))
p.close()
p.join()
# check the results
for (j, res) in enumerate(results):
self.assertEqual(res.get(), sqr(j))
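# Illustrative sketch (not part of the test suite): maxtasksperchild recycles
# worker processes after a fixed number of tasks, which both lifetime tests
# above depend on.  Reporting os.getpid() per task makes the recycling
# visible:
def _sketch_report_pid(_):
    import os
    return os.getpid()

def _sketch_maxtasksperchild():
    import multiprocessing
    with multiprocessing.Pool(1, maxtasksperchild=2) as pool:
        pids = pool.map(_sketch_report_pid, range(6), chunksize=1)
    # each worker handles two tasks before being replaced, so roughly three
    # distinct pids should appear
    return len(set(pids))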
#
# Test of creating a customized manager class
#
from multiprocessing.managers import BaseManager, BaseProxy, RemoteError
class FooBar(object):
def f(self):
return 'f()'
def g(self):
raise ValueError
def _h(self):
return '_h()'
def baz():
for i in range(10):
yield i*i
class IteratorProxy(BaseProxy):
_exposed_ = ('__next__',)
def __iter__(self):
return self
def __next__(self):
return self._callmethod('__next__')
class MyManager(BaseManager):
pass
MyManager.register('Foo', callable=FooBar)
MyManager.register('Bar', callable=FooBar, exposed=('f', '_h'))
MyManager.register('baz', callable=baz, proxytype=IteratorProxy)
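# Notes on the registrations above (illustrative, not part of the test
# suite): register() binds a factory to a typeid; `exposed` restricts which
# methods the proxy may call (Bar hides g() but allows _h()), and
# `proxytype` substitutes a custom proxy class -- IteratorProxy forwards
# __next__ so the baz() generator can be iterated through the manager:
def _sketch_mymanager_usage():
    with MyManager() as manager:        # __enter__ starts the server process
        foo = manager.Foo()
        squares = list(manager.baz())   # [0, 1, 4, ..., 81] via IteratorProxy
        return foo.f(), squares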
class _TestMyManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_mymanager(self):
manager = MyManager()
manager.start()
self.common(manager)
manager.shutdown()
# If the manager process exited cleanly then the exitcode
# will be zero. Otherwise (after a short timeout)
# terminate() is used, resulting in an exitcode of -SIGTERM.
self.assertEqual(manager._process.exitcode, 0)
def test_mymanager_context(self):
with MyManager() as manager:
self.common(manager)
self.assertEqual(manager._process.exitcode, 0)
def test_mymanager_context_prestarted(self):
manager = MyManager()
manager.start()
with manager:
self.common(manager)
self.assertEqual(manager._process.exitcode, 0)
def common(self, manager):
foo = manager.Foo()
bar = manager.Bar()
baz = manager.baz()
foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)]
bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)]
self.assertEqual(foo_methods, ['f', 'g'])
self.assertEqual(bar_methods, ['f', '_h'])
self.assertEqual(foo.f(), 'f()')
self.assertRaises(ValueError, foo.g)
self.assertEqual(foo._callmethod('f'), 'f()')
self.assertRaises(RemoteError, foo._callmethod, '_h')
self.assertEqual(bar.f(), 'f()')
self.assertEqual(bar._h(), '_h()')
self.assertEqual(bar._callmethod('f'), 'f()')
self.assertEqual(bar._callmethod('_h'), '_h()')
self.assertEqual(list(baz), [i*i for i in range(10)])
#
# Test of connecting to a remote server and using xmlrpclib for serialization
#
_queue = pyqueue.Queue()
def get_queue():
return _queue
class QueueManager(BaseManager):
'''manager class used by server process'''
QueueManager.register('get_queue', callable=get_queue)
class QueueManager2(BaseManager):
'''manager class which specifies the same interface as QueueManager'''
QueueManager2.register('get_queue')
SERIALIZER = 'xmlrpclib'
class _TestRemoteManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
@classmethod
def _putter(cls, address, authkey):
manager = QueueManager2(
address=address, authkey=authkey, serializer=SERIALIZER
)
manager.connect()
queue = manager.get_queue()
queue.put(('hello world', None, True, 2.25))
def test_remote(self):
authkey = os.urandom(32)
manager = QueueManager(
address=(test.support.HOST, 0), authkey=authkey, serializer=SERIALIZER
)
manager.start()
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.daemon = True
p.start()
manager2 = QueueManager2(
address=manager.address, authkey=authkey, serializer=SERIALIZER
)
manager2.connect()
queue = manager2.get_queue()
# Note that xmlrpclib will deserialize the object as a list, not a tuple
self.assertEqual(queue.get(), ['hello world', None, True, 2.25])
# Because we are using xmlrpclib for serialization instead of
# pickle this will cause a serialization error.
self.assertRaises(Exception, queue.put, time.sleep)
# Make queue finalizer run before the server is stopped
del queue
manager.shutdown()
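# Illustrative sketch (not part of the test suite): the server/client split
# used by _TestRemoteManager.  Any process that knows the address and authkey
# can attach with a manager class that merely declares the same typeid
# (QueueManager2 above registers 'get_queue' with no callable).  Note that
# the serializer must match on both sides; the tests above use 'xmlrpclib',
# while this sketch assumes the default pickle serializer.
def _sketch_remote_client(address, authkey):
    client = QueueManager2(address=address, authkey=authkey)
    client.connect()                  # attach to an already-running server
    return client.get_queue().get()   # a proxy for the server-side queue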
class _TestManagerRestart(BaseTestCase):
@classmethod
def _putter(cls, address, authkey):
manager = QueueManager(
address=address, authkey=authkey, serializer=SERIALIZER)
manager.connect()
queue = manager.get_queue()
queue.put('hello world')
def test_rapid_restart(self):
authkey = os.urandom(32)
manager = QueueManager(
address=(test.support.HOST, 0), authkey=authkey, serializer=SERIALIZER)
srvr = manager.get_server()
addr = srvr.address
# Close the connection.Listener socket which gets opened as a part
# of manager.get_server(). It's not needed for the test.
srvr.listener.close()
manager.start()
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.daemon = True
p.start()
queue = manager.get_queue()
self.assertEqual(queue.get(), 'hello world')
del queue
manager.shutdown()
manager = QueueManager(
address=addr, authkey=authkey, serializer=SERIALIZER)
try:
manager.start()
except OSError as e:
if e.errno != errno.EADDRINUSE:
raise
# Retry after some time, in case the old socket was lingering
# (sporadic failure on buildbots)
time.sleep(1.0)
manager = QueueManager(
address=addr, authkey=authkey, serializer=SERIALIZER)
manager.shutdown()
#
#
#
SENTINEL = latin('')
class _TestConnection(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
@classmethod
def _echo(cls, conn):
for msg in iter(conn.recv_bytes, SENTINEL):
conn.send_bytes(msg)
conn.close()
def test_connection(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.daemon = True
p.start()
seq = [1, 2.25, None]
msg = latin('hello world')
longmsg = msg * 10
arr = array.array('i', list(range(4)))
if self.TYPE == 'processes':
self.assertEqual(type(conn.fileno()), int)
self.assertEqual(conn.send(seq), None)
self.assertEqual(conn.recv(), seq)
self.assertEqual(conn.send_bytes(msg), None)
self.assertEqual(conn.recv_bytes(), msg)
if self.TYPE == 'processes':
buffer = array.array('i', [0]*10)
expected = list(arr) + [0] * (10 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = array.array('i', [0]*10)
expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer, 3 * buffer.itemsize),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = bytearray(latin(' ' * 40))
self.assertEqual(conn.send_bytes(longmsg), None)
try:
res = conn.recv_bytes_into(buffer)
except multiprocessing.BufferTooShort as e:
self.assertEqual(e.args, (longmsg,))
else:
self.fail('expected BufferTooShort, got %s' % res)
poll = TimingWrapper(conn.poll)
self.assertEqual(poll(), False)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(poll(-1), False)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(poll(TIMEOUT1), False)
self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1)
conn.send(None)
time.sleep(.1)
self.assertEqual(poll(TIMEOUT1), True)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(conn.recv(), None)
really_big_msg = latin('X') * (1024 * 1024 * 16) # 16 MiB
conn.send_bytes(really_big_msg)
self.assertEqual(conn.recv_bytes(), really_big_msg)
conn.send_bytes(SENTINEL) # tell child to quit
child_conn.close()
if self.TYPE == 'processes':
self.assertEqual(conn.readable, True)
self.assertEqual(conn.writable, True)
self.assertRaises(EOFError, conn.recv)
self.assertRaises(EOFError, conn.recv_bytes)
p.join()
def test_duplex_false(self):
reader, writer = self.Pipe(duplex=False)
self.assertEqual(writer.send(1), None)
self.assertEqual(reader.recv(), 1)
if self.TYPE == 'processes':
self.assertEqual(reader.readable, True)
self.assertEqual(reader.writable, False)
self.assertEqual(writer.readable, False)
self.assertEqual(writer.writable, True)
self.assertRaises(OSError, reader.send, 2)
self.assertRaises(OSError, writer.recv)
self.assertRaises(OSError, writer.poll)
def test_spawn_close(self):
# We test that a pipe connection can be closed by the parent
# process immediately after the child is spawned. On Windows
# this sometimes failed on old versions because child_conn was
# closed before the child got a chance to duplicate it.
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.daemon = True
p.start()
child_conn.close() # this might complete before child initializes
msg = latin('hello')
conn.send_bytes(msg)
self.assertEqual(conn.recv_bytes(), msg)
conn.send_bytes(SENTINEL)
conn.close()
p.join()
def test_sendbytes(self):
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
msg = latin('abcdefghijklmnopqrstuvwxyz')
a, b = self.Pipe()
a.send_bytes(msg)
self.assertEqual(b.recv_bytes(), msg)
a.send_bytes(msg, 5)
self.assertEqual(b.recv_bytes(), msg[5:])
a.send_bytes(msg, 7, 8)
self.assertEqual(b.recv_bytes(), msg[7:7+8])
a.send_bytes(msg, 26)
self.assertEqual(b.recv_bytes(), latin(''))
a.send_bytes(msg, 26, 0)
self.assertEqual(b.recv_bytes(), latin(''))
self.assertRaises(ValueError, a.send_bytes, msg, 27)
self.assertRaises(ValueError, a.send_bytes, msg, 22, 5)
self.assertRaises(ValueError, a.send_bytes, msg, 26, 1)
self.assertRaises(ValueError, a.send_bytes, msg, -1)
self.assertRaises(ValueError, a.send_bytes, msg, 4, -1)
@classmethod
def _is_fd_assigned(cls, fd):
try:
os.fstat(fd)
except OSError as e:
if e.errno == errno.EBADF:
return False
raise
else:
return True
@classmethod
def _writefd(cls, conn, data, create_dummy_fds=False):
if create_dummy_fds:
for i in range(0, 256):
if not cls._is_fd_assigned(i):
os.dup2(conn.fileno(), i)
fd = reduction.recv_handle(conn)
if msvcrt:
fd = msvcrt.open_osfhandle(fd, os.O_WRONLY)
os.write(fd, data)
os.close(fd)
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
def test_fd_transfer(self):
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._writefd, args=(child_conn, b"foo"))
p.daemon = True
p.start()
self.addCleanup(test.support.unlink, test.support.TESTFN)
with open(test.support.TESTFN, "wb") as f:
fd = f.fileno()
if msvcrt:
fd = msvcrt.get_osfhandle(fd)
reduction.send_handle(conn, fd, p.pid)
p.join()
with open(test.support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"foo")
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@unittest.skipIf(sys.platform == "win32",
"test semantics don't make sense on Windows")
@unittest.skipIf(MAXFD <= 256,
"largest assignable fd number is too small")
@unittest.skipUnless(hasattr(os, "dup2"),
"test needs os.dup2()")
def test_large_fd_transfer(self):
# With fd > 256 (issue #11657)
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._writefd, args=(child_conn, b"bar", True))
p.daemon = True
p.start()
self.addCleanup(test.support.unlink, test.support.TESTFN)
with open(test.support.TESTFN, "wb") as f:
fd = f.fileno()
for newfd in range(256, MAXFD):
if not self._is_fd_assigned(newfd):
break
else:
self.fail("could not find an unassigned large file descriptor")
os.dup2(fd, newfd)
try:
reduction.send_handle(conn, newfd, p.pid)
finally:
os.close(newfd)
p.join()
with open(test.support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"bar")
@classmethod
def _send_data_without_fd(cls, conn):
os.write(conn.fileno(), b"\0")
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@unittest.skipIf(sys.platform == "win32", "doesn't make sense on Windows")
def test_missing_fd_transfer(self):
# Check that exception is raised when received data is not
# accompanied by a file descriptor in ancillary data.
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._send_data_without_fd, args=(child_conn,))
p.daemon = True
p.start()
self.assertRaises(RuntimeError, reduction.recv_handle, conn)
p.join()
def test_context(self):
a, b = self.Pipe()
with a, b:
a.send(1729)
self.assertEqual(b.recv(), 1729)
if self.TYPE == 'processes':
self.assertFalse(a.closed)
self.assertFalse(b.closed)
if self.TYPE == 'processes':
self.assertTrue(a.closed)
self.assertTrue(b.closed)
self.assertRaises(OSError, a.recv)
self.assertRaises(OSError, b.recv)
class _TestListener(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_multiple_bind(self):
for family in self.connection.families:
l = self.connection.Listener(family=family)
self.addCleanup(l.close)
self.assertRaises(OSError, self.connection.Listener,
l.address, family)
def test_context(self):
with self.connection.Listener() as l:
with self.connection.Client(l.address) as c:
with l.accept() as d:
c.send(1729)
self.assertEqual(d.recv(), 1729)
if self.TYPE == 'processes':
self.assertRaises(OSError, l.accept)
class _TestListenerClient(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
@classmethod
def _test(cls, address):
conn = cls.connection.Client(address)
conn.send('hello')
conn.close()
def test_listener_client(self):
for family in self.connection.families:
l = self.connection.Listener(family=family)
p = self.Process(target=self._test, args=(l.address,))
p.daemon = True
p.start()
conn = l.accept()
self.assertEqual(conn.recv(), 'hello')
p.join()
l.close()
def test_issue14725(self):
l = self.connection.Listener()
p = self.Process(target=self._test, args=(l.address,))
p.daemon = True
p.start()
time.sleep(1)
# On Windows the client process should by now have connected,
# written data and closed the pipe handle. This causes
# ConnectNamedPipe() to fail with ERROR_NO_DATA. See Issue
# 14725.
conn = l.accept()
self.assertEqual(conn.recv(), 'hello')
conn.close()
p.join()
l.close()
def test_issue16955(self):
for fam in self.connection.families:
l = self.connection.Listener(family=fam)
c = self.connection.Client(l.address)
a = l.accept()
a.send_bytes(b"hello")
self.assertTrue(c.poll(1))
a.close()
c.close()
l.close()
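# Illustrative sketch (not part of the test suite): the Listener/Client
# handshake used throughout these tests, in context-manager form.  A thread
# plays the client so the sketch needs no picklable target:
def _sketch_listener_roundtrip():
    import threading
    from multiprocessing import connection

    with connection.Listener() as listener:
        def client_side():
            with connection.Client(listener.address) as c:
                c.send('ping')
        t = threading.Thread(target=client_side)
        t.start()
        with listener.accept() as server_conn:
            msg = server_conn.recv()          # 'ping'
        t.join()
    return msg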
class _TestPoll(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def test_empty_string(self):
a, b = self.Pipe()
self.assertEqual(a.poll(), False)
b.send_bytes(b'')
self.assertEqual(a.poll(), True)
self.assertEqual(a.poll(), True)
@classmethod
def _child_strings(cls, conn, strings):
for s in strings:
time.sleep(0.1)
conn.send_bytes(s)
conn.close()
def test_strings(self):
strings = (b'hello', b'', b'a', b'b', b'', b'bye', b'', b'lop')
a, b = self.Pipe()
p = self.Process(target=self._child_strings, args=(b, strings))
p.start()
for s in strings:
for i in range(200):
if a.poll(0.01):
break
x = a.recv_bytes()
self.assertEqual(s, x)
p.join()
@classmethod
def _child_boundaries(cls, r):
# Polling may "pull" a message in to the child process, but we
# don't want it to pull only part of a message, as that would
# corrupt the pipe for any other processes which might later
# read from it.
r.poll(5)
def test_boundaries(self):
r, w = self.Pipe(False)
p = self.Process(target=self._child_boundaries, args=(r,))
p.start()
time.sleep(2)
L = [b"first", b"second"]
for obj in L:
w.send_bytes(obj)
w.close()
p.join()
self.assertIn(r.recv_bytes(), L)
@classmethod
def _child_dont_merge(cls, b):
b.send_bytes(b'a')
b.send_bytes(b'b')
b.send_bytes(b'cd')
def test_dont_merge(self):
a, b = self.Pipe()
self.assertEqual(a.poll(0.0), False)
self.assertEqual(a.poll(0.1), False)
p = self.Process(target=self._child_dont_merge, args=(b,))
p.start()
self.assertEqual(a.recv_bytes(), b'a')
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.recv_bytes(), b'b')
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(0.0), True)
self.assertEqual(a.recv_bytes(), b'cd')
p.join()
#
# Test of sending connection and socket objects between processes
#
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
class _TestPicklingConnections(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def tearDownClass(cls):
from multiprocessing import resource_sharer
resource_sharer.stop(timeout=5)
@classmethod
def _listener(cls, conn, families):
for fam in families:
l = cls.connection.Listener(family=fam)
conn.send(l.address)
new_conn = l.accept()
conn.send(new_conn)
new_conn.close()
l.close()
l = socket.socket()
l.bind((test.support.HOST, 0))
l.listen(1)
conn.send(l.getsockname())
new_conn, addr = l.accept()
conn.send(new_conn)
new_conn.close()
l.close()
conn.recv()
@classmethod
def _remote(cls, conn):
for (address, msg) in iter(conn.recv, None):
client = cls.connection.Client(address)
client.send(msg.upper())
client.close()
address, msg = conn.recv()
client = socket.socket()
client.connect(address)
client.sendall(msg.upper())
client.close()
conn.close()
def test_pickling(self):
families = self.connection.families
lconn, lconn0 = self.Pipe()
lp = self.Process(target=self._listener, args=(lconn0, families))
lp.daemon = True
lp.start()
lconn0.close()
rconn, rconn0 = self.Pipe()
rp = self.Process(target=self._remote, args=(rconn0,))
rp.daemon = True
rp.start()
rconn0.close()
for fam in families:
msg = ('This connection uses family %s' % fam).encode('ascii')
address = lconn.recv()
rconn.send((address, msg))
new_conn = lconn.recv()
self.assertEqual(new_conn.recv(), msg.upper())
rconn.send(None)
msg = latin('This connection uses a normal socket')
address = lconn.recv()
rconn.send((address, msg))
new_conn = lconn.recv()
buf = []
while True:
s = new_conn.recv(100)
if not s:
break
buf.append(s)
buf = b''.join(buf)
self.assertEqual(buf, msg.upper())
new_conn.close()
lconn.send(None)
rconn.close()
lconn.close()
lp.join()
rp.join()
@classmethod
def child_access(cls, conn):
w = conn.recv()
w.send('all is well')
w.close()
r = conn.recv()
msg = r.recv()
conn.send(msg*2)
conn.close()
def test_access(self):
# On Windows, if we do not specify a destination pid when
# using DupHandle then we need to be careful to use the
# correct access flags for DuplicateHandle(), or else
# DupHandle.detach() will raise PermissionError. For example,
# for a read only pipe handle we should use
# access=FILE_GENERIC_READ. (Unfortunately
# DUPLICATE_SAME_ACCESS does not work.)
conn, child_conn = self.Pipe()
p = self.Process(target=self.child_access, args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
r, w = self.Pipe(duplex=False)
conn.send(w)
w.close()
self.assertEqual(r.recv(), 'all is well')
r.close()
r, w = self.Pipe(duplex=False)
conn.send(r)
r.close()
w.send('foobar')
w.close()
self.assertEqual(conn.recv(), 'foobar'*2)
#
#
#
class _TestHeap(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_heap(self):
iterations = 5000
maxblocks = 50
blocks = []
# create and destroy lots of blocks of different sizes
for i in range(iterations):
size = int(random.lognormvariate(0, 1) * 1000)
b = multiprocessing.heap.BufferWrapper(size)
blocks.append(b)
if len(blocks) > maxblocks:
i = random.randrange(maxblocks)
del blocks[i]
# get the heap object
heap = multiprocessing.heap.BufferWrapper._heap
# verify the state of the heap
all = []
occupied = 0
heap._lock.acquire()
self.addCleanup(heap._lock.release)
for L in list(heap._len_to_seq.values()):
for arena, start, stop in L:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'free'))
for arena, start, stop in heap._allocated_blocks:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'occupied'))
occupied += (stop-start)
all.sort()
for i in range(len(all)-1):
(arena, start, stop) = all[i][:3]
(narena, nstart, nstop) = all[i+1][:3]
self.assertTrue((arena != narena and nstart == 0) or
(stop == nstart))
def test_free_from_gc(self):
# Check that freeing of blocks by the garbage collector doesn't deadlock
# (issue #12352).
# Make sure the GC is enabled, and set lower collection thresholds to
# make collections more frequent (and increase the probability of
# deadlock).
if not gc.isenabled():
gc.enable()
self.addCleanup(gc.disable)
thresholds = gc.get_threshold()
self.addCleanup(gc.set_threshold, *thresholds)
gc.set_threshold(10)
# perform numerous block allocations, with cyclic references to make
# sure objects are collected asynchronously by the gc
for i in range(5000):
a = multiprocessing.heap.BufferWrapper(1)
b = multiprocessing.heap.BufferWrapper(1)
# circular references
a.buddy = b
b.buddy = a
#
#
#
class _Foo(Structure):
_fields_ = [
('x', c_int),
('y', c_double)
]
class _TestSharedCTypes(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def setUp(self):
if not HAS_SHAREDCTYPES:
self.skipTest("requires multiprocessing.sharedctypes")
@classmethod
def _double(cls, x, y, foo, arr, string):
x.value *= 2
y.value *= 2
foo.x *= 2
foo.y *= 2
string.value *= 2
for i in range(len(arr)):
arr[i] *= 2
def test_sharedctypes(self, lock=False):
x = Value('i', 7, lock=lock)
y = Value(c_double, 1.0/3.0, lock=lock)
foo = Value(_Foo, 3, 2, lock=lock)
arr = self.Array('d', list(range(10)), lock=lock)
string = self.Array('c', 20, lock=lock)
string.value = latin('hello')
p = self.Process(target=self._double, args=(x, y, foo, arr, string))
p.daemon = True
p.start()
p.join()
self.assertEqual(x.value, 14)
self.assertAlmostEqual(y.value, 2.0/3.0)
self.assertEqual(foo.x, 6)
self.assertAlmostEqual(foo.y, 4.0)
for i in range(10):
self.assertAlmostEqual(arr[i], i*2)
self.assertEqual(string.value, latin('hellohello'))
def test_synchronize(self):
self.test_sharedctypes(lock=True)
def test_copy(self):
foo = _Foo(2, 5.0)
bar = copy(foo)
foo.x = 0
foo.y = 0
self.assertEqual(bar.x, 2)
self.assertAlmostEqual(bar.y, 5.0)
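# Illustrative sketch (not part of the test suite): sharing a ctypes
# Structure between processes, as test_sharedctypes does with _Foo.  The
# positional arguments after the type initialise the fields:
def _sketch_shared_struct():
    from multiprocessing.sharedctypes import Value as SharedValue
    foo = SharedValue(_Foo, 3, 2.0, lock=True)   # x=3, y=2.0, RLock-guarded
    with foo.get_lock():
        foo.x *= 2
    return foo.x, foo.y                          # (6, 2.0)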
#
#
#
class _TestFinalize(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def _test_finalize(cls, conn):
class Foo(object):
pass
a = Foo()
util.Finalize(a, conn.send, args=('a',))
del a # triggers callback for a
b = Foo()
close_b = util.Finalize(b, conn.send, args=('b',))
close_b() # triggers callback for b
close_b() # does nothing because callback has already been called
del b # does nothing because callback has already been called
c = Foo()
util.Finalize(c, conn.send, args=('c',))
d10 = Foo()
util.Finalize(d10, conn.send, args=('d10',), exitpriority=1)
d01 = Foo()
util.Finalize(d01, conn.send, args=('d01',), exitpriority=0)
d02 = Foo()
util.Finalize(d02, conn.send, args=('d02',), exitpriority=0)
d03 = Foo()
util.Finalize(d03, conn.send, args=('d03',), exitpriority=0)
util.Finalize(None, conn.send, args=('e',), exitpriority=-10)
util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100)
# call multiprocessing's cleanup function then exit process without
# garbage collecting locals
util._exit_function()
conn.close()
os._exit(0)
def test_finalize(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._test_finalize, args=(child_conn,))
p.daemon = True
p.start()
p.join()
result = [obj for obj in iter(conn.recv, 'STOP')]
self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e'])
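# Illustrative sketch (not part of the test suite): Finalize callbacks with
# an exitpriority run when the process shuts down, highest priority first
# ('d10' before the priority-0 entries above); within one priority, later
# registrations run first.  _run_finalizers() is private API that runs the
# whole process-wide registry, used here only to make the ordering visible:
def _sketch_finalize_order():
    events = []
    util.Finalize(None, events.append, args=('low',), exitpriority=0)
    util.Finalize(None, events.append, args=('high',), exitpriority=10)
    util._run_finalizers()        # appends 'high', then 'low'
    return events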
#
# Test that from ... import * works for each module
#
class _TestImportStar(unittest.TestCase):
def get_module_names(self):
import glob
folder = os.path.dirname(multiprocessing.__file__)
pattern = os.path.join(folder, '*.py')
files = glob.glob(pattern)
modules = [os.path.splitext(os.path.split(f)[1])[0] for f in files]
modules = ['multiprocessing.' + m for m in modules]
modules.remove('multiprocessing.__init__')
modules.append('multiprocessing')
return modules
def test_import(self):
modules = self.get_module_names()
if sys.platform == 'win32':
modules.remove('multiprocessing.popen_fork')
modules.remove('multiprocessing.popen_forkserver')
modules.remove('multiprocessing.popen_spawn_posix')
else:
modules.remove('multiprocessing.popen_spawn_win32')
if not HAS_REDUCTION:
modules.remove('multiprocessing.popen_forkserver')
if c_int is None:
# This module requires _ctypes
modules.remove('multiprocessing.sharedctypes')
for name in modules:
__import__(name)
mod = sys.modules[name]
self.assertTrue(hasattr(mod, '__all__'), name)
for attr in mod.__all__:
self.assertTrue(
hasattr(mod, attr),
'%r does not have attribute %r' % (mod, attr)
)
#
# Quick test that logging works -- does not test logging output
#
class _TestLogging(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_enable_logging(self):
logger = multiprocessing.get_logger()
logger.setLevel(util.SUBWARNING)
self.assertTrue(logger is not None)
logger.debug('this will not be printed')
logger.info('nor will this')
logger.setLevel(LOG_LEVEL)
@classmethod
def _test_level(cls, conn):
logger = multiprocessing.get_logger()
conn.send(logger.getEffectiveLevel())
def test_level(self):
LEVEL1 = 32
LEVEL2 = 37
logger = multiprocessing.get_logger()
root_logger = logging.getLogger()
root_level = root_logger.level
reader, writer = multiprocessing.Pipe(duplex=False)
logger.setLevel(LEVEL1)
p = self.Process(target=self._test_level, args=(writer,))
p.daemon = True
p.start()
self.assertEqual(LEVEL1, reader.recv())
logger.setLevel(logging.NOTSET)
root_logger.setLevel(LEVEL2)
p = self.Process(target=self._test_level, args=(writer,))
p.daemon = True
p.start()
self.assertEqual(LEVEL2, reader.recv())
root_logger.setLevel(root_level)
logger.setLevel(level=LOG_LEVEL)
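# Illustrative sketch (not part of the test suite): the module-level logger
# tested above is shared by all of multiprocessing's machinery;
# log_to_stderr() attaches a handler so the records become visible:
def _sketch_mp_logging():
    import logging
    import multiprocessing
    logger = multiprocessing.log_to_stderr()   # get_logger() + stderr handler
    logger.setLevel(logging.INFO)
    logger.info('subprocess internals will now be logged')
    return logger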
# class _TestLoggingProcessName(BaseTestCase):
#
# def handle(self, record):
# assert record.processName == multiprocessing.current_process().name
# self.__handled = True
#
# def test_logging(self):
# handler = logging.Handler()
# handler.handle = self.handle
# self.__handled = False
# # Bypass getLogger() and side-effects
# logger = logging.getLoggerClass()(
# 'multiprocessing.test.TestLoggingProcessName')
# logger.addHandler(handler)
# logger.propagate = False
#
# logger.warn('foo')
# assert self.__handled
#
# Check that Process.join() retries if os.waitpid() fails with EINTR
#
class _TestPollEintr(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def _killer(cls, pid):
time.sleep(0.1)
os.kill(pid, signal.SIGUSR1)
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_poll_eintr(self):
got_signal = [False]
def record(*args):
got_signal[0] = True
pid = os.getpid()
oldhandler = signal.signal(signal.SIGUSR1, record)
try:
killer = self.Process(target=self._killer, args=(pid,))
killer.start()
try:
p = self.Process(target=time.sleep, args=(2,))
p.start()
p.join()
finally:
killer.join()
self.assertTrue(got_signal[0])
self.assertEqual(p.exitcode, 0)
finally:
signal.signal(signal.SIGUSR1, oldhandler)
#
# Test to verify handle verification, see issue 3321
#
class TestInvalidHandle(unittest.TestCase):
@unittest.skipIf(WIN32, "skipped on Windows")
def test_invalid_handles(self):
conn = multiprocessing.connection.Connection(44977608)
# check that poll() doesn't crash
try:
conn.poll()
except (ValueError, OSError):
pass
finally:
# Hack private attribute _handle to avoid printing an error
# in conn.__del__
conn._handle = None
self.assertRaises((ValueError, OSError),
multiprocessing.connection.Connection, -1)
class OtherTest(unittest.TestCase):
# TODO: add more tests for deliver/answer challenge.
def test_deliver_challenge_auth_failure(self):
class _FakeConnection(object):
def recv_bytes(self, size):
return b'something bogus'
def send_bytes(self, data):
pass
self.assertRaises(multiprocessing.AuthenticationError,
multiprocessing.connection.deliver_challenge,
_FakeConnection(), b'abc')
def test_answer_challenge_auth_failure(self):
class _FakeConnection(object):
def __init__(self):
self.count = 0
def recv_bytes(self, size):
self.count += 1
if self.count == 1:
return multiprocessing.connection.CHALLENGE
elif self.count == 2:
return b'something bogus'
return b''
def send_bytes(self, data):
pass
self.assertRaises(multiprocessing.AuthenticationError,
multiprocessing.connection.answer_challenge,
_FakeConnection(), b'abc')
#
# Test Manager.start()/Pool.__init__() initializer feature - see issue 5585
#
def initializer(ns):
ns.test += 1
class TestInitializers(unittest.TestCase):
def setUp(self):
self.mgr = multiprocessing.Manager()
self.ns = self.mgr.Namespace()
self.ns.test = 0
def tearDown(self):
self.mgr.shutdown()
self.mgr.join()
def test_manager_initializer(self):
m = multiprocessing.managers.SyncManager()
self.assertRaises(TypeError, m.start, 1)
m.start(initializer, (self.ns,))
self.assertEqual(self.ns.test, 1)
m.shutdown()
m.join()
def test_pool_initializer(self):
self.assertRaises(TypeError, multiprocessing.Pool, initializer=1)
p = multiprocessing.Pool(1, initializer, (self.ns,))
p.close()
p.join()
self.assertEqual(self.ns.test, 1)
#
# Issue 5155, 5313, 5331: Test process in processes
# Verifies os.close(sys.stdin.fileno) vs. sys.stdin.close() behavior
#
def _this_sub_process(q):
try:
item = q.get(block=False)
except pyqueue.Empty:
pass
def _test_process(q):
queue = multiprocessing.Queue()
subProc = multiprocessing.Process(target=_this_sub_process, args=(queue,))
subProc.daemon = True
subProc.start()
subProc.join()
def _afunc(x):
return x*x
def pool_in_process():
pool = multiprocessing.Pool(processes=4)
x = pool.map(_afunc, [1, 2, 3, 4, 5, 6, 7])
pool.close()
pool.join()
class _file_like(object):
def __init__(self, delegate):
self._delegate = delegate
self._pid = None
@property
def cache(self):
pid = os.getpid()
# There are no race conditions since fork() preserves only the calling thread
if pid != self._pid:
self._pid = pid
self._cache = []
return self._cache
def write(self, data):
self.cache.append(data)
def flush(self):
self._delegate.write(''.join(self.cache))
self._cache = []
class TestStdinBadfiledescriptor(unittest.TestCase):
def test_queue_in_process(self):
queue = multiprocessing.Queue()
proc = multiprocessing.Process(target=_test_process, args=(queue,))
proc.start()
proc.join()
def test_pool_in_process(self):
p = multiprocessing.Process(target=pool_in_process)
p.start()
p.join()
def test_flushing(self):
sio = io.StringIO()
flike = _file_like(sio)
flike.write('foo')
proc = multiprocessing.Process(target=lambda: flike.flush())
flike.flush()
self.assertEqual(sio.getvalue(), 'foo')
class TestWait(unittest.TestCase):
@classmethod
def _child_test_wait(cls, w, slow):
for i in range(10):
if slow:
time.sleep(random.random()*0.1)
w.send((i, os.getpid()))
w.close()
def test_wait(self, slow=False):
from multiprocessing.connection import wait
readers = []
procs = []
messages = []
for i in range(4):
r, w = multiprocessing.Pipe(duplex=False)
p = multiprocessing.Process(target=self._child_test_wait, args=(w, slow))
p.daemon = True
p.start()
w.close()
readers.append(r)
procs.append(p)
self.addCleanup(p.join)
while readers:
for r in wait(readers):
try:
msg = r.recv()
except EOFError:
readers.remove(r)
r.close()
else:
messages.append(msg)
messages.sort()
expected = sorted((i, p.pid) for i in range(10) for p in procs)
self.assertEqual(messages, expected)
@classmethod
def _child_test_wait_socket(cls, address, slow):
s = socket.socket()
s.connect(address)
for i in range(10):
if slow:
time.sleep(random.random()*0.1)
s.sendall(('%s\n' % i).encode('ascii'))
s.close()
def test_wait_socket(self, slow=False):
from multiprocessing.connection import wait
l = socket.socket()
l.bind((test.support.HOST, 0))
l.listen(4)
addr = l.getsockname()
readers = []
procs = []
dic = {}
for i in range(4):
p = multiprocessing.Process(target=self._child_test_wait_socket,
args=(addr, slow))
p.daemon = True
p.start()
procs.append(p)
self.addCleanup(p.join)
for i in range(4):
r, _ = l.accept()
readers.append(r)
dic[r] = []
l.close()
while readers:
for r in wait(readers):
msg = r.recv(32)
if not msg:
readers.remove(r)
r.close()
else:
dic[r].append(msg)
expected = ''.join('%s\n' % i for i in range(10)).encode('ascii')
for v in dic.values():
self.assertEqual(b''.join(v), expected)
def test_wait_slow(self):
self.test_wait(True)
def test_wait_socket_slow(self):
self.test_wait_socket(True)
def test_wait_timeout(self):
from multiprocessing.connection import wait
expected = 5
a, b = multiprocessing.Pipe()
start = time.time()
res = wait([a, b], expected)
delta = time.time() - start
self.assertEqual(res, [])
self.assertLess(delta, expected * 2)
self.assertGreater(delta, expected * 0.5)
b.send(None)
start = time.time()
res = wait([a, b], 20)
delta = time.time() - start
self.assertEqual(res, [a])
self.assertLess(delta, 0.4)
@classmethod
def signal_and_sleep(cls, sem, period):
sem.release()
time.sleep(period)
def test_wait_integer(self):
from multiprocessing.connection import wait
expected = 3
sorted_ = lambda l: sorted(l, key=lambda x: id(x))
sem = multiprocessing.Semaphore(0)
a, b = multiprocessing.Pipe()
p = multiprocessing.Process(target=self.signal_and_sleep,
args=(sem, expected))
p.start()
self.assertIsInstance(p.sentinel, int)
self.assertTrue(sem.acquire(timeout=20))
start = time.time()
res = wait([a, p.sentinel, b], expected + 20)
delta = time.time() - start
self.assertEqual(res, [p.sentinel])
self.assertLess(delta, expected + 2)
self.assertGreater(delta, expected - 2)
a.send(None)
start = time.time()
res = wait([a, p.sentinel, b], 20)
delta = time.time() - start
self.assertEqual(sorted_(res), sorted_([p.sentinel, b]))
self.assertLess(delta, 0.4)
b.send(None)
start = time.time()
res = wait([a, p.sentinel, b], 20)
delta = time.time() - start
self.assertEqual(sorted_(res), sorted_([a, p.sentinel, b]))
self.assertLess(delta, 0.4)
p.terminate()
p.join()
def test_neg_timeout(self):
from multiprocessing.connection import wait
a, b = multiprocessing.Pipe()
t = time.time()
res = wait([a], timeout=-1)
t = time.time() - t
self.assertEqual(res, [])
self.assertLess(t, 1)
a.close()
b.close()
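# Illustrative sketch (not part of the test suite): wait() accepts a mix of
# Connections and process sentinels, as test_wait_integer shows -- a
# sentinel becomes ready when its process exits:
def _sketch_wait_exit_or_message(conn, proc, timeout=5.0):
    from multiprocessing.connection import wait
    ready = wait([conn, proc.sentinel], timeout)
    if proc.sentinel in ready:
        return 'process exited'
    if conn in ready:
        return conn.recv()
    return 'timed out'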
#
# Issue 14151: Test invalid family on invalid environment
#
class TestInvalidFamily(unittest.TestCase):
@unittest.skipIf(WIN32, "skipped on Windows")
def test_invalid_family(self):
with self.assertRaises(ValueError):
multiprocessing.connection.Listener(r'\\.\test')
@unittest.skipUnless(WIN32, "skipped on non-Windows platforms")
def test_invalid_family_win32(self):
with self.assertRaises(ValueError):
multiprocessing.connection.Listener('/var/test.pipe')
#
# Issue 12098: check sys.flags of child matches that for parent
#
class TestFlags(unittest.TestCase):
@classmethod
def run_in_grandchild(cls, conn):
conn.send(tuple(sys.flags))
@classmethod
def run_in_child(cls):
import json
r, w = multiprocessing.Pipe(duplex=False)
p = multiprocessing.Process(target=cls.run_in_grandchild, args=(w,))
p.start()
grandchild_flags = r.recv()
p.join()
r.close()
w.close()
flags = (tuple(sys.flags), grandchild_flags)
print(json.dumps(flags))
def test_flags(self):
import json, subprocess
# start child process using unusual flags
prog = ('from test._test_multiprocessing import TestFlags; ' +
'TestFlags.run_in_child()')
data = subprocess.check_output(
[sys.executable, '-E', '-S', '-O', '-c', prog])
child_flags, grandchild_flags = json.loads(data.decode('ascii'))
self.assertEqual(child_flags, grandchild_flags)
#
# Test interaction with socket timeouts - see Issue #6056
#
class TestTimeouts(unittest.TestCase):
@classmethod
def _test_timeout(cls, child, address):
time.sleep(1)
child.send(123)
child.close()
conn = multiprocessing.connection.Client(address)
conn.send(456)
conn.close()
def test_timeout(self):
old_timeout = socket.getdefaulttimeout()
try:
socket.setdefaulttimeout(0.1)
parent, child = multiprocessing.Pipe(duplex=True)
l = multiprocessing.connection.Listener(family='AF_INET')
p = multiprocessing.Process(target=self._test_timeout,
args=(child, l.address))
p.start()
child.close()
self.assertEqual(parent.recv(), 123)
parent.close()
conn = l.accept()
self.assertEqual(conn.recv(), 456)
conn.close()
l.close()
p.join(10)
finally:
socket.setdefaulttimeout(old_timeout)
#
# Test what happens with no "if __name__ == '__main__'"
#
class TestNoForkBomb(unittest.TestCase):
def test_noforkbomb(self):
sm = multiprocessing.get_start_method()
name = os.path.join(os.path.dirname(__file__), 'mp_fork_bomb.py')
if sm != 'fork':
rc, out, err = test.script_helper.assert_python_failure(name, sm)
self.assertEqual('', out.decode('ascii'))
self.assertIn('RuntimeError', err.decode('ascii'))
else:
rc, out, err = test.script_helper.assert_python_ok(name, sm)
self.assertEqual('123', out.decode('ascii').rstrip())
self.assertEqual('', err.decode('ascii'))
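# Illustrative sketch (not part of the test suite): under the 'spawn' and
# 'forkserver' start methods the main module is re-imported in every child,
# so process creation must sit behind a __main__ guard or each child spawns
# more children -- the fork bomb the test above provokes via mp_fork_bomb.py.
# A correctly guarded script looks like:
#
#     import multiprocessing
#
#     def work():
#         print(123)
#
#     if __name__ == '__main__':    # required for spawn/forkserver
#         p = multiprocessing.Process(target=work)
#         p.start()
#         p.join()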
#
# Issue #17555: ForkAwareThreadLock
#
class TestForkAwareThreadLock(unittest.TestCase):
# We recursively start processes. Issue #17555 meant that the
# after-fork registry would get duplicate entries for the same
# lock. The size of the registry at generation n was ~2**n.
@classmethod
def child(cls, n, conn):
if n > 1:
p = multiprocessing.Process(target=cls.child, args=(n-1, conn))
p.start()
conn.close()
p.join(timeout=5)
else:
conn.send(len(util._afterfork_registry))
conn.close()
def test_lock(self):
r, w = multiprocessing.Pipe(False)
l = util.ForkAwareThreadLock()
old_size = len(util._afterfork_registry)
p = multiprocessing.Process(target=self.child, args=(5, w))
p.start()
w.close()
new_size = r.recv()
p.join(timeout=5)
self.assertLessEqual(new_size, old_size)
#
# Check that non-forked child processes do not inherit unneeded fds/handles
#
class TestCloseFds(unittest.TestCase):
def get_high_socket_fd(self):
if WIN32:
# The child process will not have any socket handles, so
# calling socket.fromfd() should produce WSAENOTSOCK even
# if there is a handle of the same number.
return socket.socket().detach()
else:
# We want to produce a socket with an fd high enough that a
# freshly created child process will not have any fds as high.
fd = socket.socket().detach()
to_close = []
while fd < 50:
to_close.append(fd)
fd = os.dup(fd)
for x in to_close:
os.close(x)
return fd
def close(self, fd):
if WIN32:
socket.socket(fileno=fd).close()
else:
os.close(fd)
@classmethod
def _test_closefds(cls, conn, fd):
try:
s = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
except Exception as e:
conn.send(e)
else:
s.close()
conn.send(None)
def test_closefd(self):
if not HAS_REDUCTION:
raise unittest.SkipTest('requires fd pickling')
reader, writer = multiprocessing.Pipe()
fd = self.get_high_socket_fd()
try:
p = multiprocessing.Process(target=self._test_closefds,
args=(writer, fd))
p.start()
writer.close()
e = reader.recv()
p.join(timeout=5)
finally:
self.close(fd)
writer.close()
reader.close()
if multiprocessing.get_start_method() == 'fork':
self.assertIs(e, None)
else:
WSAENOTSOCK = 10038
self.assertIsInstance(e, OSError)
self.assertTrue(e.errno == errno.EBADF or
e.winerror == WSAENOTSOCK, e)
#
# Issue #17097: EINTR should be ignored by recv(), send(), accept() etc
#
class TestIgnoreEINTR(unittest.TestCase):
@classmethod
def _test_ignore(cls, conn):
def handler(signum, frame):
pass
signal.signal(signal.SIGUSR1, handler)
conn.send('ready')
x = conn.recv()
conn.send(x)
conn.send_bytes(b'x'*(1024*1024)) # sending 1 MB should block
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_ignore(self):
conn, child_conn = multiprocessing.Pipe()
try:
p = multiprocessing.Process(target=self._test_ignore,
args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
self.assertEqual(conn.recv(), 'ready')
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
time.sleep(0.1)
conn.send(1234)
self.assertEqual(conn.recv(), 1234)
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
self.assertEqual(conn.recv_bytes(), b'x'*(1024*1024))
time.sleep(0.1)
p.join()
finally:
conn.close()
@classmethod
def _test_ignore_listener(cls, conn):
def handler(signum, frame):
pass
signal.signal(signal.SIGUSR1, handler)
with multiprocessing.connection.Listener() as l:
conn.send(l.address)
a = l.accept()
a.send('welcome')
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_ignore_listener(self):
conn, child_conn = multiprocessing.Pipe()
try:
p = multiprocessing.Process(target=self._test_ignore_listener,
args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
address = conn.recv()
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
time.sleep(0.1)
client = multiprocessing.connection.Client(address)
self.assertEqual(client.recv(), 'welcome')
p.join()
finally:
conn.close()
class TestStartMethod(unittest.TestCase):
@classmethod
def _check_context(cls, conn):
conn.send(multiprocessing.get_start_method())
def check_context(self, ctx):
r, w = ctx.Pipe(duplex=False)
p = ctx.Process(target=self._check_context, args=(w,))
p.start()
w.close()
child_method = r.recv()
r.close()
p.join()
self.assertEqual(child_method, ctx.get_start_method())
def test_context(self):
for method in ('fork', 'spawn', 'forkserver'):
try:
ctx = multiprocessing.get_context(method)
except ValueError:
continue
self.assertEqual(ctx.get_start_method(), method)
self.assertIs(ctx.get_context(), ctx)
self.assertRaises(ValueError, ctx.set_start_method, 'spawn')
self.assertRaises(ValueError, ctx.set_start_method, None)
self.check_context(ctx)
def test_set_get(self):
multiprocessing.set_forkserver_preload(PRELOAD)
count = 0
old_method = multiprocessing.get_start_method()
try:
for method in ('fork', 'spawn', 'forkserver'):
try:
multiprocessing.set_start_method(method, force=True)
except ValueError:
continue
self.assertEqual(multiprocessing.get_start_method(), method)
ctx = multiprocessing.get_context()
self.assertEqual(ctx.get_start_method(), method)
self.assertTrue(type(ctx).__name__.lower().startswith(method))
self.assertTrue(
ctx.Process.__name__.lower().startswith(method))
self.check_context(multiprocessing)
count += 1
finally:
multiprocessing.set_start_method(old_method, force=True)
self.assertGreaterEqual(count, 1)
def test_get_all(self):
methods = multiprocessing.get_all_start_methods()
if sys.platform == 'win32':
self.assertEqual(methods, ['spawn'])
else:
self.assertTrue(methods == ['fork', 'spawn'] or
methods == ['fork', 'spawn', 'forkserver'])
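# A brief sketch (not exercised by the tests above) of the context API that
# TestStartMethod verifies: a context object mirrors the multiprocessing
# module but pins the start method for everything created through it.
def _context_demo_child(q):
    q.put(42)
def _context_demo():
    ctx = multiprocessing.get_context('spawn')  # ValueError if unsupported
    q = ctx.Queue()
    p = ctx.Process(target=_context_demo_child, args=(q,))
    p.start()
    value = q.get()                             # -> 42
    p.join()
    return value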
#
# Check that killing process does not leak named semaphores
#
@unittest.skipIf(sys.platform == "win32",
"test semantics don't make sense on Windows")
class TestSemaphoreTracker(unittest.TestCase):
def test_semaphore_tracker(self):
import subprocess
cmd = '''if 1:
import multiprocessing as mp, time, os
mp.set_start_method("spawn")
lock1 = mp.Lock()
lock2 = mp.Lock()
os.write(%d, lock1._semlock.name.encode("ascii") + b"\\n")
os.write(%d, lock2._semlock.name.encode("ascii") + b"\\n")
time.sleep(10)
'''
r, w = os.pipe()
p = subprocess.Popen([sys.executable,
'-c', cmd % (w, w)],
pass_fds=[w],
stderr=subprocess.PIPE)
os.close(w)
with open(r, 'rb', closefd=True) as f:
name1 = f.readline().rstrip().decode('ascii')
name2 = f.readline().rstrip().decode('ascii')
_multiprocessing.sem_unlink(name1)
p.terminate()
p.wait()
time.sleep(2.0)
with self.assertRaises(OSError) as ctx:
_multiprocessing.sem_unlink(name2)
# docs say it should be ENOENT, but OSX seems to give EINVAL
self.assertIn(ctx.exception.errno, (errno.ENOENT, errno.EINVAL))
err = p.stderr.read().decode('utf-8')
p.stderr.close()
expected = 'semaphore_tracker: There appear to be 2 leaked semaphores'
self.assertRegex(err, expected)
self.assertRegex(err, 'semaphore_tracker: %r: \[Errno' % name1)
#
# Mixins
#
class ProcessesMixin(object):
TYPE = 'processes'
Process = multiprocessing.Process
connection = multiprocessing.connection
current_process = staticmethod(multiprocessing.current_process)
active_children = staticmethod(multiprocessing.active_children)
Pool = staticmethod(multiprocessing.Pool)
Pipe = staticmethod(multiprocessing.Pipe)
Queue = staticmethod(multiprocessing.Queue)
JoinableQueue = staticmethod(multiprocessing.JoinableQueue)
Lock = staticmethod(multiprocessing.Lock)
RLock = staticmethod(multiprocessing.RLock)
Semaphore = staticmethod(multiprocessing.Semaphore)
BoundedSemaphore = staticmethod(multiprocessing.BoundedSemaphore)
Condition = staticmethod(multiprocessing.Condition)
Event = staticmethod(multiprocessing.Event)
Barrier = staticmethod(multiprocessing.Barrier)
Value = staticmethod(multiprocessing.Value)
Array = staticmethod(multiprocessing.Array)
RawValue = staticmethod(multiprocessing.RawValue)
RawArray = staticmethod(multiprocessing.RawArray)
class ManagerMixin(object):
TYPE = 'manager'
Process = multiprocessing.Process
Queue = property(operator.attrgetter('manager.Queue'))
JoinableQueue = property(operator.attrgetter('manager.JoinableQueue'))
Lock = property(operator.attrgetter('manager.Lock'))
RLock = property(operator.attrgetter('manager.RLock'))
Semaphore = property(operator.attrgetter('manager.Semaphore'))
BoundedSemaphore = property(operator.attrgetter('manager.BoundedSemaphore'))
Condition = property(operator.attrgetter('manager.Condition'))
Event = property(operator.attrgetter('manager.Event'))
Barrier = property(operator.attrgetter('manager.Barrier'))
Value = property(operator.attrgetter('manager.Value'))
Array = property(operator.attrgetter('manager.Array'))
list = property(operator.attrgetter('manager.list'))
dict = property(operator.attrgetter('manager.dict'))
Namespace = property(operator.attrgetter('manager.Namespace'))
@classmethod
def Pool(cls, *args, **kwds):
return cls.manager.Pool(*args, **kwds)
@classmethod
def setUpClass(cls):
cls.manager = multiprocessing.Manager()
@classmethod
def tearDownClass(cls):
# only the manager process should be returned by active_children()
# but this can take a bit on slow machines, so wait a few seconds
# if there are other children too (see #17395)
t = 0.01
while len(multiprocessing.active_children()) > 1 and t < 5:
time.sleep(t)
t *= 2
gc.collect() # do garbage collection
if cls.manager._number_of_objects() != 0:
# This is not really an error since some tests do not
# ensure that all processes which hold a reference to a
# managed object have been joined.
print('Shared objects which still exist at manager shutdown:')
print(cls.manager._debug_info())
cls.manager.shutdown()
cls.manager.join()
cls.manager = None
class ThreadsMixin(object):
TYPE = 'threads'
Process = multiprocessing.dummy.Process
connection = multiprocessing.dummy.connection
current_process = staticmethod(multiprocessing.dummy.current_process)
active_children = staticmethod(multiprocessing.dummy.active_children)
Pool = staticmethod(multiprocessing.Pool)
Pipe = staticmethod(multiprocessing.dummy.Pipe)
Queue = staticmethod(multiprocessing.dummy.Queue)
JoinableQueue = staticmethod(multiprocessing.dummy.JoinableQueue)
Lock = staticmethod(multiprocessing.dummy.Lock)
RLock = staticmethod(multiprocessing.dummy.RLock)
Semaphore = staticmethod(multiprocessing.dummy.Semaphore)
BoundedSemaphore = staticmethod(multiprocessing.dummy.BoundedSemaphore)
Condition = staticmethod(multiprocessing.dummy.Condition)
Event = staticmethod(multiprocessing.dummy.Event)
Barrier = staticmethod(multiprocessing.dummy.Barrier)
Value = staticmethod(multiprocessing.dummy.Value)
Array = staticmethod(multiprocessing.dummy.Array)
#
# Functions used to create test cases from the base ones in this module
#
def install_tests_in_module_dict(remote_globs, start_method):
__module__ = remote_globs['__name__']
local_globs = globals()
ALL_TYPES = {'processes', 'threads', 'manager'}
for name, base in local_globs.items():
if not isinstance(base, type):
continue
if issubclass(base, BaseTestCase):
if base is BaseTestCase:
continue
assert set(base.ALLOWED_TYPES) <= ALL_TYPES, base.ALLOWED_TYPES
for type_ in base.ALLOWED_TYPES:
newname = 'With' + type_.capitalize() + name[1:]
Mixin = local_globs[type_.capitalize() + 'Mixin']
class Temp(base, Mixin, unittest.TestCase):
pass
Temp.__name__ = Temp.__qualname__ = newname
Temp.__module__ = __module__
remote_globs[newname] = Temp
elif issubclass(base, unittest.TestCase):
class Temp(base, object):
pass
Temp.__name__ = Temp.__qualname__ = name
Temp.__module__ = __module__
remote_globs[name] = Temp
dangling = [None, None]
old_start_method = [None]
def setUpModule():
multiprocessing.set_forkserver_preload(PRELOAD)
multiprocessing.process._cleanup()
dangling[0] = multiprocessing.process._dangling.copy()
dangling[1] = threading._dangling.copy()
old_start_method[0] = multiprocessing.get_start_method(allow_none=True)
try:
multiprocessing.set_start_method(start_method, force=True)
except ValueError:
raise unittest.SkipTest(start_method +
' start method not supported')
if sys.platform.startswith("linux"):
try:
lock = multiprocessing.RLock()
except OSError:
raise unittest.SkipTest("OSError raises on RLock creation, "
"see issue 3111!")
check_enough_semaphores()
util.get_temp_dir() # creates temp directory
multiprocessing.get_logger().setLevel(LOG_LEVEL)
def tearDownModule():
multiprocessing.set_start_method(old_start_method[0], force=True)
# pause a bit so we don't get warning about dangling threads/processes
time.sleep(0.5)
multiprocessing.process._cleanup()
gc.collect()
tmp = set(multiprocessing.process._dangling) - set(dangling[0])
if tmp:
print('Dangling processes:', tmp, file=sys.stderr)
del tmp
tmp = set(threading._dangling) - set(dangling[1])
if tmp:
print('Dangling threads:', tmp, file=sys.stderr)
remote_globs['setUpModule'] = setUpModule
remote_globs['tearDownModule'] = tearDownModule
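# Usage sketch: a per-start-method test module (e.g. CPython's
# test_multiprocessing_spawn) typically contains little more than:
#
#     from test._test_multiprocessing import install_tests_in_module_dict
#     install_tests_in_module_dict(globals(), 'spawn')
#
# which stamps concrete TestCase subclasses for every base/mixin combination
# into that module's namespace.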
| lgpl-3.0 |
yoelk/kivy | kivy/core/image/img_tex.py | 54 | 1548 | '''
Tex: Compressed texture
'''
__all__ = ('ImageLoaderTex', )
import json
from struct import unpack
from kivy.logger import Logger
from kivy.core.image import ImageLoaderBase, ImageData, ImageLoader
class ImageLoaderTex(ImageLoaderBase):
@staticmethod
def extensions():
return ('tex', )
def load(self, filename):
try:
fd = open(filename, 'rb')
if fd.read(4) != b'KTEX':  # file is opened in binary mode, so compare bytes
raise Exception('Invalid tex identifier')
headersize = unpack('I', fd.read(4))[0]
header = fd.read(headersize)
if len(header) != headersize:
raise Exception('Truncated tex header')
info = json.loads(header)
data = fd.read()
if len(data) != info['datalen']:
raise Exception('Truncated tex data')
except:
Logger.warning('Image: Image <%s> is corrupted' % filename)
raise
width, height = info['image_size']
tw, th = info['texture_size']
images = [data]
im = ImageData(width, height, str(info['format']), images[0],
source=filename)
'''
if len(dds.images) > 1:
images = dds.images
images_size = dds.images_size
for index in range(1, len(dds.images)):
w, h = images_size[index]
data = images[index]
im.add_mipmap(index, w, h, data)
'''
return [im]
# register
ImageLoader.register(ImageLoaderTex)
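# A minimal sketch (not part of Kivy) of writing a file that load() above
# accepts: the b'KTEX' magic, a native-endian uint32 header length, a JSON
# header carrying the keys read above, then the raw payload. The key names
# and layout are inferred from load().
def write_tex(filename, data, width, height, fmt='rgba'):
    from struct import pack
    header = json.dumps({
        'datalen': len(data),
        'image_size': (width, height),
        'texture_size': (width, height),
        'format': fmt,
    }).encode('utf-8')
    with open(filename, 'wb') as fd:
        fd.write(b'KTEX')
        fd.write(pack('I', len(header)))
        fd.write(header)
        fd.write(data)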
| mit |
zhjunlang/kbengine | kbe/res/scripts/common/Lib/test/test_urllib2_localnet.py | 60 | 26073 | import base64
import os
import email
import urllib.parse
import urllib.request
import http.server
import unittest
import hashlib
from test import support
threading = support.import_module('threading')
try:
import ssl
except ImportError:
ssl = None
here = os.path.dirname(__file__)
# Self-signed cert file for 'localhost'
CERT_localhost = os.path.join(here, 'keycert.pem')
# Self-signed cert file for 'fakehostname'
CERT_fakehostname = os.path.join(here, 'keycert2.pem')
# Loopback http server infrastructure
class LoopbackHttpServer(http.server.HTTPServer):
"""HTTP server w/ a few modifications that make it useful for
loopback testing purposes.
"""
def __init__(self, server_address, RequestHandlerClass):
http.server.HTTPServer.__init__(self,
server_address,
RequestHandlerClass)
# Set the timeout of our listening socket really low so
# that we can stop the server easily.
self.socket.settimeout(0.1)
def get_request(self):
"""HTTPServer method, overridden."""
request, client_address = self.socket.accept()
# It's a loopback connection, so setting the timeout
# really low shouldn't affect anything, but should make
# deadlocks less likely to occur.
request.settimeout(10.0)
return (request, client_address)
class LoopbackHttpServerThread(threading.Thread):
"""Stoppable thread that runs a loopback http server."""
def __init__(self, request_handler):
threading.Thread.__init__(self)
self._stop_server = False
self.ready = threading.Event()
request_handler.protocol_version = "HTTP/1.0"
self.httpd = LoopbackHttpServer(("127.0.0.1", 0),
request_handler)
self.port = self.httpd.server_port
def stop(self):
"""Stops the webserver if it's currently running."""
self._stop_server = True
self.join()
self.httpd.server_close()
def run(self):
self.ready.set()
while not self._stop_server:
self.httpd.handle_request()
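# Typical use of this helper, mirroring the test cases further below (sketch;
# SomeRequestHandler stands in for a BaseHTTPRequestHandler subclass):
#
#     server = LoopbackHttpServerThread(SomeRequestHandler)
#     server.start()
#     server.ready.wait()                  # block until the thread is running
#     url = "http://127.0.0.1:%s/" % server.port
#     ...                                  # issue requests against url
#     server.stop()                        # joins the thread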
# Authentication infrastructure
class DigestAuthHandler:
"""Handler for performing digest authentication."""
def __init__(self):
self._request_num = 0
self._nonces = []
self._users = {}
self._realm_name = "Test Realm"
self._qop = "auth"
def set_qop(self, qop):
self._qop = qop
def set_users(self, users):
assert isinstance(users, dict)
self._users = users
def set_realm(self, realm):
self._realm_name = realm
def _generate_nonce(self):
self._request_num += 1
nonce = hashlib.md5(str(self._request_num).encode("ascii")).hexdigest()
self._nonces.append(nonce)
return nonce
def _create_auth_dict(self, auth_str):
first_space_index = auth_str.find(" ")
auth_str = auth_str[first_space_index+1:]
parts = auth_str.split(",")
auth_dict = {}
for part in parts:
name, value = part.split("=")
name = name.strip()
if value[0] == '"' and value[-1] == '"':
value = value[1:-1]
else:
value = value.strip()
auth_dict[name] = value
return auth_dict
def _validate_auth(self, auth_dict, password, method, uri):
final_dict = {}
final_dict.update(auth_dict)
final_dict["password"] = password
final_dict["method"] = method
final_dict["uri"] = uri
HA1_str = "%(username)s:%(realm)s:%(password)s" % final_dict
HA1 = hashlib.md5(HA1_str.encode("ascii")).hexdigest()
HA2_str = "%(method)s:%(uri)s" % final_dict
HA2 = hashlib.md5(HA2_str.encode("ascii")).hexdigest()
final_dict["HA1"] = HA1
final_dict["HA2"] = HA2
response_str = "%(HA1)s:%(nonce)s:%(nc)s:" \
"%(cnonce)s:%(qop)s:%(HA2)s" % final_dict
response = hashlib.md5(response_str.encode("ascii")).hexdigest()
return response == auth_dict["response"]
def _return_auth_challenge(self, request_handler):
request_handler.send_response(407, "Proxy Authentication Required")
request_handler.send_header("Content-Type", "text/html")
request_handler.send_header(
'Proxy-Authenticate', 'Digest realm="%s", '
'qop="%s",'
'nonce="%s", ' % \
(self._realm_name, self._qop, self._generate_nonce()))
# XXX: Not sure if we're supposed to add this next header or
# not.
#request_handler.send_header('Connection', 'close')
request_handler.end_headers()
request_handler.wfile.write(b"Proxy Authentication Required.")
return False
def handle_request(self, request_handler):
"""Performs digest authentication on the given HTTP request
handler. Returns True if authentication was successful, False
otherwise.
If no users have been set, then digest auth is effectively
disabled and this method will always return True.
"""
if len(self._users) == 0:
return True
if "Proxy-Authorization" not in request_handler.headers:
return self._return_auth_challenge(request_handler)
else:
auth_dict = self._create_auth_dict(
request_handler.headers["Proxy-Authorization"]
)
if auth_dict["username"] in self._users:
password = self._users[ auth_dict["username"] ]
else:
return self._return_auth_challenge(request_handler)
if auth_dict.get("nonce") not in self._nonces:
return self._return_auth_challenge(request_handler)
else:
self._nonces.remove(auth_dict["nonce"])
auth_validated = False
# MSIE uses short_path in its validation, but Python's
# urllib.request uses the full path, so we're going to see if
# either of them works here.
for path in [request_handler.path, request_handler.short_path]:
if self._validate_auth(auth_dict,
password,
request_handler.command,
path):
auth_validated = True
if not auth_validated:
return self._return_auth_challenge(request_handler)
return True
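# For reference, the RFC 2617 qop="auth" digest that _validate_auth() above
# checks, written out as a standalone sketch (all arguments are placeholders):
def _digest_response(username, realm, password, method, uri,
                     nonce, nc, cnonce, qop="auth"):
    H = lambda s: hashlib.md5(s.encode("ascii")).hexdigest()
    ha1 = H("%s:%s:%s" % (username, realm, password))
    ha2 = H("%s:%s" % (method, uri))
    return H("%s:%s:%s:%s:%s:%s" % (ha1, nonce, nc, cnonce, qop, ha2))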
class BasicAuthHandler(http.server.BaseHTTPRequestHandler):
"""Handler for performing basic authentication."""
# Server side values
USER = 'testUser'
PASSWD = 'testPass'
REALM = 'Test'
USER_PASSWD = "%s:%s" % (USER, PASSWD)
ENCODED_AUTH = base64.b64encode(USER_PASSWD.encode('ascii')).decode('ascii')
def __init__(self, *args, **kwargs):
http.server.BaseHTTPRequestHandler.__init__(self, *args, **kwargs)
def log_message(self, format, *args):
# Suppress console log message
pass
def do_HEAD(self):
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
def do_AUTHHEAD(self):
self.send_response(401)
self.send_header("WWW-Authenticate", "Basic realm=\"%s\"" % self.REALM)
self.send_header("Content-type", "text/html")
self.end_headers()
def do_GET(self):
if not self.headers.get("Authorization", ""):
self.do_AUTHHEAD()
self.wfile.write(b"No Auth header received")
elif self.headers.get(
"Authorization", "") == "Basic " + self.ENCODED_AUTH:
self.send_response(200)
self.end_headers()
self.wfile.write(b"It works")
else:
# Request Unauthorized
self.do_AUTHHEAD()
# Proxy test infrastructure
class FakeProxyHandler(http.server.BaseHTTPRequestHandler):
"""This is a 'fake proxy' that makes it look like the entire
internet has gone down due to a sudden zombie invasion. Its main
utility is in providing us with authentication support for
testing.
"""
def __init__(self, digest_auth_handler, *args, **kwargs):
# This has to be set before calling our parent's __init__(), which will
# try to call do_GET().
self.digest_auth_handler = digest_auth_handler
http.server.BaseHTTPRequestHandler.__init__(self, *args, **kwargs)
def log_message(self, format, *args):
# Uncomment the next line for debugging.
# sys.stderr.write(format % args)
pass
def do_GET(self):
(scm, netloc, path, params, query, fragment) = urllib.parse.urlparse(
self.path, "http")
self.short_path = path
if self.digest_auth_handler.handle_request(self):
self.send_response(200, "OK")
self.send_header("Content-Type", "text/html")
self.end_headers()
self.wfile.write(bytes("You've reached %s!<BR>" % self.path,
"ascii"))
self.wfile.write(b"Our apologies, but our server is down due to "
b"a sudden zombie invasion.")
# Test cases
@unittest.skipUnless(threading, "Threading required for this test.")
class BasicAuthTests(unittest.TestCase):
USER = "testUser"
PASSWD = "testPass"
INCORRECT_PASSWD = "Incorrect"
REALM = "Test"
def setUp(self):
super(BasicAuthTests, self).setUp()
# With Basic Authentication
def http_server_with_basic_auth_handler(*args, **kwargs):
return BasicAuthHandler(*args, **kwargs)
self.server = LoopbackHttpServerThread(http_server_with_basic_auth_handler)
self.server_url = 'http://127.0.0.1:%s' % self.server.port
self.server.start()
self.server.ready.wait()
def tearDown(self):
self.server.stop()
super(BasicAuthTests, self).tearDown()
def test_basic_auth_success(self):
ah = urllib.request.HTTPBasicAuthHandler()
ah.add_password(self.REALM, self.server_url, self.USER, self.PASSWD)
urllib.request.install_opener(urllib.request.build_opener(ah))
try:
self.assertTrue(urllib.request.urlopen(self.server_url))
except urllib.error.HTTPError:
self.fail("Basic auth failed for the url: %s", self.server_url)
def test_basic_auth_httperror(self):
ah = urllib.request.HTTPBasicAuthHandler()
ah.add_password(self.REALM, self.server_url, self.USER, self.INCORRECT_PASSWD)
urllib.request.install_opener(urllib.request.build_opener(ah))
self.assertRaises(urllib.error.HTTPError, urllib.request.urlopen, self.server_url)
@unittest.skipUnless(threading, "Threading required for this test.")
class ProxyAuthTests(unittest.TestCase):
URL = "http://localhost"
USER = "tester"
PASSWD = "test123"
REALM = "TestRealm"
def setUp(self):
super(ProxyAuthTests, self).setUp()
self.digest_auth_handler = DigestAuthHandler()
self.digest_auth_handler.set_users({self.USER: self.PASSWD})
self.digest_auth_handler.set_realm(self.REALM)
# With Digest Authentication.
def create_fake_proxy_handler(*args, **kwargs):
return FakeProxyHandler(self.digest_auth_handler, *args, **kwargs)
self.server = LoopbackHttpServerThread(create_fake_proxy_handler)
self.server.start()
self.server.ready.wait()
proxy_url = "http://127.0.0.1:%d" % self.server.port
handler = urllib.request.ProxyHandler({"http" : proxy_url})
self.proxy_digest_handler = urllib.request.ProxyDigestAuthHandler()
self.opener = urllib.request.build_opener(
handler, self.proxy_digest_handler)
def tearDown(self):
self.server.stop()
super(ProxyAuthTests, self).tearDown()
def test_proxy_with_bad_password_raises_httperror(self):
self.proxy_digest_handler.add_password(self.REALM, self.URL,
self.USER, self.PASSWD+"bad")
self.digest_auth_handler.set_qop("auth")
self.assertRaises(urllib.error.HTTPError,
self.opener.open,
self.URL)
def test_proxy_with_no_password_raises_httperror(self):
self.digest_auth_handler.set_qop("auth")
self.assertRaises(urllib.error.HTTPError,
self.opener.open,
self.URL)
def test_proxy_qop_auth_works(self):
self.proxy_digest_handler.add_password(self.REALM, self.URL,
self.USER, self.PASSWD)
self.digest_auth_handler.set_qop("auth")
result = self.opener.open(self.URL)
while result.read():
pass
result.close()
def test_proxy_qop_auth_int_works_or_throws_urlerror(self):
self.proxy_digest_handler.add_password(self.REALM, self.URL,
self.USER, self.PASSWD)
self.digest_auth_handler.set_qop("auth-int")
try:
result = self.opener.open(self.URL)
except urllib.error.URLError:
# It's okay if we don't support auth-int, but we certainly
# shouldn't receive any kind of exception here other than
# a URLError.
result = None
if result:
while result.read():
pass
result.close()
def GetRequestHandler(responses):
class FakeHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
server_version = "TestHTTP/"
requests = []
headers_received = []
port = 80
def do_GET(self):
body = self.send_head()
while body:
done = self.wfile.write(body)
body = body[done:]
def do_POST(self):
content_length = self.headers["Content-Length"]
post_data = self.rfile.read(int(content_length))
self.do_GET()
self.requests.append(post_data)
def send_head(self):
FakeHTTPRequestHandler.headers_received = self.headers
self.requests.append(self.path)
response_code, headers, body = responses.pop(0)
self.send_response(response_code)
for (header, value) in headers:
self.send_header(header, value % {'port':self.port})
if body:
self.send_header("Content-type", "text/plain")
self.end_headers()
return body
self.end_headers()
def log_message(self, *args):
pass
return FakeHTTPRequestHandler
@unittest.skipUnless(threading, "Threading required for this test.")
class TestUrlopen(unittest.TestCase):
"""Tests urllib.request.urlopen using the network.
These tests are not exhaustive. Assuming that testing using files does a
good job overall of some of the basic interface features. There are no
tests exercising the optional 'data' and 'proxies' arguments. No tests
for transparent redirection have been written.
"""
def setUp(self):
super(TestUrlopen, self).setUp()
# Ignore proxies for localhost tests.
self.old_environ = os.environ.copy()
os.environ['NO_PROXY'] = '*'
self.server = None
def tearDown(self):
if self.server is not None:
self.server.stop()
os.environ.clear()
os.environ.update(self.old_environ)
super(TestUrlopen, self).tearDown()
def urlopen(self, url, data=None, **kwargs):
l = []
f = urllib.request.urlopen(url, data, **kwargs)
try:
# Exercise various methods
l.extend(f.readlines(200))
l.append(f.readline())
l.append(f.read(1024))
l.append(f.read())
finally:
f.close()
return b"".join(l)
def start_server(self, responses=None):
if responses is None:
responses = [(200, [], b"we don't care")]
handler = GetRequestHandler(responses)
self.server = LoopbackHttpServerThread(handler)
self.server.start()
self.server.ready.wait()
port = self.server.port
handler.port = port
return handler
def start_https_server(self, responses=None, **kwargs):
if not hasattr(urllib.request, 'HTTPSHandler'):
self.skipTest('ssl support required')
from test.ssl_servers import make_https_server
if responses is None:
responses = [(200, [], b"we care a bit")]
handler = GetRequestHandler(responses)
server = make_https_server(self, handler_class=handler, **kwargs)
handler.port = server.port
return handler
def test_redirection(self):
expected_response = b"We got here..."
responses = [
(302, [("Location", "http://localhost:%(port)s/somewhere_else")],
""),
(200, [], expected_response)
]
handler = self.start_server(responses)
data = self.urlopen("http://localhost:%s/" % handler.port)
self.assertEqual(data, expected_response)
self.assertEqual(handler.requests, ["/", "/somewhere_else"])
def test_chunked(self):
expected_response = b"hello world"
chunked_start = (
b'a\r\n'
b'hello worl\r\n'
b'1\r\n'
b'd\r\n'
b'0\r\n'
)
response = [(200, [("Transfer-Encoding", "chunked")], chunked_start)]
handler = self.start_server(response)
data = self.urlopen("http://localhost:%s/" % handler.port)
self.assertEqual(data, expected_response)
def test_404(self):
expected_response = b"Bad bad bad..."
handler = self.start_server([(404, [], expected_response)])
try:
self.urlopen("http://localhost:%s/weeble" % handler.port)
except urllib.error.URLError as f:
data = f.read()
f.close()
else:
self.fail("404 should raise URLError")
self.assertEqual(data, expected_response)
self.assertEqual(handler.requests, ["/weeble"])
def test_200(self):
expected_response = b"pycon 2008..."
handler = self.start_server([(200, [], expected_response)])
data = self.urlopen("http://localhost:%s/bizarre" % handler.port)
self.assertEqual(data, expected_response)
self.assertEqual(handler.requests, ["/bizarre"])
def test_200_with_parameters(self):
expected_response = b"pycon 2008..."
handler = self.start_server([(200, [], expected_response)])
data = self.urlopen("http://localhost:%s/bizarre" % handler.port,
b"get=with_feeling")
self.assertEqual(data, expected_response)
self.assertEqual(handler.requests, ["/bizarre", b"get=with_feeling"])
def test_https(self):
handler = self.start_https_server()
data = self.urlopen("https://localhost:%s/bizarre" % handler.port)
self.assertEqual(data, b"we care a bit")
def test_https_with_cafile(self):
handler = self.start_https_server(certfile=CERT_localhost)
import ssl
# Good cert
data = self.urlopen("https://localhost:%s/bizarre" % handler.port,
cafile=CERT_localhost)
self.assertEqual(data, b"we care a bit")
# Bad cert
with self.assertRaises(urllib.error.URLError) as cm:
self.urlopen("https://localhost:%s/bizarre" % handler.port,
cafile=CERT_fakehostname)
# Good cert, but mismatching hostname
handler = self.start_https_server(certfile=CERT_fakehostname)
with self.assertRaises(ssl.CertificateError) as cm:
self.urlopen("https://localhost:%s/bizarre" % handler.port,
cafile=CERT_fakehostname)
def test_https_with_cadefault(self):
handler = self.start_https_server(certfile=CERT_localhost)
# Self-signed cert should fail verification with system certificate store
with self.assertRaises(urllib.error.URLError) as cm:
self.urlopen("https://localhost:%s/bizarre" % handler.port,
cadefault=True)
def test_https_sni(self):
if ssl is None:
self.skipTest("ssl module required")
if not ssl.HAS_SNI:
self.skipTest("SNI support required in OpenSSL")
sni_name = None
def cb_sni(ssl_sock, server_name, initial_context):
nonlocal sni_name
sni_name = server_name
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.set_servername_callback(cb_sni)
handler = self.start_https_server(context=context, certfile=CERT_localhost)
self.urlopen("https://localhost:%s" % handler.port)
self.assertEqual(sni_name, "localhost")
def test_sending_headers(self):
handler = self.start_server()
req = urllib.request.Request("http://localhost:%s/" % handler.port,
headers={"Range": "bytes=20-39"})
urllib.request.urlopen(req)
self.assertEqual(handler.headers_received["Range"], "bytes=20-39")
def test_basic(self):
handler = self.start_server()
open_url = urllib.request.urlopen("http://localhost:%s" % handler.port)
for attr in ("read", "close", "info", "geturl"):
self.assertTrue(hasattr(open_url, attr), "object returned from "
"urlopen lacks the %s attribute" % attr)
try:
self.assertTrue(open_url.read(), "calling 'read' failed")
finally:
open_url.close()
def test_info(self):
handler = self.start_server()
try:
open_url = urllib.request.urlopen(
"http://localhost:%s" % handler.port)
info_obj = open_url.info()
self.assertIsInstance(info_obj, email.message.Message,
"object returned by 'info' is not an "
"instance of email.message.Message")
self.assertEqual(info_obj.get_content_subtype(), "plain")
finally:
self.server.stop()
def test_geturl(self):
# Make sure same URL as opened is returned by geturl.
handler = self.start_server()
open_url = urllib.request.urlopen("http://localhost:%s" % handler.port)
url = open_url.geturl()
self.assertEqual(url, "http://localhost:%s" % handler.port)
def test_bad_address(self):
# Make sure proper exception is raised when connecting to a bogus
# address.
# as indicated by the comment below, this might fail with some ISP,
# so we run the test only when -unetwork/-uall is specified to
# mitigate the problem a bit (see #17564)
support.requires('network')
self.assertRaises(OSError,
# Given that both VeriSign and various ISPs have in
# the past or are presently hijacking various invalid
# domain name requests in an attempt to boost traffic
# to their own sites, finding a domain name to use
# for this test is difficult. RFC2606 leads one to
# believe that '.invalid' should work, but experience
# seemed to indicate otherwise. Single character
# TLDs are likely to remain invalid, so this seems to
# be the best choice. The trailing '.' prevents a
# related problem: The normal DNS resolver appends
# the domain names from the search path if there is
# no '.' the end and, and if one of those domains
# implements a '*' rule a result is returned.
# However, none of this will prevent the test from
# failing if the ISP hijacks all invalid domain
# requests. The real solution would be to be able to
# parameterize the framework with a mock resolver.
urllib.request.urlopen,
"http://sadflkjsasf.i.nvali.d./")
def test_iteration(self):
expected_response = b"pycon 2008..."
handler = self.start_server([(200, [], expected_response)])
data = urllib.request.urlopen("http://localhost:%s" % handler.port)
for line in data:
self.assertEqual(line, expected_response)
def test_line_iteration(self):
lines = [b"We\n", b"got\n", b"here\n", b"verylong " * 8192 + b"\n"]
expected_response = b"".join(lines)
handler = self.start_server([(200, [], expected_response)])
data = urllib.request.urlopen("http://localhost:%s" % handler.port)
for index, line in enumerate(data):
self.assertEqual(line, lines[index],
"Fetched line number %s doesn't match expected:\n"
" Expected length was %s, got %s" %
(index, len(lines[index]), len(line)))
self.assertEqual(index + 1, len(lines))
threads_key = None
def setUpModule():
# Store the threading_setup in a key and ensure that it is cleaned up
# in the tearDown
global threads_key
threads_key = support.threading_setup()
def tearDownModule():
if threads_key:
support.threading_cleanup(threads_key)
if __name__ == "__main__":
unittest.main()
| lgpl-3.0 |
aleju/self-driving-truck | lib/plotting.py | 1 | 13772 | """Classes to handle plotting during the training."""
from __future__ import print_function, division
import math
import cPickle as pickle
from collections import OrderedDict
import numpy as np
import matplotlib.pyplot as plt
import time
GROWTH_BY = 500
class History(object):
def __init__(self):
self.line_groups = OrderedDict()
@staticmethod
def from_string(s):
return pickle.loads(s)
def to_string(self):
return pickle.dumps(self, protocol=-1)
@staticmethod
def load_from_filepath(fp):
#return json.loads(open(, "r").read())
with open(fp, "r") as f:
history = pickle.load(f)
return history
def save_to_filepath(self, fp):
with open(fp, "w") as f:
pickle.dump(self, f, protocol=-1)
def add_group(self, group_name, line_names, increasing=True):
self.line_groups[group_name] = LineGroup(group_name, line_names, increasing=increasing)
def add_value(self, group_name, line_name, x, y, average=False):
self.line_groups[group_name].lines[line_name].append(x, y, average=average)
def get_group_names(self):
return list(self.line_groups.iterkeys())
def get_groups_increasing(self):
return [group.increasing for group in self.line_groups.itervalues()]
def get_max_x(self):
return max([group.get_max_x() for group in self.line_groups.itervalues()])
def get_recent_average(self, group_name, line_name, nb_points):
ys = self.line_groups[group_name].lines[line_name].ys[-nb_points:]
return np.average(ys)
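# A small usage sketch of History (the group/line names are examples):
#
#     history = History()
#     history.add_group("loss", ["train", "val"], increasing=False)
#     history.add_value("loss", "train", x=batch_idx, y=0.7, average=True)
#     history.save_to_filepath("history.pickle")
#     history = History.load_from_filepath("history.pickle")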
class LineGroup(object):
def __init__(self, group_name, line_names, increasing=True):
self.group_name = group_name
self.lines = OrderedDict([(name, Line()) for name in line_names])
self.increasing = increasing
self.xlim = (None, None)
def get_line_names(self):
return list(self.lines.iterkeys())
def get_line_xs(self):
#return [line.xs for line in self.lines.itervalues()]
"""
for key, line in self.lines.items():
if not hasattr(line, "last_index"):
print(self.group_name, key, "no last index")
else:
print(self.group_name, key, "OK")
print(type(line.xs), type(line.ys), type(line.counts), type(line.datetimes))
"""
return [line.get_xs() for line in self.lines.itervalues()]
def get_line_ys(self):
#return [line.ys for line in self.lines.itervalues()]
return [line.get_ys() for line in self.lines.itervalues()]
def get_max_x(self):
#return max([max(line.xs) if len(line.xs) > 0 else 0 for line in self.lines.itervalues()])
return max([np.max(line.get_xs()) if line.last_index > -1 else 0 for line in self.lines.itervalues()])
"""
class Line(object):
def __init__(self, xs=None, ys=None, counts=None, datetimes=None):
self.xs = xs if xs is not None else []
self.ys = ys if ys is not None else []
self.counts = counts if counts is not None else []
self.datetimes = datetimes if datetimes is not None else []
self.last_index = -1
def append(self, x, y, average=False):
# legacy (for loading from pickle)
#if not hasattr(self, "counts"):
# self.counts = [1] * len(self.xs)
# ---
if not average or len(self.xs) == 0 or self.xs[-1] != x:
self.xs.append(x)
self.ys.append(float(y)) # float to get rid of numpy
self.counts.append(1)
self.datetimes.append(time.time())
else:
count = self.counts[-1]
self.ys[-1] = ((self.ys[-1] * count) + y) / (count+1)
self.counts[-1] += 1
self.datetimes[-1] = time.time()
"""
class Line(object):
def __init__(self, xs=None, ys=None, counts=None, datetimes=None):
zeros = np.tile(np.array([0], dtype=np.int32), GROWTH_BY)
self.xs = xs if xs is not None else np.copy(zeros)
self.ys = ys if ys is not None else zeros.astype(np.float32)
self.counts = counts if counts is not None else zeros.astype(np.uint16)
self.datetimes = datetimes if datetimes is not None else zeros.astype(np.uint64)
self.last_index = -1
# kept as functions for legacy reasons; could be replaced with properties
def get_xs(self):
# legacy
if isinstance(self.xs, list):
self._legacy_convert_from_list_to_np()
return self.xs[0:self.last_index+1]
def get_ys(self):
return self.ys[0:self.last_index+1]
def get_counts(self):
return self.counts[0:self.last_index+1]
def get_datetimes(self):
return self.datetimes[0:self.last_index+1]
def _legacy_convert_from_list_to_np(self):
#print("is list!")
print("[plotting] Converting from list to numpy...")
self.last_index = len(self.xs) - 1
self.xs = np.array(self.xs, dtype=np.int32)
self.ys = np.array(self.ys, dtype=np.float32)
self.counts = np.array(self.counts, dtype=np.uint16)
self.datetimes = np.array([int(dt*1000) for dt in self.datetimes], dtype=np.uint64)
def append(self, x, y, average=False):
# legacy (for loading from pickle)
#if not hasattr(self, "counts"):
# self.counts = [1] * len(self.xs)
# ---
#legacy
if isinstance(self.xs, list):
self._legacy_convert_from_list_to_np()
if (self.last_index+1) == self.xs.shape[0]:
#print("growing from %d by %d..." % (self.xs.shape[0], GROWTH_BY), self.xs.shape, self.ys.shape, self.counts.shape, self.datetimes.shape)
zeros = np.tile(np.array([0], dtype=np.int32), GROWTH_BY)
self.xs = np.append(self.xs, np.copy(zeros))
self.ys = np.append(self.ys, zeros.astype(np.float32))
self.counts = np.append(self.counts, zeros.astype(np.uint16))
self.datetimes = np.append(self.datetimes, zeros.astype(np.uint64))
#print("growing done", self.xs.shape, self.ys.shape, self.counts.shape, self.datetimes.shape)
first_entry = (self.last_index == -1)
if not average or first_entry or self.xs[self.last_index] != x:
idx = self.last_index + 1
self.xs[idx] = x
self.ys[idx] = y
self.counts[idx] = 1
self.datetimes[idx] = int(time.time()*1000)
self.last_index = idx
else:
idx = self.last_index
count = self.counts[idx]
self.ys[idx] = ((self.ys[idx] * count) + y) / (count+1)
self.counts[idx] = count + 1
self.datetimes[idx] = int(time.time()*1000)
#print("added", x, y, average)
#print(self.xs[self.last_index-10:self.last_index+10+1])
#print(self.ys[self.last_index-10:self.last_index+10+1])
#print(self.counts[self.last_index-10:self.last_index+10+1])
#print(self.datetimes[self.last_index-10:self.last_index+10+1])
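# Quick illustration of the averaging behaviour of append() above: repeated
# values at the same x are folded into a running mean instead of adding points.
#
#     line = Line()
#     line.append(10, 2.0, average=True)
#     line.append(10, 4.0, average=True)   # merged: ys[-1] becomes 3.0
#     line.append(11, 1.0, average=True)   # new x, new point
#     line.get_xs()  # -> array([10, 11], dtype=int32)
#     line.get_ys()  # -> array([3., 1.], dtype=float32)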
class LossPlotter(object):
def __init__(self, titles, increasing, save_to_fp):
assert len(titles) == len(increasing)
n_plots = len(titles)
self.titles = titles
self.increasing = dict([(title, incr) for title, incr in zip(titles, increasing)])
self.xlim = dict([(title, (None, None)) for title in titles])
self.colors = ["red", "blue", "cyan", "magenta", "orange", "black"]
self.nb_points_max = 500
self.save_to_fp = save_to_fp
self.start_batch_idx = 0
self.autolimit_y = False
self.autolimit_y_multiplier = 5
#self.fig, self.axes = plt.subplots(nrows=2, ncols=2, figsize=(20, 20))
nrows = max(1, int(math.sqrt(n_plots)))
ncols = int(math.ceil(n_plots / nrows))
width = ncols * 10
height = nrows * 10
self.fig, self.axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=(width, height))
if nrows == 1 and ncols == 1:
self.axes = [self.axes]
else:
self.axes = self.axes.flat
title_to_ax = dict()
for idx, (title, ax) in enumerate(zip(self.titles, self.axes)):
title_to_ax[title] = ax
self.title_to_ax = title_to_ax
self.fig.tight_layout()
self.fig.subplots_adjust(left=0.05)
def plot(self, history):
for plot_idx, title in enumerate(self.titles):
ax = self.title_to_ax[title]
group_name = title
group_increasing = self.increasing[title]
group = history.line_groups[title]
line_names = group.get_line_names()
#print("getting line x/y...", time.time())
line_xs = group.get_line_xs()
line_ys = group.get_line_ys()
#print("getting line x/y FIN", time.time())
"""
print("title", title)
print("line_names", line_names)
for i, xx in enumerate(line_xs):
print("line_xs i: ", xx)
for i, yy in enumerate(line_ys):
print("line_ys i: ", yy)
"""
if any([len(xx) > 0 for xx in line_xs]):
xs_min = min([min(xx) for xx in line_xs if len(xx) > 0])
xs_max = max([max(xx) for xx in line_xs if len(xx) > 0])
xlim = self.xlim[title]
xlim = [
max(xs_min, self.start_batch_idx) if xlim[0] is None else min(xlim[0], xs_max-1),
xs_max+1 if xlim[1] is None else xlim[1]
]
if xlim[0] < 0:
xlim[0] = max(xs_max - abs(xlim[0]), 0)
if xlim[1] < 0:
xlim[1] = max(xs_max - abs(xlim[1]), 1)
else:
# none of the lines has any value, so just use dummy values
# to avoid min/max of empty sequence errors
xlim = [
0 if self.xlim[title][0] is None else self.xlim[title][0],
1 if self.xlim[title][1] is None else self.xlim[title][1]
]
self._plot_group(ax, group_name, group_increasing, line_names, line_xs, line_ys, xlim)
self.fig.savefig(self.save_to_fp)
# this seems to be slow sometimes
def _line_to_xy(self, line_x, line_y, xlim, limit_y_min=None, limit_y_max=None):
def _add_point(points_x, points_y, curr_sum, counter):
points_x.append(batch_idx)
y = curr_sum / counter
if limit_y_min is not None and limit_y_max is not None:
y = np.clip(y, limit_y_min, limit_y_max)
elif limit_y_min is not None:
y = max(y, limit_y_min)
elif limit_y_max is not None:
y = min(y, limit_y_max)
points_y.append(y)
nb_points = 0
for i in range(len(line_x)):
batch_idx = line_x[i]
if xlim[0] <= batch_idx < xlim[1]:
nb_points += 1
point_every = max(1, int(nb_points / self.nb_points_max))
points_x = []
points_y = []
curr_sum = 0
counter = 0
for i in range(len(line_x)):
batch_idx = line_x[i]
if xlim[0] <= batch_idx < xlim[1]:
curr_sum += line_y[i]
counter += 1
if counter >= point_every:
_add_point(points_x, points_y, curr_sum, counter)
counter = 0
curr_sum = 0
if counter > 0:
_add_point(points_x, points_y, curr_sum, counter)
return points_x, points_y
def _plot_group(self, ax, group_name, group_increasing, line_names, line_xs, line_ys, xlim):
ax.cla()
ax.grid()
if self.autolimit_y and any([len(line_xs) > 0 for line_xs in line_xs]):
min_x = min([np.min(line_x) for line_x in line_xs])
max_x = max([np.max(line_x) for line_x in line_xs])
min_y = min([np.min(line_y) for line_y in line_ys])
max_y = max([np.max(line_y) for line_y in line_ys])
if group_increasing:
if max_y > 0:
limit_y_max = None
limit_y_min = max_y / self.autolimit_y_multiplier
if min_y > limit_y_min:
limit_y_min = None
else:
if min_y > 0:
limit_y_max = min_y * self.autolimit_y_multiplier
limit_y_min = None
if max_y < limit_y_max:
limit_y_max = None
if limit_y_min is not None:
ax.plot((min_x, max_x), (limit_y_min, limit_y_min), c="purple")
if limit_y_max is not None:
ax.plot((min_x, max_x), (limit_y_max, limit_y_max), c="purple")
# limit the y-axis range
yaxmin = min_y if limit_y_min is None else limit_y_min
yaxmax = max_y if limit_y_max is None else limit_y_max
yrange = yaxmax - yaxmin
yaxmin = yaxmin - (0.05 * yrange)
yaxmax = yaxmax + (0.05 * yrange)
ax.set_ylim([yaxmin, yaxmax])
else:
limit_y_min = None
limit_y_max = None
for line_name, line_x, line_y, line_col in zip(line_names, line_xs, line_ys, self.colors):
#print("line to xy...", time.time())
x, y = self._line_to_xy(line_x, line_y, xlim, limit_y_min=limit_y_min, limit_y_max=limit_y_max)
#print("line to xy FIN", time.time())
#print("plotting ax...", time.time())
ax.plot(x, y, color=line_col, linewidth=1.0)
#print("plotting ax FIN", time.time())
ax.set_title(group_name)
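# Putting History and LossPlotter together (a sketch; the titles passed to the
# plotter must match the group names registered on the history):
#
#     plotter = LossPlotter(titles=["loss"], increasing=[False],
#                           save_to_fp="train_plot.png")
#     plotter.plot(history)   # renders every group and writes the figure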
| mit |
fldc/CouchPotatoServer | libs/dateutil/rrule.py | 214 | 41036 | """
Copyright (c) 2003-2010 Gustavo Niemeyer <gustavo@niemeyer.net>
This module offers extensions to the standard Python
datetime module.
"""
__license__ = "Simplified BSD"
import itertools
import datetime
import calendar
try:
import _thread
except ImportError:
import thread as _thread
import sys
from six import advance_iterator, integer_types
__all__ = ["rrule", "rruleset", "rrulestr",
"YEARLY", "MONTHLY", "WEEKLY", "DAILY",
"HOURLY", "MINUTELY", "SECONDLY",
"MO", "TU", "WE", "TH", "FR", "SA", "SU"]
# Every mask is 7 days longer to handle cross-year weekly periods.
M366MASK = tuple([1]*31+[2]*29+[3]*31+[4]*30+[5]*31+[6]*30+
[7]*31+[8]*31+[9]*30+[10]*31+[11]*30+[12]*31+[1]*7)
M365MASK = list(M366MASK)
M29, M30, M31 = list(range(1, 30)), list(range(1, 31)), list(range(1, 32))
MDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7])
MDAY365MASK = list(MDAY366MASK)
M29, M30, M31 = list(range(-29, 0)), list(range(-30, 0)), list(range(-31, 0))
NMDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7])
NMDAY365MASK = list(NMDAY366MASK)
M366RANGE = (0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366)
M365RANGE = (0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365)
WDAYMASK = [0, 1, 2, 3, 4, 5, 6]*55
del M29, M30, M31, M365MASK[59], MDAY365MASK[59], NMDAY365MASK[31]
MDAY365MASK = tuple(MDAY365MASK)
M365MASK = tuple(M365MASK)
(YEARLY,
MONTHLY,
WEEKLY,
DAILY,
HOURLY,
MINUTELY,
SECONDLY) = list(range(7))
# Imported on demand.
easter = None
parser = None
class weekday(object):
__slots__ = ["weekday", "n"]
def __init__(self, weekday, n=None):
if n == 0:
raise ValueError("Can't create weekday with n == 0")
self.weekday = weekday
self.n = n
def __call__(self, n):
if n == self.n:
return self
else:
return self.__class__(self.weekday, n)
def __eq__(self, other):
try:
if self.weekday != other.weekday or self.n != other.n:
return False
except AttributeError:
return False
return True
def __repr__(self):
s = ("MO", "TU", "WE", "TH", "FR", "SA", "SU")[self.weekday]
if not self.n:
return s
else:
return "%s(%+d)" % (s, self.n)
MO, TU, WE, TH, FR, SA, SU = weekdays = tuple([weekday(x) for x in range(7)])
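# Example: FR means "every Friday", while FR(-1) means "the last Friday" of
# the period when used with byweekday, e.g. rrule(MONTHLY, byweekday=FR(-1))
# yields the last Friday of each month.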
class rrulebase(object):
def __init__(self, cache=False):
if cache:
self._cache = []
self._cache_lock = _thread.allocate_lock()
self._cache_gen = self._iter()
self._cache_complete = False
else:
self._cache = None
self._cache_complete = False
self._len = None
def __iter__(self):
if self._cache_complete:
return iter(self._cache)
elif self._cache is None:
return self._iter()
else:
return self._iter_cached()
def _iter_cached(self):
i = 0
gen = self._cache_gen
cache = self._cache
acquire = self._cache_lock.acquire
release = self._cache_lock.release
while gen:
if i == len(cache):
acquire()
if self._cache_complete:
break
try:
for j in range(10):
cache.append(advance_iterator(gen))
except StopIteration:
self._cache_gen = gen = None
self._cache_complete = True
break
release()
yield cache[i]
i += 1
while i < self._len:
yield cache[i]
i += 1
def __getitem__(self, item):
if self._cache_complete:
return self._cache[item]
elif isinstance(item, slice):
if item.step and item.step < 0:
return list(iter(self))[item]
else:
return list(itertools.islice(self,
item.start or 0,
item.stop or sys.maxsize,
item.step or 1))
elif item >= 0:
gen = iter(self)
try:
for i in range(item+1):
res = advance_iterator(gen)
except StopIteration:
raise IndexError
return res
else:
return list(iter(self))[item]
def __contains__(self, item):
if self._cache_complete:
return item in self._cache
else:
for i in self:
if i == item:
return True
elif i > item:
return False
return False
# __len__() introduces a large performance penalty.
def count(self):
if self._len is None:
for x in self: pass
return self._len
def before(self, dt, inc=False):
if self._cache_complete:
gen = self._cache
else:
gen = self
last = None
if inc:
for i in gen:
if i > dt:
break
last = i
else:
for i in gen:
if i >= dt:
break
last = i
return last
def after(self, dt, inc=False):
if self._cache_complete:
gen = self._cache
else:
gen = self
if inc:
for i in gen:
if i >= dt:
return i
else:
for i in gen:
if i > dt:
return i
return None
def between(self, after, before, inc=False):
if self._cache_complete:
gen = self._cache
else:
gen = self
started = False
l = []
if inc:
for i in gen:
if i > before:
break
elif not started:
if i >= after:
started = True
l.append(i)
else:
l.append(i)
else:
for i in gen:
if i >= before:
break
elif not started:
if i > after:
started = True
l.append(i)
else:
l.append(i)
return l
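# Usage sketch for the iteration helpers above (dates are examples):
#
#     r = rrule(DAILY, dtstart=datetime.datetime(2010, 1, 1), count=5)
#     list(r)                                     # five consecutive days
#     r.after(datetime.datetime(2010, 1, 2))      # first occurrence after dt
#     r.between(datetime.datetime(2010, 1, 1),
#               datetime.datetime(2010, 1, 4))    # occurrences in between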
class rrule(rrulebase):
def __init__(self, freq, dtstart=None,
interval=1, wkst=None, count=None, until=None, bysetpos=None,
bymonth=None, bymonthday=None, byyearday=None, byeaster=None,
byweekno=None, byweekday=None,
byhour=None, byminute=None, bysecond=None,
cache=False):
super(rrule, self).__init__(cache)
global easter
if not dtstart:
dtstart = datetime.datetime.now().replace(microsecond=0)
elif not isinstance(dtstart, datetime.datetime):
dtstart = datetime.datetime.fromordinal(dtstart.toordinal())
else:
dtstart = dtstart.replace(microsecond=0)
self._dtstart = dtstart
self._tzinfo = dtstart.tzinfo
self._freq = freq
self._interval = interval
self._count = count
if until and not isinstance(until, datetime.datetime):
until = datetime.datetime.fromordinal(until.toordinal())
self._until = until
if wkst is None:
self._wkst = calendar.firstweekday()
elif isinstance(wkst, integer_types):
self._wkst = wkst
else:
self._wkst = wkst.weekday
if bysetpos is None:
self._bysetpos = None
elif isinstance(bysetpos, integer_types):
if bysetpos == 0 or not (-366 <= bysetpos <= 366):
raise ValueError("bysetpos must be between 1 and 366, "
"or between -366 and -1")
self._bysetpos = (bysetpos,)
else:
self._bysetpos = tuple(bysetpos)
for pos in self._bysetpos:
if pos == 0 or not (-366 <= pos <= 366):
raise ValueError("bysetpos must be between 1 and 366, "
"or between -366 and -1")
if not (byweekno or byyearday or bymonthday or
byweekday is not None or byeaster is not None):
if freq == YEARLY:
if not bymonth:
bymonth = dtstart.month
bymonthday = dtstart.day
elif freq == MONTHLY:
bymonthday = dtstart.day
elif freq == WEEKLY:
byweekday = dtstart.weekday()
# bymonth
if not bymonth:
self._bymonth = None
elif isinstance(bymonth, integer_types):
self._bymonth = (bymonth,)
else:
self._bymonth = tuple(bymonth)
# byyearday
if not byyearday:
self._byyearday = None
elif isinstance(byyearday, integer_types):
self._byyearday = (byyearday,)
else:
self._byyearday = tuple(byyearday)
# byeaster
if byeaster is not None:
if not easter:
from dateutil import easter
if isinstance(byeaster, integer_types):
self._byeaster = (byeaster,)
else:
self._byeaster = tuple(byeaster)
else:
self._byeaster = None
# bymonthday
if not bymonthday:
self._bymonthday = ()
self._bynmonthday = ()
elif isinstance(bymonthday, integer_types):
if bymonthday < 0:
self._bynmonthday = (bymonthday,)
self._bymonthday = ()
else:
self._bymonthday = (bymonthday,)
self._bynmonthday = ()
else:
self._bymonthday = tuple([x for x in bymonthday if x > 0])
self._bynmonthday = tuple([x for x in bymonthday if x < 0])
# byweekno
if byweekno is None:
self._byweekno = None
elif isinstance(byweekno, integer_types):
self._byweekno = (byweekno,)
else:
self._byweekno = tuple(byweekno)
# byweekday / bynweekday
if byweekday is None:
self._byweekday = None
self._bynweekday = None
elif isinstance(byweekday, integer_types):
self._byweekday = (byweekday,)
self._bynweekday = None
elif hasattr(byweekday, "n"):
if not byweekday.n or freq > MONTHLY:
self._byweekday = (byweekday.weekday,)
self._bynweekday = None
else:
self._bynweekday = ((byweekday.weekday, byweekday.n),)
self._byweekday = None
else:
self._byweekday = []
self._bynweekday = []
for wday in byweekday:
if isinstance(wday, integer_types):
self._byweekday.append(wday)
elif not wday.n or freq > MONTHLY:
self._byweekday.append(wday.weekday)
else:
self._bynweekday.append((wday.weekday, wday.n))
self._byweekday = tuple(self._byweekday)
self._bynweekday = tuple(self._bynweekday)
if not self._byweekday:
self._byweekday = None
elif not self._bynweekday:
self._bynweekday = None
# byhour
if byhour is None:
if freq < HOURLY:
self._byhour = (dtstart.hour,)
else:
self._byhour = None
elif isinstance(byhour, integer_types):
self._byhour = (byhour,)
else:
self._byhour = tuple(byhour)
# byminute
if byminute is None:
if freq < MINUTELY:
self._byminute = (dtstart.minute,)
else:
self._byminute = None
elif isinstance(byminute, integer_types):
self._byminute = (byminute,)
else:
self._byminute = tuple(byminute)
# bysecond
if bysecond is None:
if freq < SECONDLY:
self._bysecond = (dtstart.second,)
else:
self._bysecond = None
elif isinstance(bysecond, integer_types):
self._bysecond = (bysecond,)
else:
self._bysecond = tuple(bysecond)
if self._freq >= HOURLY:
self._timeset = None
else:
self._timeset = []
for hour in self._byhour:
for minute in self._byminute:
for second in self._bysecond:
self._timeset.append(
datetime.time(hour, minute, second,
tzinfo=self._tzinfo))
self._timeset.sort()
self._timeset = tuple(self._timeset)
def _iter(self):
year, month, day, hour, minute, second, weekday, yearday, _ = \
self._dtstart.timetuple()
# Some local variables to speed things up a bit
freq = self._freq
interval = self._interval
wkst = self._wkst
until = self._until
bymonth = self._bymonth
byweekno = self._byweekno
byyearday = self._byyearday
byweekday = self._byweekday
byeaster = self._byeaster
bymonthday = self._bymonthday
bynmonthday = self._bynmonthday
bysetpos = self._bysetpos
byhour = self._byhour
byminute = self._byminute
bysecond = self._bysecond
ii = _iterinfo(self)
ii.rebuild(year, month)
getdayset = {YEARLY:ii.ydayset,
MONTHLY:ii.mdayset,
WEEKLY:ii.wdayset,
DAILY:ii.ddayset,
HOURLY:ii.ddayset,
MINUTELY:ii.ddayset,
SECONDLY:ii.ddayset}[freq]
if freq < HOURLY:
timeset = self._timeset
else:
gettimeset = {HOURLY:ii.htimeset,
MINUTELY:ii.mtimeset,
SECONDLY:ii.stimeset}[freq]
if ((freq >= HOURLY and
self._byhour and hour not in self._byhour) or
(freq >= MINUTELY and
self._byminute and minute not in self._byminute) or
(freq >= SECONDLY and
self._bysecond and second not in self._bysecond)):
timeset = ()
else:
timeset = gettimeset(hour, minute, second)
total = 0
count = self._count
while True:
# Get dayset with the right frequency
dayset, start, end = getdayset(year, month, day)
# Do the "hard" work ;-)
filtered = False
for i in dayset[start:end]:
if ((bymonth and ii.mmask[i] not in bymonth) or
(byweekno and not ii.wnomask[i]) or
(byweekday and ii.wdaymask[i] not in byweekday) or
(ii.nwdaymask and not ii.nwdaymask[i]) or
(byeaster and not ii.eastermask[i]) or
((bymonthday or bynmonthday) and
ii.mdaymask[i] not in bymonthday and
ii.nmdaymask[i] not in bynmonthday) or
(byyearday and
((i < ii.yearlen and i+1 not in byyearday
and -ii.yearlen+i not in byyearday) or
(i >= ii.yearlen and i+1-ii.yearlen not in byyearday
and -ii.nextyearlen+i-ii.yearlen
not in byyearday)))):
dayset[i] = None
filtered = True
# Output results
if bysetpos and timeset:
poslist = []
for pos in bysetpos:
if pos < 0:
daypos, timepos = divmod(pos, len(timeset))
else:
daypos, timepos = divmod(pos-1, len(timeset))
try:
i = [x for x in dayset[start:end]
if x is not None][daypos]
time = timeset[timepos]
except IndexError:
pass
else:
date = datetime.date.fromordinal(ii.yearordinal+i)
res = datetime.datetime.combine(date, time)
if res not in poslist:
poslist.append(res)
poslist.sort()
for res in poslist:
if until and res > until:
self._len = total
return
elif res >= self._dtstart:
total += 1
yield res
if count:
count -= 1
if not count:
self._len = total
return
else:
for i in dayset[start:end]:
if i is not None:
date = datetime.date.fromordinal(ii.yearordinal+i)
for time in timeset:
res = datetime.datetime.combine(date, time)
if until and res > until:
self._len = total
return
elif res >= self._dtstart:
total += 1
yield res
if count:
count -= 1
if not count:
self._len = total
return
# Handle frequency and interval
fixday = False
if freq == YEARLY:
year += interval
if year > datetime.MAXYEAR:
self._len = total
return
ii.rebuild(year, month)
elif freq == MONTHLY:
month += interval
if month > 12:
div, mod = divmod(month, 12)
month = mod
year += div
if month == 0:
month = 12
year -= 1
if year > datetime.MAXYEAR:
self._len = total
return
ii.rebuild(year, month)
elif freq == WEEKLY:
if wkst > weekday:
day += -(weekday+1+(6-wkst))+self._interval*7
else:
day += -(weekday-wkst)+self._interval*7
weekday = wkst
fixday = True
elif freq == DAILY:
day += interval
fixday = True
elif freq == HOURLY:
if filtered:
# Jump to one iteration before next day
hour += ((23-hour)//interval)*interval
while True:
hour += interval
div, mod = divmod(hour, 24)
if div:
hour = mod
day += div
fixday = True
if not byhour or hour in byhour:
break
timeset = gettimeset(hour, minute, second)
elif freq == MINUTELY:
if filtered:
# Jump to one iteration before next day
minute += ((1439-(hour*60+minute))//interval)*interval
while True:
minute += interval
div, mod = divmod(minute, 60)
if div:
minute = mod
hour += div
div, mod = divmod(hour, 24)
if div:
hour = mod
day += div
fixday = True
filtered = False
if ((not byhour or hour in byhour) and
(not byminute or minute in byminute)):
break
timeset = gettimeset(hour, minute, second)
elif freq == SECONDLY:
if filtered:
# Jump to one iteration before next day
second += (((86399-(hour*3600+minute*60+second))
//interval)*interval)
while True:
second += self._interval
div, mod = divmod(second, 60)
if div:
second = mod
minute += div
div, mod = divmod(minute, 60)
if div:
minute = mod
hour += div
div, mod = divmod(hour, 24)
if div:
hour = mod
day += div
fixday = True
if ((not byhour or hour in byhour) and
(not byminute or minute in byminute) and
(not bysecond or second in bysecond)):
break
timeset = gettimeset(hour, minute, second)
if fixday and day > 28:
daysinmonth = calendar.monthrange(year, month)[1]
if day > daysinmonth:
while day > daysinmonth:
day -= daysinmonth
month += 1
if month == 13:
month = 1
year += 1
if year > datetime.MAXYEAR:
self._len = total
return
daysinmonth = calendar.monthrange(year, month)[1]
ii.rebuild(year, month)
class _iterinfo(object):
__slots__ = ["rrule", "lastyear", "lastmonth",
"yearlen", "nextyearlen", "yearordinal", "yearweekday",
"mmask", "mrange", "mdaymask", "nmdaymask",
"wdaymask", "wnomask", "nwdaymask", "eastermask"]
def __init__(self, rrule):
for attr in self.__slots__:
setattr(self, attr, None)
self.rrule = rrule
def rebuild(self, year, month):
# Every mask is 7 days longer to handle cross-year weekly periods.
rr = self.rrule
if year != self.lastyear:
self.yearlen = 365+calendar.isleap(year)
self.nextyearlen = 365+calendar.isleap(year+1)
firstyday = datetime.date(year, 1, 1)
self.yearordinal = firstyday.toordinal()
self.yearweekday = firstyday.weekday()
wday = datetime.date(year, 1, 1).weekday()
if self.yearlen == 365:
self.mmask = M365MASK
self.mdaymask = MDAY365MASK
self.nmdaymask = NMDAY365MASK
self.wdaymask = WDAYMASK[wday:]
self.mrange = M365RANGE
else:
self.mmask = M366MASK
self.mdaymask = MDAY366MASK
self.nmdaymask = NMDAY366MASK
self.wdaymask = WDAYMASK[wday:]
self.mrange = M366RANGE
if not rr._byweekno:
self.wnomask = None
else:
self.wnomask = [0]*(self.yearlen+7)
#no1wkst = firstwkst = self.wdaymask.index(rr._wkst)
no1wkst = firstwkst = (7-self.yearweekday+rr._wkst)%7
if no1wkst >= 4:
no1wkst = 0
# Number of days in the year, plus the days we got
# from last year.
wyearlen = self.yearlen+(self.yearweekday-rr._wkst)%7
else:
# Number of days in the year, minus the days we
# left in last year.
wyearlen = self.yearlen-no1wkst
div, mod = divmod(wyearlen, 7)
numweeks = div+mod//4
for n in rr._byweekno:
if n < 0:
n += numweeks+1
if not (0 < n <= numweeks):
continue
if n > 1:
i = no1wkst+(n-1)*7
if no1wkst != firstwkst:
i -= 7-firstwkst
else:
i = no1wkst
for j in range(7):
self.wnomask[i] = 1
i += 1
if self.wdaymask[i] == rr._wkst:
break
if 1 in rr._byweekno:
# Check week number 1 of next year as well
# TODO: Check -numweeks for next year.
i = no1wkst+numweeks*7
if no1wkst != firstwkst:
i -= 7-firstwkst
if i < self.yearlen:
# If week starts in next year, we
# don't care about it.
for j in range(7):
self.wnomask[i] = 1
i += 1
if self.wdaymask[i] == rr._wkst:
break
if no1wkst:
# Check last week number of last year as
# well. If no1wkst is 0, either the year
# started on week start, or week number 1
# got days from last year, so there are no
# days from last year's last week number in
# this year.
if -1 not in rr._byweekno:
lyearweekday = datetime.date(year-1, 1, 1).weekday()
lno1wkst = (7-lyearweekday+rr._wkst)%7
lyearlen = 365+calendar.isleap(year-1)
if lno1wkst >= 4:
lno1wkst = 0
lnumweeks = 52+(lyearlen+
(lyearweekday-rr._wkst)%7)%7//4
else:
lnumweeks = 52+(self.yearlen-no1wkst)%7//4
else:
lnumweeks = -1
if lnumweeks in rr._byweekno:
for i in range(no1wkst):
self.wnomask[i] = 1
if (rr._bynweekday and
(month != self.lastmonth or year != self.lastyear)):
ranges = []
if rr._freq == YEARLY:
if rr._bymonth:
for month in rr._bymonth:
ranges.append(self.mrange[month-1:month+1])
else:
ranges = [(0, self.yearlen)]
elif rr._freq == MONTHLY:
ranges = [self.mrange[month-1:month+1]]
if ranges:
# Weekly frequency won't get here, so we may not
# care about cross-year weekly periods.
self.nwdaymask = [0]*self.yearlen
for first, last in ranges:
last -= 1
for wday, n in rr._bynweekday:
if n < 0:
i = last+(n+1)*7
i -= (self.wdaymask[i]-wday)%7
else:
i = first+(n-1)*7
i += (7-self.wdaymask[i]+wday)%7
if first <= i <= last:
self.nwdaymask[i] = 1
if rr._byeaster:
self.eastermask = [0]*(self.yearlen+7)
eyday = easter.easter(year).toordinal()-self.yearordinal
for offset in rr._byeaster:
self.eastermask[eyday+offset] = 1
self.lastyear = year
self.lastmonth = month
def ydayset(self, year, month, day):
return list(range(self.yearlen)), 0, self.yearlen
def mdayset(self, year, month, day):
set = [None]*self.yearlen
start, end = self.mrange[month-1:month+1]
for i in range(start, end):
set[i] = i
return set, start, end
def wdayset(self, year, month, day):
# We need to handle cross-year weeks here.
set = [None]*(self.yearlen+7)
i = datetime.date(year, month, day).toordinal()-self.yearordinal
start = i
for j in range(7):
set[i] = i
i += 1
#if (not (0 <= i < self.yearlen) or
# self.wdaymask[i] == self.rrule._wkst):
# This will cross the year boundary, if necessary.
if self.wdaymask[i] == self.rrule._wkst:
break
return set, start, i
def ddayset(self, year, month, day):
set = [None]*self.yearlen
i = datetime.date(year, month, day).toordinal()-self.yearordinal
set[i] = i
return set, i, i+1
def htimeset(self, hour, minute, second):
set = []
rr = self.rrule
for minute in rr._byminute:
for second in rr._bysecond:
set.append(datetime.time(hour, minute, second,
tzinfo=rr._tzinfo))
set.sort()
return set
def mtimeset(self, hour, minute, second):
set = []
rr = self.rrule
for second in rr._bysecond:
set.append(datetime.time(hour, minute, second, tzinfo=rr._tzinfo))
set.sort()
return set
def stimeset(self, hour, minute, second):
return (datetime.time(hour, minute, second,
tzinfo=self.rrule._tzinfo),)
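# --- Hedged usage sketch (not part of the original module) ---
# Assuming the rrule class defined earlier in this module follows the
# public dateutil API (including the MO..FR weekday constants), the
# iterator logic above (dayset/timeset filtering, bysetpos selection)
# can be exercised like this.  Dates are illustrative only.
def _example_rrule_usage():
    import datetime
    # "Last weekday of each month": byweekday feeds the day filter,
    # bysetpos=-1 picks the final surviving (day, time) per period.
    rule = rrule(MONTHLY, byweekday=(MO, TU, WE, TH, FR), bysetpos=-1,
                 count=3, dtstart=datetime.datetime(1997, 9, 2, 9, 0))
    return list(rule)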
class rruleset(rrulebase):
class _genitem(object):
def __init__(self, genlist, gen):
try:
self.dt = advance_iterator(gen)
genlist.append(self)
except StopIteration:
pass
self.genlist = genlist
self.gen = gen
def __next__(self):
try:
self.dt = advance_iterator(self.gen)
except StopIteration:
self.genlist.remove(self)
next = __next__
def __lt__(self, other):
return self.dt < other.dt
def __gt__(self, other):
return self.dt > other.dt
def __eq__(self, other):
return self.dt == other.dt
def __ne__(self, other):
return self.dt != other.dt
def __init__(self, cache=False):
super(rruleset, self).__init__(cache)
self._rrule = []
self._rdate = []
self._exrule = []
self._exdate = []
def rrule(self, rrule):
self._rrule.append(rrule)
def rdate(self, rdate):
self._rdate.append(rdate)
def exrule(self, exrule):
self._exrule.append(exrule)
def exdate(self, exdate):
self._exdate.append(exdate)
def _iter(self):
rlist = []
self._rdate.sort()
self._genitem(rlist, iter(self._rdate))
for gen in [iter(x) for x in self._rrule]:
self._genitem(rlist, gen)
rlist.sort()
exlist = []
self._exdate.sort()
self._genitem(exlist, iter(self._exdate))
for gen in [iter(x) for x in self._exrule]:
self._genitem(exlist, gen)
exlist.sort()
lastdt = None
total = 0
while rlist:
ritem = rlist[0]
if not lastdt or lastdt != ritem.dt:
while exlist and exlist[0] < ritem:
advance_iterator(exlist[0])
exlist.sort()
if not exlist or ritem != exlist[0]:
total += 1
yield ritem.dt
lastdt = ritem.dt
advance_iterator(ritem)
rlist.sort()
self._len = total
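# Hedged sketch (illustrative, not in the original file): rruleset
# merges rrules/rdates and subtracts exrules/exdates via the sorted
# _genitem lists maintained in _iter above.
def _example_rruleset_usage():
    import datetime
    start = datetime.datetime(1997, 9, 2, 9, 0)
    rset = rruleset()
    rset.rrule(rrule(DAILY, count=5, dtstart=start))  # five daily hits
    rset.exdate(start + datetime.timedelta(days=1))   # drop the second
    return list(rset)  # four datetimes remain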
class _rrulestr(object):
_freq_map = {"YEARLY": YEARLY,
"MONTHLY": MONTHLY,
"WEEKLY": WEEKLY,
"DAILY": DAILY,
"HOURLY": HOURLY,
"MINUTELY": MINUTELY,
"SECONDLY": SECONDLY}
_weekday_map = {"MO":0,"TU":1,"WE":2,"TH":3,"FR":4,"SA":5,"SU":6}
def _handle_int(self, rrkwargs, name, value, **kwargs):
rrkwargs[name.lower()] = int(value)
def _handle_int_list(self, rrkwargs, name, value, **kwargs):
rrkwargs[name.lower()] = [int(x) for x in value.split(',')]
_handle_INTERVAL = _handle_int
_handle_COUNT = _handle_int
_handle_BYSETPOS = _handle_int_list
_handle_BYMONTH = _handle_int_list
_handle_BYMONTHDAY = _handle_int_list
_handle_BYYEARDAY = _handle_int_list
_handle_BYEASTER = _handle_int_list
_handle_BYWEEKNO = _handle_int_list
_handle_BYHOUR = _handle_int_list
_handle_BYMINUTE = _handle_int_list
_handle_BYSECOND = _handle_int_list
def _handle_FREQ(self, rrkwargs, name, value, **kwargs):
rrkwargs["freq"] = self._freq_map[value]
def _handle_UNTIL(self, rrkwargs, name, value, **kwargs):
global parser
if not parser:
from dateutil import parser
try:
rrkwargs["until"] = parser.parse(value,
ignoretz=kwargs.get("ignoretz"),
tzinfos=kwargs.get("tzinfos"))
except ValueError:
raise ValueError("invalid until date")
def _handle_WKST(self, rrkwargs, name, value, **kwargs):
rrkwargs["wkst"] = self._weekday_map[value]
def _handle_BYWEEKDAY(self, rrkwargs, name, value, **kwargs):
l = []
for wday in value.split(','):
for i in range(len(wday)):
if wday[i] not in '+-0123456789':
break
n = wday[:i] or None
w = wday[i:]
if n: n = int(n)
l.append(weekdays[self._weekday_map[w]](n))
rrkwargs["byweekday"] = l
_handle_BYDAY = _handle_BYWEEKDAY
def _parse_rfc_rrule(self, line,
dtstart=None,
cache=False,
ignoretz=False,
tzinfos=None):
if line.find(':') != -1:
name, value = line.split(':')
if name != "RRULE":
raise ValueError("unknown parameter name")
else:
value = line
rrkwargs = {}
for pair in value.split(';'):
name, value = pair.split('=')
name = name.upper()
value = value.upper()
try:
getattr(self, "_handle_"+name)(rrkwargs, name, value,
ignoretz=ignoretz,
tzinfos=tzinfos)
except AttributeError:
raise ValueError("unknown parameter '%s'" % name)
except (KeyError, ValueError):
raise ValueError("invalid '%s': %s" % (name, value))
return rrule(dtstart=dtstart, cache=cache, **rrkwargs)
def _parse_rfc(self, s,
dtstart=None,
cache=False,
unfold=False,
forceset=False,
compatible=False,
ignoretz=False,
tzinfos=None):
global parser
if compatible:
forceset = True
unfold = True
s = s.upper()
if not s.strip():
raise ValueError("empty string")
if unfold:
lines = s.splitlines()
i = 0
while i < len(lines):
line = lines[i].rstrip()
if not line:
del lines[i]
elif i > 0 and line[0] == " ":
lines[i-1] += line[1:]
del lines[i]
else:
i += 1
else:
lines = s.split()
if (not forceset and len(lines) == 1 and
(s.find(':') == -1 or s.startswith('RRULE:'))):
return self._parse_rfc_rrule(lines[0], cache=cache,
dtstart=dtstart, ignoretz=ignoretz,
tzinfos=tzinfos)
else:
rrulevals = []
rdatevals = []
exrulevals = []
exdatevals = []
for line in lines:
if not line:
continue
if line.find(':') == -1:
name = "RRULE"
value = line
else:
name, value = line.split(':', 1)
parms = name.split(';')
if not parms:
raise ValueError("empty property name")
name = parms[0]
parms = parms[1:]
if name == "RRULE":
for parm in parms:
raise ValueError("unsupported RRULE parm: "+parm)
rrulevals.append(value)
elif name == "RDATE":
for parm in parms:
if parm != "VALUE=DATE-TIME":
raise ValueError("unsupported RDATE parm: "+parm)
rdatevals.append(value)
elif name == "EXRULE":
for parm in parms:
raise ValueError("unsupported EXRULE parm: "+parm)
exrulevals.append(value)
elif name == "EXDATE":
for parm in parms:
if parm != "VALUE=DATE-TIME":
raise ValueError("unsupported RDATE parm: "+parm)
exdatevals.append(value)
elif name == "DTSTART":
for parm in parms:
raise ValueError("unsupported DTSTART parm: "+parm)
if not parser:
from dateutil import parser
dtstart = parser.parse(value, ignoretz=ignoretz,
tzinfos=tzinfos)
else:
raise ValueError("unsupported property: "+name)
if (forceset or len(rrulevals) > 1 or
rdatevals or exrulevals or exdatevals):
if not parser and (rdatevals or exdatevals):
from dateutil import parser
set = rruleset(cache=cache)
for value in rrulevals:
set.rrule(self._parse_rfc_rrule(value, dtstart=dtstart,
ignoretz=ignoretz,
tzinfos=tzinfos))
for value in rdatevals:
for datestr in value.split(','):
set.rdate(parser.parse(datestr,
ignoretz=ignoretz,
tzinfos=tzinfos))
for value in exrulevals:
set.exrule(self._parse_rfc_rrule(value, dtstart=dtstart,
ignoretz=ignoretz,
tzinfos=tzinfos))
for value in exdatevals:
for datestr in value.split(','):
set.exdate(parser.parse(datestr,
ignoretz=ignoretz,
tzinfos=tzinfos))
if compatible and dtstart:
set.rdate(dtstart)
return set
else:
return self._parse_rfc_rrule(rrulevals[0],
dtstart=dtstart,
cache=cache,
ignoretz=ignoretz,
tzinfos=tzinfos)
def __call__(self, s, **kwargs):
return self._parse_rfc(s, **kwargs)
rrulestr = _rrulestr()
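# Hedged sketch (illustrative, not in the original file): rrulestr
# parses RFC 2445 recurrence text into an rrule, or an rruleset when
# RDATE/EXRULE/EXDATE lines are present.
def _example_rrulestr_usage():
    import datetime
    rule = rrulestr("FREQ=DAILY;COUNT=3",
                    dtstart=datetime.datetime(1997, 9, 2, 9, 0))
    return list(rule)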
# vim:ts=4:sw=4:et
| gpl-3.0 |
sugartom/tensorflow-alien | tensorflow/python/ops/data_flow_ops.py | 15 | 64923 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#==============================================================================
"""Data Flow Operations."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import hashlib
import re
import threading
import six
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import math_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_data_flow_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.util.deprecation import deprecated
def _as_type_list(dtypes):
"""Convert dtypes to a list of types."""
assert dtypes is not None
if not (isinstance(dtypes, list) or isinstance(dtypes, tuple)):
# We have a single type.
return [dtypes]
else:
# We have a list or tuple of types.
return list(dtypes)
def _as_shape_list(shapes, dtypes, unknown_dim_allowed=False,
unknown_rank_allowed=False):
"""Convert shapes to a list of tuples of int (or None)."""
if unknown_dim_allowed:
if (not isinstance(shapes, collections.Sequence)
or not shapes
or any(shape is None or isinstance(shape, int) for shape in shapes)):
raise ValueError(
"When providing partial shapes, a list of shapes must be provided.")
if shapes is None: return None
if isinstance(shapes, tensor_shape.TensorShape):
shapes = [shapes]
if not isinstance(shapes, (tuple, list)):
raise TypeError(
"shapes must be a TensorShape or a list or tuple of TensorShapes.")
if all(shape is None or isinstance(shape, int) for shape in shapes):
# We have a single shape.
shapes = [shapes]
shapes = [tensor_shape.as_shape(shape) for shape in shapes]
if not unknown_dim_allowed:
if any([not shape.is_fully_defined() for shape in shapes]):
raise ValueError("All shapes must be fully defined: %s" % shapes)
if not unknown_rank_allowed:
if any([shape.dims is None for shape in shapes]):
raise ValueError("All shapes must have a defined rank: %s" % shapes)
return shapes
def _as_name_list(names, dtypes):
if names is None:
return None
if not isinstance(names, (list, tuple)):
names = [names]
if len(names) != len(dtypes):
raise ValueError("List of names must have the same length as the list "
"of dtypes")
return list(names)
def _shape_common(s1, s2):
"""The greatest lower bound (ordered by specificity) TensorShape."""
s1 = tensor_shape.TensorShape(s1)
s2 = tensor_shape.TensorShape(s2)
if s1.ndims is None or s2.ndims is None or s1.ndims != s2.ndims:
return tensor_shape.unknown_shape()
d = [
d1 if d1 is not None and d1 == d2 else None
for (d1, d2) in zip(s1.as_list(), s2.as_list())]
return tensor_shape.TensorShape(d)
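# Hedged sketch (not in the original file): _shape_common keeps only
# the dimensions on which the two shapes agree.
def _example_shape_common():
    # (32, 100) vs (64, 100) -> TensorShape([None, 100])
    return _shape_common((32, 100), (64, 100))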
# pylint: disable=protected-access
class QueueBase(object):
"""Base class for queue implementations.
A queue is a TensorFlow data structure that stores tensors across
multiple steps, and exposes operations that enqueue and dequeue
tensors.
Each queue element is a tuple of one or more tensors, where each
tuple component has a static dtype, and may have a static shape. The
queue implementations support versions of enqueue and dequeue that
handle single elements, versions that support enqueuing and
dequeuing a batch of elements at once.
See @{tf.FIFOQueue} and
@{tf.RandomShuffleQueue} for concrete
implementations of this class, and instructions on how to create
them.
"""
def __init__(self, dtypes, shapes, names, queue_ref):
"""Constructs a queue object from a queue reference.
The two optional lists, `shapes` and `names`, must be of the same length
as `dtypes` if provided. The values at a given index `i` indicate the
shape and name to use for the corresponding queue component in `dtypes`.
Args:
dtypes: A list of types. The length of dtypes must equal the number
of tensors in each element.
shapes: Constraints on the shapes of tensors in an element:
A list of shape tuples or None. This list is the same length
as dtypes. If the shape of any tensors in the element are constrained,
all must be; shapes can be None if the shapes should not be constrained.
names: Optional list of names. If provided, the `enqueue()` and
`dequeue()` methods will use dictionaries with these names as keys.
Must be None or a list or tuple of the same length as `dtypes`.
queue_ref: The queue reference, i.e. the output of the queue op.
Raises:
ValueError: If one of the arguments is invalid.
"""
self._dtypes = dtypes
if shapes is not None:
if len(shapes) != len(dtypes):
raise ValueError("Queue shapes must have the same length as dtypes")
self._shapes = [tensor_shape.TensorShape(s) for s in shapes]
else:
self._shapes = [tensor_shape.unknown_shape() for _ in self._dtypes]
if names is not None:
if len(names) != len(dtypes):
raise ValueError("Queue names must have the same length as dtypes")
self._names = names
else:
self._names = None
self._queue_ref = queue_ref
self._name = self._queue_ref.op.name.split("/")[-1]
@staticmethod
def from_list(index, queues):
"""Create a queue using the queue reference from `queues[index]`.
Args:
index: An integer scalar tensor that determines the input that gets
selected.
queues: A list of `QueueBase` objects.
Returns:
A `QueueBase` object.
Raises:
TypeError: When `queues` is not a list of `QueueBase` objects,
or when the data types of `queues` are not all the same.
"""
if ((not queues) or
(not isinstance(queues, list)) or
(not all(isinstance(x, QueueBase) for x in queues))):
raise TypeError("A list of queues expected")
dtypes = queues[0].dtypes
if not all([dtypes == q.dtypes for q in queues[1:]]):
raise TypeError("Queues do not have matching component dtypes.")
names = queues[0].names
if not all([names == q.names for q in queues[1:]]):
raise TypeError("Queues do not have matching component names.")
queue_shapes = [q.shapes for q in queues]
reduced_shapes = [
six.moves.reduce(_shape_common, s) for s in zip(*queue_shapes)]
queue_refs = array_ops.stack([x.queue_ref for x in queues])
selected_queue = array_ops.gather(queue_refs, index)
return QueueBase(dtypes=dtypes, shapes=reduced_shapes, names=names,
queue_ref=selected_queue)
@property
def queue_ref(self):
"""The underlying queue reference."""
return self._queue_ref
@property
def name(self):
"""The name of the underlying queue."""
return self._queue_ref.op.name
@property
def dtypes(self):
"""The list of dtypes for each component of a queue element."""
return self._dtypes
@property
def shapes(self):
"""The list of shapes for each component of a queue element."""
return self._shapes
@property
def names(self):
"""The list of names for each component of a queue element."""
return self._names
def _check_enqueue_dtypes(self, vals):
"""Validate and convert `vals` to a list of `Tensor`s.
The `vals` argument can be a Tensor, a list or tuple of tensors, or a
dictionary with tensor values.
If it is a dictionary, the queue must have been constructed with a
`names` attribute and the dictionary keys must match the queue names.
If the queue was constructed with a `names` attribute, `vals` must
be a dictionary.
Args:
vals: A tensor, a list or tuple of tensors, or a dictionary.
Returns:
A list of `Tensor` objects.
Raises:
ValueError: If `vals` is invalid.
"""
if isinstance(vals, dict):
if not self._names:
raise ValueError("Queue must have names to enqueue a dictionary")
if sorted(self._names) != sorted(vals.keys()):
raise ValueError("Keys in dictionary to enqueue do not match "
"names of Queue. Dictionary: (%s), Queue: (%s)" %
(sorted(vals.keys()), sorted(self._names)))
# The order of values in `self._names` indicates the order in which the
# tensors in the dictionary `vals` must be listed.
vals = [vals[k] for k in self._names]
else:
if self._names:
raise ValueError("You must enqueue a dictionary in a Queue with names")
if not isinstance(vals, (list, tuple)):
vals = [vals]
tensors = []
for i, (val, dtype) in enumerate(zip(vals, self._dtypes)):
tensors.append(ops.convert_to_tensor(val, dtype=dtype,
name="component_%d" % i))
return tensors
def _scope_vals(self, vals):
"""Return a list of values to pass to `name_scope()`.
Args:
vals: A tensor, a list or tuple of tensors, or a dictionary.
Returns:
The values in vals as a list.
"""
if isinstance(vals, (list, tuple)):
return vals
elif isinstance(vals, dict):
return vals.values()
else:
return [vals]
def enqueue(self, vals, name=None):
"""Enqueues one element to this queue.
If the queue is full when this operation executes, it will block
until the element has been enqueued.
At runtime, this operation may raise an error if the queue is
@{tf.QueueBase.close} before or during its execution. If the
queue is closed before this operation runs,
`tf.errors.CancelledError` will be raised. If this operation is
blocked, and either (i) the queue is closed by a close operation
with `cancel_pending_enqueues=True`, or (ii) the session is
@{tf.Session.close},
`tf.errors.CancelledError` will be raised.
Args:
vals: A tensor, a list or tuple of tensors, or a dictionary containing
the values to enqueue.
name: A name for the operation (optional).
Returns:
The operation that enqueues a new tuple of tensors to the queue.
"""
with ops.name_scope(name, "%s_enqueue" % self._name,
self._scope_vals(vals)) as scope:
vals = self._check_enqueue_dtypes(vals)
# NOTE(mrry): Not using a shape function because we need access to
# the `QueueBase` object.
for val, shape in zip(vals, self._shapes):
val.get_shape().assert_is_compatible_with(shape)
if self._queue_ref.dtype == _dtypes.resource:
return gen_data_flow_ops._queue_enqueue_v2(
self._queue_ref, vals, name=scope)
else:
return gen_data_flow_ops._queue_enqueue(
self._queue_ref, vals, name=scope)
def enqueue_many(self, vals, name=None):
"""Enqueues zero or more elements to this queue.
This operation slices each component tensor along the 0th dimension to
make multiple queue elements. All of the tensors in `vals` must have the
same size in the 0th dimension.
If the queue is full when this operation executes, it will block
until all of the elements have been enqueued.
At runtime, this operation may raise an error if the queue is
@{tf.QueueBase.close} before or during its execution. If the
queue is closed before this operation runs,
`tf.errors.CancelledError` will be raised. If this operation is
blocked, and either (i) the queue is closed by a close operation
with `cancel_pending_enqueues=True`, or (ii) the session is
@{tf.Session.close},
`tf.errors.CancelledError` will be raised.
Args:
vals: A tensor, a list or tuple of tensors, or a dictionary
from which the queue elements are taken.
name: A name for the operation (optional).
Returns:
The operation that enqueues a batch of tuples of tensors to the queue.
"""
with ops.name_scope(name, "%s_EnqueueMany" % self._name,
self._scope_vals(vals)) as scope:
vals = self._check_enqueue_dtypes(vals)
# NOTE(mrry): Not using a shape function because we need access to
# the `QueueBase` object.
batch_dim = vals[0].get_shape().with_rank_at_least(1)[0]
for val, shape in zip(vals, self._shapes):
batch_dim = batch_dim.merge_with(
val.get_shape().with_rank_at_least(1)[0])
val.get_shape()[1:].assert_is_compatible_with(shape)
return gen_data_flow_ops._queue_enqueue_many_v2(
self._queue_ref, vals, name=scope)
def _dequeue_return_value(self, tensors):
"""Return the value to return from a dequeue op.
If the queue has names, return a dictionary with the
names as keys. Otherwise return either a single tensor
or a list of tensors depending on the length of `tensors`.
Args:
tensors: List of tensors from the dequeue op.
Returns:
A single tensor, a list of tensors, or a dictionary
of tensors.
"""
if self._names:
# The returned values in `tensors` are in the same order as
# the names in `self._names`.
return {n: tensors[i] for i, n in enumerate(self._names)}
elif len(tensors) == 1:
return tensors[0]
else:
return tensors
def dequeue(self, name=None):
"""Dequeues one element from this queue.
If the queue is empty when this operation executes, it will block
until there is an element to dequeue.
At runtime, this operation may raise an error if the queue is
@{tf.QueueBase.close} before or during its execution. If the
queue is closed, the queue is empty, and there are no pending
enqueue operations that can fulfill this request,
`tf.errors.OutOfRangeError` will be raised. If the session is
@{tf.Session.close},
`tf.errors.CancelledError` will be raised.
Args:
name: A name for the operation (optional).
Returns:
The tuple of tensors that was dequeued.
"""
if name is None:
name = "%s_Dequeue" % self._name
if self._queue_ref.dtype == _dtypes.resource:
ret = gen_data_flow_ops._queue_dequeue_v2(
self._queue_ref, self._dtypes, name=name)
else:
ret = gen_data_flow_ops._queue_dequeue(
self._queue_ref, self._dtypes, name=name)
# NOTE(mrry): Not using a shape function because we need access to
# the `QueueBase` object.
op = ret[0].op
for output, shape in zip(op.values(), self._shapes):
output.set_shape(shape)
return self._dequeue_return_value(ret)
def dequeue_many(self, n, name=None):
"""Dequeues and concatenates `n` elements from this queue.
This operation concatenates queue-element component tensors along
the 0th dimension to make a single component tensor. All of the
components in the dequeued tuple will have size `n` in the 0th dimension.
If the queue is closed and there are less than `n` elements left, then an
`OutOfRange` exception is raised.
At runtime, this operation may raise an error if the queue is
@{tf.QueueBase.close} before or during its execution. If the
queue is closed, the queue contains fewer than `n` elements, and
there are no pending enqueue operations that can fulfill this
request, `tf.errors.OutOfRangeError` will be raised. If the
session is @{tf.Session.close},
`tf.errors.CancelledError` will be raised.
Args:
n: A scalar `Tensor` containing the number of elements to dequeue.
name: A name for the operation (optional).
Returns:
The tuple of concatenated tensors that was dequeued.
"""
if name is None:
name = "%s_DequeueMany" % self._name
ret = gen_data_flow_ops._queue_dequeue_many_v2(
self._queue_ref, n=n, component_types=self._dtypes, name=name)
# NOTE(mrry): Not using a shape function because we need access to
# the Queue object.
op = ret[0].op
batch_dim = tensor_shape.Dimension(tensor_util.constant_value(op.inputs[1]))
for output, shape in zip(op.values(), self._shapes):
output.set_shape(tensor_shape.TensorShape([batch_dim]).concatenate(shape))
return self._dequeue_return_value(ret)
def dequeue_up_to(self, n, name=None):
"""Dequeues and concatenates `n` elements from this queue.
**Note** This operation is not supported by all queues. If a queue does not
support DequeueUpTo, then a `tf.errors.UnimplementedError` is raised.
This operation concatenates queue-element component tensors along
the 0th dimension to make a single component tensor. If the queue
has not been closed, all of the components in the dequeued tuple
will have size `n` in the 0th dimension.
If the queue is closed and there are more than `0` but fewer than
`n` elements remaining, then instead of raising a
`tf.errors.OutOfRangeError` like @{tf.QueueBase.dequeue_many},
fewer than `n` elements are returned immediately. If the queue is
closed and there are `0` elements left in the queue, then a
`tf.errors.OutOfRangeError` is raised just like in `dequeue_many`.
Otherwise the behavior is identical to `dequeue_many`.
Args:
n: A scalar `Tensor` containing the number of elements to dequeue.
name: A name for the operation (optional).
Returns:
The tuple of concatenated tensors that was dequeued.
"""
if name is None:
name = "%s_DequeueUpTo" % self._name
ret = gen_data_flow_ops._queue_dequeue_up_to_v2(
self._queue_ref, n=n, component_types=self._dtypes, name=name)
# NOTE(mrry): Not using a shape function because we need access to
# the Queue object.
op = ret[0].op
for output, shape in zip(op.values(), self._shapes):
output.set_shape(tensor_shape.TensorShape([None]).concatenate(shape))
return self._dequeue_return_value(ret)
def close(self, cancel_pending_enqueues=False, name=None):
"""Closes this queue.
This operation signals that no more elements will be enqueued in
the given queue. Subsequent `enqueue` and `enqueue_many`
operations will fail. Subsequent `dequeue` and `dequeue_many`
operations will continue to succeed if sufficient elements remain
in the queue. Subsequent `dequeue` and `dequeue_many` operations
that would block will fail immediately.
If `cancel_pending_enqueues` is `True`, all pending requests will also
be cancelled.
Args:
cancel_pending_enqueues: (Optional.) A boolean, defaulting to
`False` (described above).
name: A name for the operation (optional).
Returns:
The operation that closes the queue.
"""
if name is None:
name = "%s_Close" % self._name
if self._queue_ref.dtype == _dtypes.resource:
return gen_data_flow_ops._queue_close_v2(
self._queue_ref, cancel_pending_enqueues=cancel_pending_enqueues,
name=name)
else:
return gen_data_flow_ops._queue_close(
self._queue_ref, cancel_pending_enqueues=cancel_pending_enqueues,
name=name)
def size(self, name=None):
"""Compute the number of elements in this queue.
Args:
name: A name for the operation (optional).
Returns:
A scalar tensor containing the number of elements in this queue.
"""
if name is None:
name = "%s_Size" % self._name
if self._queue_ref.dtype == _dtypes.resource:
return gen_data_flow_ops._queue_size_v2(self._queue_ref, name=name)
else:
return gen_data_flow_ops._queue_size(self._queue_ref, name=name)
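# Hedged sketch (not in the original file): QueueBase.from_list selects
# one of several queues by a scalar index tensor, e.g. for bucketing or
# round-robin reads.  FIFOQueue is defined later in this module.
def _example_queue_from_list():
    q0 = FIFOQueue(5, dtypes=[_dtypes.int32], shapes=[()])
    q1 = FIFOQueue(5, dtypes=[_dtypes.int32], shapes=[()])
    which = array_ops.placeholder(_dtypes.int32, shape=())
    selected = QueueBase.from_list(which, [q0, q1])
    return selected.dequeue()  # feed {which: 0 or 1} at run time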
class RandomShuffleQueue(QueueBase):
"""A queue implementation that dequeues elements in a random order.
See @{tf.QueueBase} for a description of the methods on
this class.
"""
def __init__(self, capacity, min_after_dequeue, dtypes, shapes=None,
names=None, seed=None, shared_name=None,
name="random_shuffle_queue"):
"""Create a queue that dequeues elements in a random order.
A `RandomShuffleQueue` has bounded capacity; supports multiple
concurrent producers and consumers; and provides exactly-once
delivery.
A `RandomShuffleQueue` holds a list of up to `capacity`
elements. Each element is a fixed-length tuple of tensors whose
dtypes are described by `dtypes`, and whose shapes are optionally
described by the `shapes` argument.
If the `shapes` argument is specified, each component of a queue
element must have the respective fixed shape. If it is
unspecified, different queue elements may have different shapes,
but the use of `dequeue_many` is disallowed.
The `min_after_dequeue` argument allows the caller to specify a
minimum number of elements that will remain in the queue after a
`dequeue` or `dequeue_many` operation completes, to ensure a
minimum level of mixing of elements. This invariant is maintained
by blocking those operations until sufficient elements have been
enqueued. The `min_after_dequeue` argument is ignored after the
queue has been closed.
Args:
capacity: An integer. The upper bound on the number of elements
that may be stored in this queue.
min_after_dequeue: An integer (described above).
dtypes: A list of `DType` objects. The length of `dtypes` must equal
the number of tensors in each queue element.
shapes: (Optional.) A list of fully-defined `TensorShape` objects
with the same length as `dtypes`, or `None`.
names: (Optional.) A list of strings naming the components in the queue
with the same length as `dtypes`, or `None`. If specified, the dequeue
methods return a dictionary with the names as keys.
seed: A Python integer. Used to create a random seed. See
@{tf.set_random_seed}
for behavior.
shared_name: (Optional.) If non-empty, this queue will be shared under
the given name across multiple sessions.
name: Optional name for the queue operation.
"""
dtypes = _as_type_list(dtypes)
shapes = _as_shape_list(shapes, dtypes)
names = _as_name_list(names, dtypes)
seed1, seed2 = random_seed.get_seed(seed)
if seed1 is None and seed2 is None:
seed1, seed2 = 0, 0
elif seed is None and shared_name is not None:
# This means that graph seed is provided but op seed is not provided.
# If shared_name is also provided, make seed2 depend only on the graph
# seed and shared_name. (seed2 from get_seed() is generally dependent on
# the id of the last op created.)
string = (str(seed1) + shared_name).encode("utf-8")
seed2 = int(hashlib.md5(string).hexdigest()[:8], 16) & 0x7FFFFFFF
queue_ref = gen_data_flow_ops._random_shuffle_queue_v2(
component_types=dtypes, shapes=shapes, capacity=capacity,
min_after_dequeue=min_after_dequeue, seed=seed1, seed2=seed2,
shared_name=shared_name, name=name)
super(RandomShuffleQueue, self).__init__(dtypes, shapes, names, queue_ref)
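# Hedged sketch (not in the original file): min_after_dequeue forces a
# buffer to remain after each dequeue so samples come from a reasonably
# mixed pool.  Capacity and seed values are arbitrary.
def _example_random_shuffle_queue():
    q = RandomShuffleQueue(capacity=10, min_after_dequeue=2,
                           dtypes=[_dtypes.int32], shapes=[()], seed=42)
    enqueue_op = q.enqueue_many(([0, 1, 2, 3, 4],))
    sample = q.dequeue()  # one element, in randomized order
    return enqueue_op, sample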
class FIFOQueue(QueueBase):
"""A queue implementation that dequeues elements in first-in first-out order.
See @{tf.QueueBase} for a description of the methods on
this class.
"""
def __init__(self, capacity, dtypes, shapes=None, names=None,
shared_name=None, name="fifo_queue"):
"""Creates a queue that dequeues elements in a first-in first-out order.
A `FIFOQueue` has bounded capacity; supports multiple concurrent
producers and consumers; and provides exactly-once delivery.
A `FIFOQueue` holds a list of up to `capacity` elements. Each
element is a fixed-length tuple of tensors whose dtypes are
described by `dtypes`, and whose shapes are optionally described
by the `shapes` argument.
If the `shapes` argument is specified, each component of a queue
element must have the respective fixed shape. If it is
unspecified, different queue elements may have different shapes,
but the use of `dequeue_many` is disallowed.
Args:
capacity: An integer. The upper bound on the number of elements
that may be stored in this queue.
dtypes: A list of `DType` objects. The length of `dtypes` must equal
the number of tensors in each queue element.
shapes: (Optional.) A list of fully-defined `TensorShape` objects
with the same length as `dtypes`, or `None`.
names: (Optional.) A list of strings naming the components in the queue
with the same length as `dtypes`, or `None`. If specified, the dequeue
methods return a dictionary with the names as keys.
shared_name: (Optional.) If non-empty, this queue will be shared under
the given name across multiple sessions.
name: Optional name for the queue operation.
"""
dtypes = _as_type_list(dtypes)
shapes = _as_shape_list(shapes, dtypes)
names = _as_name_list(names, dtypes)
queue_ref = gen_data_flow_ops._fifo_queue_v2(
component_types=dtypes, shapes=shapes, capacity=capacity,
shared_name=shared_name, name=name)
super(FIFOQueue, self).__init__(dtypes, shapes, names, queue_ref)
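# Hedged sketch (not in the original file): the classic TF1
# enqueue/dequeue round trip.  Session usage assumes the graph-mode
# API of this TensorFlow version.
def _example_fifo_queue():
    from tensorflow.python.client import session as session_lib
    q = FIFOQueue(capacity=3, dtypes=[_dtypes.float32], shapes=[()])
    enqueue_op = q.enqueue_many(([1.0, 2.0, 3.0],))
    dequeued = q.dequeue()
    with session_lib.Session() as sess:
        sess.run(enqueue_op)
        return [sess.run(dequeued) for _ in range(3)]  # [1.0, 2.0, 3.0]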
class PaddingFIFOQueue(QueueBase):
"""A FIFOQueue that supports batching variable-sized tensors by padding.
A `PaddingFIFOQueue` may contain components with dynamic shape, while also
supporting `dequeue_many`. See the constructor for more details.
See @{tf.QueueBase} for a description of the methods on
this class.
"""
def __init__(self, capacity, dtypes, shapes, names=None, shared_name=None,
name="padding_fifo_queue"):
"""Creates a queue that dequeues elements in a first-in first-out order.
A `PaddingFIFOQueue` has bounded capacity; supports multiple concurrent
producers and consumers; and provides exactly-once delivery.
A `PaddingFIFOQueue` holds a list of up to `capacity` elements. Each
element is a fixed-length tuple of tensors whose dtypes are
described by `dtypes`, and whose shapes are described by the `shapes`
argument.
The `shapes` argument must be specified; each component of a queue
element must have the respective shape. Shapes of fixed
rank but variable size are allowed by setting any shape dimension to None.
In this case, the inputs' shape may vary along the given dimension, and
`dequeue_many` will pad the given dimension with zeros up to the maximum
shape of all elements in the given batch.
Args:
capacity: An integer. The upper bound on the number of elements
that may be stored in this queue.
dtypes: A list of `DType` objects. The length of `dtypes` must equal
the number of tensors in each queue element.
shapes: A list of `TensorShape` objects, with the same length as
`dtypes`. Any dimension in the `TensorShape` containing value
`None` is dynamic and allows values to be enqueued with
variable size in that dimension.
names: (Optional.) A list of strings naming the components in the queue
with the same length as `dtypes`, or `None`. If specified, the dequeue
methods return a dictionary with the names as keys.
shared_name: (Optional.) If non-empty, this queue will be shared under
the given name across multiple sessions.
name: Optional name for the queue operation.
Raises:
ValueError: If shapes is not a list of shapes, or the lengths of dtypes
and shapes do not match, or if names is specified and the lengths of
dtypes and names do not match.
"""
dtypes = _as_type_list(dtypes)
shapes = _as_shape_list(shapes, dtypes, unknown_dim_allowed=True)
names = _as_name_list(names, dtypes)
if len(dtypes) != len(shapes):
raise ValueError("Shapes must be provided for all components, "
"but received %d dtypes and %d shapes."
% (len(dtypes), len(shapes)))
queue_ref = gen_data_flow_ops._padding_fifo_queue_v2(
component_types=dtypes, shapes=shapes, capacity=capacity,
shared_name=shared_name, name=name)
super(PaddingFIFOQueue, self).__init__(dtypes, shapes, names, queue_ref)
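# Hedged sketch (not in the original file): a None dimension in
# `shapes` lets elements vary there; dequeue_many zero-pads each batch
# up to its longest member.
def _example_padding_fifo_queue():
    q = PaddingFIFOQueue(capacity=10, dtypes=[_dtypes.int32],
                         shapes=[(None,)])
    enq_a = q.enqueue(([1, 2],))
    enq_b = q.enqueue(([3, 4, 5],))
    batch = q.dequeue_many(2)  # -> [[1, 2, 0], [3, 4, 5]]
    return enq_a, enq_b, batch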
class PriorityQueue(QueueBase):
"""A queue implementation that dequeues elements in prioritized order.
See @{tf.QueueBase} for a description of the methods on
this class.
"""
def __init__(self, capacity, types, shapes=None, names=None, shared_name=None,
name="priority_queue"):
"""Creates a queue that dequeues elements in a first-in first-out order.
A `PriorityQueue` has bounded capacity; supports multiple concurrent
producers and consumers; and provides exactly-once delivery.
A `PriorityQueue` holds a list of up to `capacity` elements. Each
element is a fixed-length tuple of tensors whose dtypes are
described by `types`, and whose shapes are optionally described
by the `shapes` argument.
If the `shapes` argument is specified, each component of a queue
element must have the respective fixed shape. If it is
unspecified, different queue elements may have different shapes,
but the use of `dequeue_many` is disallowed.
Enqueues and Dequeues to the `PriorityQueue` must include an additional
tuple entry at the beginning: the `priority`. The priority must be
an int64 scalar (for `enqueue`) or an int64 vector (for `enqueue_many`).
Args:
capacity: An integer. The upper bound on the number of elements
that may be stored in this queue.
types: A list of `DType` objects. The length of `types` must equal
the number of tensors in each queue element, except the first priority
element. The first tensor in each element is the priority,
which must be type int64.
shapes: (Optional.) A list of fully-defined `TensorShape` objects,
with the same length as `types`, or `None`.
names: (Optional.) A list of strings naming the components in the queue
with the same length as `dtypes`, or `None`. If specified, the dequeue
methods return a dictionary with the names as keys.
shared_name: (Optional.) If non-empty, this queue will be shared under
the given name across multiple sessions.
name: Optional name for the queue operation.
"""
types = _as_type_list(types)
shapes = _as_shape_list(shapes, types)
queue_ref = gen_data_flow_ops._priority_queue_v2(
component_types=types, shapes=shapes, capacity=capacity,
shared_name=shared_name, name=name)
priority_dtypes = [_dtypes.int64] + types
priority_shapes = [()] + shapes if shapes else shapes
super(PriorityQueue, self).__init__(
priority_dtypes, priority_shapes, names, queue_ref)
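# Hedged sketch (not in the original file): every enqueue carries an
# extra leading int64 priority; dequeue returns elements
# smallest-priority first.
def _example_priority_queue():
    q = PriorityQueue(capacity=10, types=[_dtypes.string], shapes=[()])
    enqueue_op = q.enqueue_many(([2, 0, 1], ["low", "first", "mid"]))
    item = q.dequeue()  # (0, b"first") comes out before 1 and 2
    return enqueue_op, item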
# TODO(josh11b): class BatchQueue(QueueBase):
class Barrier(object):
"""Represents a key-value map that persists across graph executions."""
def __init__(self, types, shapes=None, shared_name=None, name="barrier"):
"""Creates a barrier that persists across different graph executions.
A barrier represents a key-value map, where each key is a string, and
each value is a tuple of tensors.
At runtime, the barrier contains 'complete' and 'incomplete'
elements. A complete element has defined tensors for all
components of its value tuple, and may be accessed using
take_many. An incomplete element has some undefined components in
its value tuple, and may be updated using insert_many.
The barrier call `take_many` outputs values in a particular order.
First, it only outputs completed values. Second, the order in which
completed values are returned matches the order in which their very
first component was inserted into the barrier. So, for example, for this
sequence of insertions and removals:
barrier = Barrier((tf.string, tf.int32), shapes=((), ()))
barrier.insert_many(0, keys=["k1", "k2"], values=["a", "b"]).run()
barrier.insert_many(1, keys=["k1"], values=[1]).run()
barrier.insert_many(0, keys=["k3"], values=["c"]).run()
barrier.insert_many(1, keys=["k3"], values=[3]).run()
barrier.insert_many(1, keys=["k2"], values=[2]).run()
(indices, keys, values) = barrier.take_many(2)
(indices_val, keys_val, values0_val, values1_val) =
session.run([indices, keys, values[0], values[1]])
The output will be (up to permutation of "k1" and "k2"):
indices_val == (-2**63, -2**63)
keys_val == ("k1", "k2")
values0_val == ("a", "b")
values1_val == (1, 2)
Note the key "k2" was inserted into the barrier before "k3". Even though
"k3" was completed first, both are complete by the time
take_many is called. As a result, "k2" is prioritized and "k1" and "k2"
are returned first. "k3" remains in the barrier until the next execution
of `take_many`. Since "k1" and "k2" had their first insertions into
the barrier together, their indices are the same (-2**63). The index
of "k3" will be -2**63 + 1, because it was the next new inserted key.
Args:
types: A single dtype or a tuple of dtypes, corresponding to the
dtypes of the tensor elements that comprise a value in this barrier.
shapes: Optional. Constraints on the shapes of tensors in the values:
a single tensor shape tuple; a tuple of tensor shape tuples
for each barrier-element tuple component; or None if the shape should
not be constrained.
shared_name: Optional. If non-empty, this barrier will be shared under
the given name across multiple sessions.
name: Optional name for the barrier op.
Raises:
ValueError: If one of the `shapes` indicate no elements.
"""
self._types = _as_type_list(types)
if shapes is not None:
shapes = _as_shape_list(shapes, self._types)
self._shapes = [tensor_shape.TensorShape(s) for s in shapes]
for i, shape in enumerate(self._shapes):
if shape.num_elements() == 0:
raise ValueError("Empty tensors are not supported, but received "
"shape '%s' at index %d" % (shape, i))
else:
self._shapes = [tensor_shape.unknown_shape() for _ in self._types]
self._barrier_ref = gen_data_flow_ops._barrier(
component_types=self._types, shapes=self._shapes,
shared_name=shared_name, name=name)
self._name = self._barrier_ref.op.name.split("/")[-1]
@property
def barrier_ref(self):
"""Get the underlying barrier reference."""
return self._barrier_ref
@property
def name(self):
"""The name of the underlying barrier."""
return self._barrier_ref.op.name
def insert_many(self, component_index, keys, values, name=None):
"""For each key, assigns the respective value to the specified component.
This operation updates each element at component_index.
Args:
component_index: The component of the value that is being assigned.
keys: A vector of keys, with length n.
values: An any-dimensional tensor of values, which are associated with the
respective keys. The first dimension must have length n.
name: Optional name for the op.
Returns:
The operation that performs the insertion.
Raises:
InvalidArgumentsError: If inserting keys and values without elements.
"""
if name is None:
name = "%s_BarrierInsertMany" % self._name
return gen_data_flow_ops._barrier_insert_many(
self._barrier_ref, keys, values, component_index, name=name)
def take_many(self,
num_elements,
allow_small_batch=False,
timeout=None,
name=None):
"""Takes the given number of completed elements from this barrier.
This operation concatenates completed-element component tensors along
the 0th dimension to make a single component tensor.
If barrier has no completed elements, this operation will block
until there are 'num_elements' elements to take.
Args:
num_elements: The number of elements to take.
allow_small_batch: If the barrier is closed, don't block if there are fewer
completed elements than requested, but instead return all available
completed elements.
TODO(b/25743580): the semantics of `allow_small_batch` are experimental
and may be extended to other cases in the future.
TODO(ebrevdo): If a take_many(allow_small_batch=True) is blocking
already when the barrier is closed, it will block forever. Fix this
by using asynchronous operations.
timeout: This specifies the number of milliseconds to block
before returning with DEADLINE_EXCEEDED. (This option is not
supported yet.)
name: A name for the operation (optional).
Returns:
A tuple of (index, key, value_list).
"index" is a int64 tensor of length num_elements containing the
index of the insert_many call for which the very first component of
the given element was inserted into the Barrier, starting with
the value -2**63. Note, this value is different from the
index of the insert_many call for which the element was completed.
"key" is a string tensor of length num_elements containing the keys.
"value_list" is a tuple of tensors, each one with size num_elements
in the 0th dimension for each component in the barrier's values.
"""
if name is None:
name = "%s_BarrierTakeMany" % self._name
ret = gen_data_flow_ops._barrier_take_many(self._barrier_ref,
num_elements,
self._types,
allow_small_batch,
timeout,
name=name)
# NOTE(mrry): Not using a shape function because we need access to
# the Barrier object.
op = ret[0].op
if allow_small_batch:
batch_dim = None
else:
batch_dim = tensor_shape.Dimension(
tensor_util.constant_value(op.inputs[1]))
op.outputs[0].set_shape(tensor_shape.vector(batch_dim)) # indices
op.outputs[1].set_shape(tensor_shape.vector(batch_dim)) # keys
for output, shape in zip(op.outputs[2:], self._shapes): # value_list
output.set_shape(tensor_shape.TensorShape([batch_dim]).concatenate(shape))
return ret
def close(self, cancel_pending_enqueues=False, name=None):
"""Closes this barrier.
This operation signals that no more new key values will be inserted in the
given barrier. Subsequent InsertMany operations with new keys will fail.
InsertMany operations that just complement already existing keys with other
components, will continue to succeed. Subsequent TakeMany operations will
continue to succeed if sufficient elements remain in the barrier. Subsequent
TakeMany operations that would block will fail immediately.
If `cancel_pending_enqueues` is `True`, all pending requests to the
underlying queue will also be cancelled, and completion of
already-started values will no longer be accepted.
Args:
cancel_pending_enqueues: (Optional.) A boolean, defaulting to
`False` (described above).
name: Optional name for the op.
Returns:
The operation that closes the barrier.
"""
if name is None:
name = "%s_BarrierClose" % self._name
return gen_data_flow_ops._barrier_close(
self._barrier_ref,
cancel_pending_enqueues=cancel_pending_enqueues,
name=name)
def ready_size(self, name=None):
"""Compute the number of complete elements in the given barrier.
Args:
name: A name for the operation (optional).
Returns:
A single-element tensor containing the number of complete elements in the
given barrier.
"""
if name is None:
name = "%s_BarrierReadySize" % self._name
return gen_data_flow_ops._barrier_ready_size(self._barrier_ref, name=name)
def incomplete_size(self, name=None):
"""Compute the number of incomplete elements in the given barrier.
Args:
name: A name for the operation (optional).
Returns:
A single-element tensor containing the number of incomplete elements in
the given barrier.
"""
if name is None:
name = "%s_BarrierIncompleteSize" % self._name
return gen_data_flow_ops._barrier_incomplete_size(
self._barrier_ref, name=name)
@deprecated("2017-03-02", "Use `tf.tables_initializer` instead.")
def initialize_all_tables(name="init_all_tables"):
"""Returns an Op that initializes all tables of the default graph.
Args:
name: Optional name for the initialization op.
Returns:
An Op that initializes all tables. Note that if there are
no tables, the returned Op is a NoOp.
"""
return tables_initializer(name)
def tables_initializer(name="init_all_tables"):
"""Returns an Op that initializes all tables of the default graph.
Args:
name: Optional name for the initialization op.
Returns:
An Op that initializes all tables. Note that if there are
no tables, the returned Op is a NoOp.
"""
initializers = ops.get_collection(ops.GraphKeys.TABLE_INITIALIZERS)
if initializers:
return control_flow_ops.group(*initializers, name=name)
return control_flow_ops.no_op(name=name)
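# Hedged sketch (not in the original file): run once, before any
# lookup-table reads in the default graph.
def _example_tables_initializer():
    init_op = tables_initializer()
    return init_op  # sess.run(init_op) before table lookups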
ops.NotDifferentiable("LookupTableFind")
ops.NotDifferentiable("LookupTableInsert")
ops.NotDifferentiable("LookupTableSize")
ops.NotDifferentiable("HashTable")
ops.NotDifferentiable("InitializeTable")
ops.NotDifferentiable("InitializeTableFromTextFile")
ops.NotDifferentiable("MutableDenseHashTable")
ops.NotDifferentiable("MutableHashTable")
ops.NotDifferentiable("MutableHashTableOfTensors")
class ConditionalAccumulatorBase(object):
"""A conditional accumulator for aggregating gradients.
Up-to-date gradients (i.e., time step at which gradient was computed is
equal to the accumulator's time step) are added to the accumulator.
Extraction of the average gradient is blocked until the required number of
gradients has been accumulated.
"""
def __init__(self, dtype, shape, accumulator_ref):
"""Creates a new ConditionalAccumulator.
Args:
dtype: Datatype of the accumulated gradients.
shape: Shape of the accumulated gradients.
accumulator_ref: A handle to the conditional accumulator, created by sub-
classes
"""
self._dtype = dtype
if shape is not None:
self._shape = tensor_shape.TensorShape(shape)
else:
self._shape = tensor_shape.unknown_shape()
self._accumulator_ref = accumulator_ref
self._name = self._accumulator_ref.op.name.split("/")[-1]
@property
def accumulator_ref(self):
"""The underlying accumulator reference."""
return self._accumulator_ref
@property
def name(self):
"""The name of the underlying accumulator."""
return self._name
@property
def dtype(self):
"""The datatype of the gradients accumulated by this accumulator."""
return self._dtype
def num_accumulated(self, name=None):
"""Number of gradients that have currently been aggregated in accumulator.
Args:
name: Optional name for the operation.
Returns:
Number of accumulated gradients currently in accumulator.
"""
if name is None:
name = "%s_NumAccumulated" % self._name
return gen_data_flow_ops.accumulator_num_accumulated(
self._accumulator_ref, name=name)
def set_global_step(self, new_global_step, name=None):
"""Sets the global time step of the accumulator.
The operation logs a warning if we attempt to set to a time step that is
lower than the accumulator's own time step.
Args:
new_global_step: Value of new time step. Can be a variable or a constant
name: Optional name for the operation.
Returns:
Operation that sets the accumulator's time step.
"""
return gen_data_flow_ops.accumulator_set_global_step(
self._accumulator_ref,
math_ops.to_int64(ops.convert_to_tensor(new_global_step)),
name=name)
class ConditionalAccumulator(ConditionalAccumulatorBase):
"""A conditional accumulator for aggregating gradients.
Up-to-date gradients (i.e., time step at which gradient was computed is
equal to the accumulator's time step) are added to the accumulator.
Extraction of the average gradient is blocked until the required number of
gradients has been accumulated.
"""
def __init__(self,
dtype,
shape=None,
shared_name=None,
name="conditional_accumulator"):
"""Creates a new ConditionalAccumulator.
Args:
dtype: Datatype of the accumulated gradients.
shape: Shape of the accumulated gradients.
shared_name: Optional. If non-empty, this accumulator will be shared under
the given name across multiple sessions.
name: Optional name for the accumulator.
"""
accumulator_ref = gen_data_flow_ops.conditional_accumulator(
dtype=dtype, shape=shape, shared_name=shared_name, name=name)
super(ConditionalAccumulator, self).__init__(dtype, shape, accumulator_ref)
def apply_grad(self, grad, local_step=0, name=None):
"""Attempts to apply a gradient to the accumulator.
The attempt is silently dropped if the gradient is stale, i.e., local_step
is less than the accumulator's global time step.
Args:
grad: The gradient tensor to be applied.
local_step: Time step at which the gradient was computed.
name: Optional name for the operation.
Returns:
The operation that (conditionally) applies a gradient to the accumulator.
Raises:
ValueError: If grad is of the wrong shape
"""
grad = ops.convert_to_tensor(grad, self._dtype)
grad.get_shape().assert_is_compatible_with(self._shape)
local_step = math_ops.to_int64(ops.convert_to_tensor(local_step))
return gen_data_flow_ops.accumulator_apply_gradient(
self._accumulator_ref, local_step=local_step, gradient=grad, name=name)
def take_grad(self, num_required, name=None):
"""Attempts to extract the average gradient from the accumulator.
The operation blocks until sufficient number of gradients have been
successfully applied to the accumulator.
Once successful, the following actions are also triggered:
- Counter of accumulated gradients is reset to 0.
- Aggregated gradient is reset to 0 tensor.
- Accumulator's internal time step is incremented by 1.
Args:
num_required: Number of gradients that need to have been aggregated
name: Optional name for the operation
Returns:
A tensor holding the value of the average gradient.
Raises:
InvalidArgumentError: If num_required < 1
"""
return gen_data_flow_ops.accumulator_take_gradient(
self._accumulator_ref, num_required, dtype=self._dtype, name=name)
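# Hedged sketch (not in the original file): fresh gradients
# (local_step >= the accumulator's step) are summed; take_grad blocks
# until num_required arrive, then returns their average and bumps the
# time step.
def _example_conditional_accumulator():
    acc = ConditionalAccumulator(_dtypes.float32, shape=())
    apply_a = acc.apply_grad(10.0, local_step=0)
    apply_b = acc.apply_grad(20.0, local_step=0)
    mean_grad = acc.take_grad(num_required=2)  # evaluates to 15.0
    return apply_a, apply_b, mean_grad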
class SparseConditionalAccumulator(ConditionalAccumulatorBase):
"""A conditional accumulator for aggregating sparse gradients.
Sparse gradients are represented by IndexedSlices.
Up-to-date gradients (i.e., time step at which gradient was computed is
equal to the accumulator's time step) are added to the accumulator.
Extraction of the average gradient is blocked until the required number of
gradients has been accumulated.
Args:
dtype: Datatype of the accumulated gradients.
shape: Shape of the accumulated gradients.
shared_name: Optional. If non-empty, this accumulator will be shared under
the given name across multiple sessions.
name: Optional name for the accumulator.
"""
def __init__(self,
dtype,
shape=None,
shared_name=None,
name="sparse_conditional_accumulator"):
accumulator_ref = gen_data_flow_ops.sparse_conditional_accumulator(
dtype=dtype, shape=shape, shared_name=shared_name, name=name)
super(SparseConditionalAccumulator,
self).__init__(dtype, shape, accumulator_ref)
def apply_indexed_slices_grad(self, grad, local_step=0, name=None):
"""Attempts to apply a gradient to the accumulator.
The attempt is silently dropped if the gradient is stale, i.e., local_step
is less than the accumulator's global time step.
Args:
grad: The gradient IndexedSlices to be applied.
local_step: Time step at which the gradient was computed.
name: Optional name for the operation.
Returns:
The operation that (conditionally) applies a gradient to the accumulator.
Raises:
InvalidArgumentError: If grad is of the wrong shape
"""
return self.apply_grad(
grad_indices=grad.indices,
grad_values=grad.values,
grad_shape=grad.dense_shape,
local_step=local_step,
name=name)
def apply_grad(self,
grad_indices,
grad_values,
grad_shape=None,
local_step=0,
name=None):
"""Attempts to apply a sparse gradient to the accumulator.
The attempt is silently dropped if the gradient is stale, i.e., local_step
is less than the accumulator's global time step.
A sparse gradient is represented by its indices, values and possibly empty
or None shape. Indices must be a vector representing the locations of
non-zero entries in the tensor. Values are the non-zero slices of the
gradient, and must have the same first dimension as indices, i.e., the nnz
represented by indices and values must be consistent. Shape, if not empty or
None, must be consistent with the accumulator's shape (if also provided).
Example:
A tensor [[0, 0], [0, 1], [2, 3]] can be represented by:
indices: [1,2]
values: [[0,1],[2,3]]
shape: [3, 2]
Args:
grad_indices: Indices of the sparse gradient to be applied.
grad_values: Values of the sparse gradient to be applied.
grad_shape: Shape of the sparse gradient to be applied.
local_step: Time step at which the gradient was computed.
name: Optional name for the operation.
Returns:
The operation that (conditionally) applies a gradient to the accumulator.
Raises:
InvalidArgumentError: If grad is of the wrong shape
"""
local_step = math_ops.to_int64(ops.convert_to_tensor(local_step))
return gen_data_flow_ops.sparse_accumulator_apply_gradient(
self._accumulator_ref,
local_step=local_step,
gradient_indices=math_ops.to_int64(grad_indices),
gradient_values=grad_values,
gradient_shape=math_ops.to_int64([] if grad_shape is None else
grad_shape),
has_known_shape=(grad_shape is not None),
name=name)
def take_grad(self, num_required, name=None):
"""Attempts to extract the average gradient from the accumulator.
The operation blocks until a sufficient number of gradients have been
successfully applied to the accumulator.
Once successful, the following actions are also triggered:
- Counter of accumulated gradients is reset to 0.
- Aggregated gradient is reset to 0 tensor.
- Accumulator's internal time step is incremented by 1.
Args:
num_required: Number of gradients that need to have been aggregated.
name: Optional name for the operation.
Returns:
A tuple of indices, values, and shape representing the average gradient.
Raises:
InvalidArgumentError: If num_required < 1
"""
return gen_data_flow_ops.sparse_accumulator_take_gradient(
self._accumulator_ref, num_required, dtype=self._dtype, name=name)
def take_indexed_slices_grad(self, num_required, name=None):
"""Attempts to extract the average gradient from the accumulator.
The operation blocks until a sufficient number of gradients have been
successfully applied to the accumulator.
Once successful, the following actions are also triggered:
- Counter of accumulated gradients is reset to 0.
- Aggregated gradient is reset to 0 tensor.
- Accumulator's internal time step is incremented by 1.
Args:
num_required: Number of gradients that need to have been aggregated.
name: Optional name for the operation.
Returns:
An IndexedSlices holding the value of the average gradient.
Raises:
InvalidArgumentError: If num_required < 1
"""
return_val = gen_data_flow_ops.sparse_accumulator_take_gradient(
self._accumulator_ref, num_required, dtype=self._dtype, name=name)
return ops.IndexedSlices(
indices=return_val.indices,
values=return_val.values,
dense_shape=return_val.shape)
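# ---------------------------------------------------------------------------
# Editor's note: hedged sketch (not part of the original module) of the sparse
# apply/take round trip, assuming the TF 1.x API where this class was exposed
# as tf.SparseConditionalAccumulator; values mirror the docstring example.
#
#   acc = tf.SparseConditionalAccumulator(dtype=tf.float32, shape=[3, 2])
#   apply_op = acc.apply_grad(grad_indices=[1, 2],
#                             grad_values=[[0., 1.], [2., 3.]],
#                             grad_shape=[3, 2])
#   avg = acc.take_indexed_slices_grad(num_required=1)
#   with tf.Session() as sess:
#       sess.run(apply_op)
#       print(sess.run(avg))   # indices [1, 2], values [[0, 1], [2, 3]]
# ---------------------------------------------------------------------------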
class StagingArea(object):
"""Class for staging inputs. No ordering guarantees.
A `StagingArea` is a TensorFlow data structure that stores tensors across
multiple steps, and exposes operations that can put and get
tensors.
Each `StagingArea` element is a tuple of one or more tensors, where each
tuple component has a static dtype, and may have a static shape.
The capacity of a `StagingArea` is unbounded; it supports multiple
concurrent producers and consumers and provides exactly-once delivery.
Each element of a `StagingArea` is a fixed-length tuple of tensors whose
dtypes are described by `dtypes`, and whose shapes are optionally described
by the `shapes` argument.
If the `shapes` argument is specified, each component of a staging area
element must have the respective fixed shape. If it is
unspecified, different elements may have different shapes.
"""
_identifier = 0
_lock = threading.Lock()
def __init__(self, dtypes, shapes=None, names=None, shared_name=None):
"""Constructs a staging area object.
The two optional lists, `shapes` and `names`, must be of the same length
as `dtypes` if provided. The values at a given index `i` indicate the
shape and name to use for the corresponding queue component in `dtypes`.
The device scope at the time of object creation determines where the
storage for the `StagingArea` will reside. Calls to `put` will incur a copy
to this memory space, if necessary. Tensors returned by `get` will be
placed according to the device scope when `get` is called.
Args:
dtypes: A list of types. The length of dtypes must equal the number
of tensors in each element.
shapes: (Optional.) Constraints on the shapes of tensors in an element.
A list of shape tuples or None. This list is the same length
as dtypes. If the shape of any tensors in the element are constrained,
all must be; shapes can be None if the shapes should not be constrained.
names: (Optional.) If provided, the `get()` and
`put()` methods will use dictionaries with these names as keys.
Must be None or a list or tuple of the same length as `dtypes`.
shared_name: (Optional.) A name to be used for the shared object. If the
same name is passed to two different Python objects, they will share
the underlying staging area. Must be a string.
Raises:
ValueError: If one of the arguments is invalid.
"""
if shared_name is None:
self._name = ops.get_default_graph().unique_name("StagingArea")
elif isinstance(shared_name, six.string_types):
self._name = shared_name
else:
raise ValueError("shared_name must be a string")
self._dtypes = dtypes
if shapes is not None:
if len(shapes) != len(dtypes):
raise ValueError("StagingArea shapes must be the same length as dtypes")
self._shapes = [tensor_shape.TensorShape(s) for s in shapes]
else:
self._shapes = [tensor_shape.unknown_shape() for _ in self._dtypes]
if names is not None:
if len(names) != len(dtypes):
raise ValueError("StagingArea names must be the same length as dtypes")
self._names = names
else:
self._names = None
# all get and put ops must colocate with this op
with ops.name_scope("%s_root" % self._name):
self._coloc_op = control_flow_ops.no_op()
@property
def name(self):
"""The name of the staging area."""
return self._name
@property
def dtypes(self):
"""The list of dtypes for each component of a staging area element."""
return self._dtypes
@property
def shapes(self):
"""The list of shapes for each component of a staging area element."""
return self._shapes
@property
def names(self):
"""The list of names for each component of a staging area element."""
return self._names
def _check_put_dtypes(self, vals):
"""Validate and convert `vals` to a list of `Tensor`s.
The `vals` argument can be a Tensor, a list or tuple of tensors, or a
dictionary with tensor values.
If it is a dictionary, the staging area must have been constructed with a
`names` attribute and the dictionary keys must match the staging area names.
If the staging area was constructed with a `names` attribute, `vals` must
be a dictionary.
Args:
vals: A tensor, a list or tuple of tensors, or a dictionary.
Returns:
A list of `Tensor` objects.
Raises:
ValueError: If `vals` is invalid.
"""
if isinstance(vals, dict):
if not self._names:
raise ValueError(
"Staging areas must have names to enqueue a dictionary")
if sorted(self._names) != sorted(vals.keys()):
raise ValueError("Keys in dictionary to put do not match names "
"of staging area. Dictionary: (%s), Queue: (%s)" %
(sorted(vals.keys()), sorted(self._names)))
# The order of values in `self._names` indicates the order in which the
# tensors in the dictionary `vals` must be listed.
vals = [vals[k] for k in self._names]
else:
if self._names:
raise ValueError("You must enqueue a dictionary in a staging area "
"with names")
if not isinstance(vals, (list, tuple)):
vals = [vals]
tensors = []
for i, (val, dtype) in enumerate(zip(vals, self._dtypes)):
tensors.append(
ops.convert_to_tensor(
val, dtype=dtype, name="component_%d" % i))
return tensors
def _scope_vals(self, vals):
"""Return a list of values to pass to `name_scope()`.
Args:
vals: A tensor, a list or tuple of tensors, or a dictionary.
Returns:
The values in vals as a list.
"""
if isinstance(vals, (list, tuple)):
return vals
elif isinstance(vals, dict):
return vals.values()
else:
return [vals]
def put(self, values, name=None):
"""Create an op that places a value into the staging area.
Args:
values: Tensor (or a tuple of Tensors) to place into the staging area.
name: A name for the operation (optional).
Returns:
The created op.
Raises:
ValueError: If the number or type of inputs don't match the staging area.
"""
with ops.name_scope(name, "%s_put" % self._name,
self._scope_vals(values)) as scope:
vals = self._check_put_dtypes(values)
if len(vals) != len(self._dtypes):
raise ValueError("Unexpected number of inputs " + str(len(vals)) +
" vs " + str(len(self._dtypes)))
for val, dtype in zip(vals, self._dtypes):
if val.dtype != dtype:
raise ValueError("Datatypes do not match. " + str(val.dtype) + " != "
+ str(dtype))
for val, shape in zip(vals, self._shapes):
val.get_shape().assert_is_compatible_with(shape)
with ops.colocate_with(self._coloc_op):
op = gen_data_flow_ops.stage(values=vals, shared_name=self._name,
name=scope)
return op
def _get_return_value(self, tensors):
"""Return the value to return from a get op.
If the staging area has names, return a dictionary with the
names as keys. Otherwise return either a single tensor
or a list of tensors depending on the length of `tensors`.
Args:
tensors: List of tensors from the get op.
Returns:
A single tensor, a list of tensors, or a dictionary
of tensors.
"""
if self._names:
# The returned values in `tensors` are in the same order as
# the names in `self._names`.
return {n: tensors[i] for i, n in enumerate(self._names)}
elif len(tensors) == 1:
return tensors[0]
else:
return tensors
def get(self, name=None):
"""Gets one element from this staging area.
If the staging area is empty when this operation executes, it will block
until there is an element to dequeue.
The placement of the returned tensor will be determined by the current
device scope when this function is called.
Args:
name: A name for the operation (optional).
Returns:
The tuple of tensors that was gotten.
"""
if name is None:
name = "%s_get" % self._name
with ops.colocate_with(self._coloc_op):
ret = gen_data_flow_ops.unstage(dtypes=self._dtypes,
shared_name=self._name, name=name)
curr_device_scope = control_flow_ops.no_op().device
if curr_device_scope != self._coloc_op.device:
for i in range(len(ret)):
ret[i] = array_ops.identity(ret[i])
for output, shape in zip(ret, self._shapes):
output.set_shape(shape)
return self._get_return_value(ret)
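# ---------------------------------------------------------------------------
# Editor's note: hedged usage sketch (not part of the original module) of the
# put/get cycle described above, assuming the TF 1.x graph/session API. With a
# single unnamed component, get() returns a single tensor rather than a tuple.
#
#   area = StagingArea(dtypes=[tf.float32], shapes=[()])
#   put_op = area.put([tf.constant(42.0)])
#   got = area.get()
#   with tf.Session() as sess:
#       sess.run(put_op)       # stage one element
#       value = sess.run(got)  # unstage it on the staging area's device
# ---------------------------------------------------------------------------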
class RecordInput(object):
"""RecordInput asynchronously reads and randomly yields TFRecords.
A RecordInput Op will continuously read a batch of records asynchronously
into a buffer of some fixed capacity. It can also asynchronously yield
random records from this buffer.
It will not start yielding until at least `buffer_size / 2` elements have been
placed into the buffer so that sufficient randomization can take place.
The order the files are read will be shifted each epoch by `shift_amount` so
that the data is presented in a different order every epoch.
"""
def __init__(self,
file_pattern,
batch_size=1,
buffer_size=1,
parallelism=1,
shift_ratio=0,
seed=0,
name=None):
"""Constructs a RecordInput Op.
Args:
file_pattern: File path to the dataset, possibly containing wildcards.
All matching files will be iterated over each epoch.
batch_size: How many records to return at a time.
buffer_size: The maximum number of records the buffer will contain. This
_must_ be smaller than the total number of records in an epoch or
deadlock can occur.
parallelism: How many reader threads to use for reading from files.
shift_ratio: What fraction of the total number of files to move the
start file forward by each epoch.
seed: Specifies the random number seed used by the generator that
randomizes records.
name: Optional name for the operation.
Raises:
ValueError: If one of the arguments is invalid.
"""
self._batch_size = batch_size
self._file_pattern = file_pattern
self._buffer_size = buffer_size
self._parallelism = parallelism
self._shift_ratio = shift_ratio
self._seed = seed
self._name = name
def get_yield_op(self):
"""Add a node that yields a minibatch every time it is executed."""
return gen_data_flow_ops.record_input(
file_pattern=self._file_pattern,
file_buffer_size=self._buffer_size,
file_parallelism=self._parallelism,
file_shuffle_shift_ratio=self._shift_ratio,
batch_size=self._batch_size,
file_random_seed=self._seed,
name=self._name)
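# ---------------------------------------------------------------------------
# Editor's note: hedged usage sketch (not part of the original module); the
# file pattern and sizes are illustrative. In TF 1.x this class was also
# reachable as tf.RecordInput.
#
#   ri = RecordInput(file_pattern="/data/train-*.tfrecord",
#                    batch_size=32, buffer_size=10000, parallelism=4)
#   records = ri.get_yield_op()   # one randomized minibatch per session.run()
#   with tf.Session() as sess:
#       batch = sess.run(records)
# ---------------------------------------------------------------------------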
| apache-2.0 |
darjeeling/django | tests/select_related/models.py | 79 | 3040 | """
Tests for select_related()
``select_related()`` follows all relationships and pre-caches any foreign key
values so that complex trees can be fetched in a single query. However, this
isn't always a good idea, so the ``depth`` argument controls how many "levels"
the select-related behavior will traverse.
"""
from django.contrib.contenttypes.fields import (
GenericForeignKey, GenericRelation,
)
from django.contrib.contenttypes.models import ContentType
from django.db import models
# Who remembers high school biology?
class Domain(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return self.name
class Kingdom(models.Model):
name = models.CharField(max_length=50)
domain = models.ForeignKey(Domain, models.CASCADE)
def __str__(self):
return self.name
class Phylum(models.Model):
name = models.CharField(max_length=50)
kingdom = models.ForeignKey(Kingdom, models.CASCADE)
def __str__(self):
return self.name
class Klass(models.Model):
name = models.CharField(max_length=50)
phylum = models.ForeignKey(Phylum, models.CASCADE)
def __str__(self):
return self.name
class Order(models.Model):
name = models.CharField(max_length=50)
klass = models.ForeignKey(Klass, models.CASCADE)
def __str__(self):
return self.name
class Family(models.Model):
name = models.CharField(max_length=50)
order = models.ForeignKey(Order, models.CASCADE)
def __str__(self):
return self.name
class Genus(models.Model):
name = models.CharField(max_length=50)
family = models.ForeignKey(Family, models.CASCADE)
def __str__(self):
return self.name
class Species(models.Model):
name = models.CharField(max_length=50)
genus = models.ForeignKey(Genus, models.CASCADE)
def __str__(self):
return self.name
# and we'll invent a new thing so we have a model with two foreign keys
class HybridSpecies(models.Model):
name = models.CharField(max_length=50)
parent_1 = models.ForeignKey(Species, models.CASCADE, related_name='child_1')
parent_2 = models.ForeignKey(Species, models.CASCADE, related_name='child_2')
def __str__(self):
return self.name
class Topping(models.Model):
name = models.CharField(max_length=30)
def __str__(self):
return self.name
class Pizza(models.Model):
name = models.CharField(max_length=100)
toppings = models.ManyToManyField(Topping)
def __str__(self):
return self.name
class TaggedItem(models.Model):
tag = models.CharField(max_length=30)
content_type = models.ForeignKey(ContentType, models.CASCADE, related_name='select_related_tagged_items')
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey('content_type', 'object_id')
def __str__(self):
return self.tag
class Bookmark(models.Model):
url = models.URLField()
tags = GenericRelation(TaggedItem)
def __str__(self):
return self.url
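# Editor's note: hedged illustration (not part of the original test module) of
# the single-query traversal the module docstring describes, using the models
# defined above; the lookup value 'sapiens' is illustrative.
#
#   s = Species.objects.select_related(
#       'genus__family__order__klass__phylum__kingdom__domain',
#   ).get(name='sapiens')
#   s.genus.family.order.klass.phylum.kingdom.domain  # no further queries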
| bsd-3-clause |
ukanga/SickRage | lib/hachoir_parser/file_system/mbr.py | 86 | 7764 | """
Master Boot Record.
"""
# cfdisk uses the following algorithm to compute the geometry:
# 0. Use the values given by the user.
# 1. Try to guess the geometry from the partition table:
# if all the used partitions end at the same head H and the
# same sector S, then there are (H+1) heads and S sectors/cylinder.
# 2. Ask the system (ioctl/HDIO_GETGEO).
# 3. 255 heads and 63 sectors/cylinder.
from hachoir_parser import Parser
from hachoir_core.field import (FieldSet,
Enum, Bits, UInt8, UInt16, UInt32,
RawBytes)
from hachoir_core.endian import LITTLE_ENDIAN
from hachoir_core.tools import humanFilesize
from hachoir_core.text_handler import textHandler, hexadecimal
BLOCK_SIZE = 512 # bytes
class CylinderNumber(Bits):
def __init__(self, parent, name, description=None):
Bits.__init__(self, parent, name, 10, description)
def createValue(self):
i = self.parent.stream.readInteger(
self.absolute_address, False, self._size, self.parent.endian)
return i >> 2 | i % 4 << 8
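# Editor's note: worked example (editor's addition) of the bit shuffle in
# createValue, assuming the raw 10-bit read delivers the cylinder's low byte
# first, i.e. raw = (cyl[7:0] << 2) | cyl[9:8].
#
#   raw = (0xFF << 2) | 0b10                 # cylinder 0x2FF as read from disk
#   assert raw >> 2 | raw % 4 << 8 == 0x2FF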
class PartitionHeader(FieldSet):
static_size = 16*8
# taken from the source of cfdisk:
# sed -n 's/.*{\(.*\), N_(\(.*\))}.*/ \1: \2,/p' i386_sys_types.c
system_name = {
0x00: "Empty",
0x01: "FAT12",
0x02: "XENIX root",
0x03: "XENIX usr",
0x04: "FAT16 <32M",
0x05: "Extended",
0x06: "FAT16",
0x07: "HPFS/NTFS",
0x08: "AIX",
0x09: "AIX bootable",
0x0a: "OS/2 Boot Manager",
0x0b: "W95 FAT32",
0x0c: "W95 FAT32 (LBA)",
0x0e: "W95 FAT16 (LBA)",
0x0f: "W95 Ext'd (LBA)",
0x10: "OPUS",
0x11: "Hidden FAT12",
0x12: "Compaq diagnostics",
0x14: "Hidden FAT16 <32M",
0x16: "Hidden FAT16",
0x17: "Hidden HPFS/NTFS",
0x18: "AST SmartSleep",
0x1b: "Hidden W95 FAT32",
0x1c: "Hidden W95 FAT32 (LBA)",
0x1e: "Hidden W95 FAT16 (LBA)",
0x24: "NEC DOS",
0x39: "Plan 9",
0x3c: "PartitionMagic recovery",
0x40: "Venix 80286",
0x41: "PPC PReP Boot",
0x42: "SFS",
0x4d: "QNX4.x",
0x4e: "QNX4.x 2nd part",
0x4f: "QNX4.x 3rd part",
0x50: "OnTrack DM",
0x51: "OnTrack DM6 Aux1",
0x52: "CP/M",
0x53: "OnTrack DM6 Aux3",
0x54: "OnTrackDM6",
0x55: "EZ-Drive",
0x56: "Golden Bow",
0x5c: "Priam Edisk",
0x61: "SpeedStor",
0x63: "GNU HURD or SysV",
0x64: "Novell Netware 286",
0x65: "Novell Netware 386",
0x70: "DiskSecure Multi-Boot",
0x75: "PC/IX",
0x80: "Old Minix",
0x81: "Minix / old Linux",
0x82: "Linux swap / Solaris",
0x83: "Linux (ext2/ext3)",
0x84: "OS/2 hidden C: drive",
0x85: "Linux extended",
0x86: "NTFS volume set",
0x87: "NTFS volume set",
0x88: "Linux plaintext",
0x8e: "Linux LVM",
0x93: "Amoeba",
0x94: "Amoeba BBT",
0x9f: "BSD/OS",
0xa0: "IBM Thinkpad hibernation",
0xa5: "FreeBSD",
0xa6: "OpenBSD",
0xa7: "NeXTSTEP",
0xa8: "Darwin UFS",
0xa9: "NetBSD",
0xab: "Darwin boot",
0xb7: "BSDI fs",
0xb8: "BSDI swap",
0xbb: "Boot Wizard hidden",
0xbe: "Solaris boot",
0xbf: "Solaris",
0xc1: "DRDOS/sec (FAT-12)",
0xc4: "DRDOS/sec (FAT-16 < 32M)",
0xc6: "DRDOS/sec (FAT-16)",
0xc7: "Syrinx",
0xda: "Non-FS data",
0xdb: "CP/M / CTOS / ...",
0xde: "Dell Utility",
0xdf: "BootIt",
0xe1: "DOS access",
0xe3: "DOS R/O",
0xe4: "SpeedStor",
0xeb: "BeOS fs",
0xee: "EFI GPT",
0xef: "EFI (FAT-12/16/32)",
0xf0: "Linux/PA-RISC boot",
0xf1: "SpeedStor",
0xf4: "SpeedStor",
0xf2: "DOS secondary",
0xfd: "Linux raid autodetect",
0xfe: "LANstep",
0xff: "BBT"
}
def createFields(self):
yield UInt8(self, "bootable", "Bootable flag (true if equals to 0x80)")
if self["bootable"].value not in (0x00, 0x80):
self.warning("Stream doesn't look like master boot record (partition bootable error)!")
yield UInt8(self, "start_head", "Starting head number of the partition")
yield Bits(self, "start_sector", 6, "Starting sector number of the partition")
yield CylinderNumber(self, "start_cylinder", "Starting cylinder number of the partition")
yield Enum(UInt8(self, "system", "System indicator"), self.system_name)
yield UInt8(self, "end_head", "Ending head number of the partition")
yield Bits(self, "end_sector", 6, "Ending sector number of the partition")
yield CylinderNumber(self, "end_cylinder", "Ending cylinder number of the partition")
yield UInt32(self, "LBA", "LBA (number of sectors before this partition)")
yield UInt32(self, "size", "Size (block count)")
def isUsed(self):
return self["system"].value != 0
def createDescription(self):
desc = "Partition header: "
if self.isUsed():
system = self["system"].display
size = self["size"].value * BLOCK_SIZE
desc += "%s, %s" % (system, humanFilesize(size))
else:
desc += "(unused)"
return desc
class MasterBootRecord(FieldSet):
static_size = 512*8
def createFields(self):
yield RawBytes(self, "program", 446, "Boot program (Intel x86 machine code)")
yield PartitionHeader(self, "header[0]")
yield PartitionHeader(self, "header[1]")
yield PartitionHeader(self, "header[2]")
yield PartitionHeader(self, "header[3]")
yield textHandler(UInt16(self, "signature", "Signature (0xAA55)"), hexadecimal)
def _getPartitions(self):
return ( self[index] for index in xrange(1,5) )
headers = property(_getPartitions)
class Partition(FieldSet):
def createFields(self):
mbr = MasterBootRecord(self, "mbr")
yield mbr
# No error if we only want to analyse a backup of a mbr
if self.eof:
return
for start, index, header in sorted((hdr["LBA"].value, index, hdr)
for index, hdr in enumerate(mbr.headers) if hdr.isUsed()):
# Seek to the beginning of the partition
padding = self.seekByte(start * BLOCK_SIZE, "padding[]")
if padding:
yield padding
# Content of the partition
name = "partition[%u]" % index
size = BLOCK_SIZE * header["size"].value
desc = header["system"].display
if header["system"].value == 5:
yield Partition(self, name, desc, size * 8)
else:
yield RawBytes(self, name, size, desc)
# Padding at the end
if self.current_size < self._size:
yield self.seekBit(self._size, "end")
class MSDos_HardDrive(Parser, Partition):
endian = LITTLE_ENDIAN
MAGIC = "\x55\xAA"
PARSER_TAGS = {
"id": "msdos_harddrive",
"category": "file_system",
"description": "MS-DOS hard drive with Master Boot Record (MBR)",
"min_size": 512*8,
"file_ext": ("",),
# "magic": ((MAGIC, 510*8),),
}
def validate(self):
if self.stream.readBytes(510*8, 2) != self.MAGIC:
return "Invalid signature"
used = False
for hdr in self["mbr"].headers:
if hdr["bootable"].value not in (0x00, 0x80):
return "Wrong boot flag"
used |= hdr.isUsed()
return used or "No partition found"
| gpl-3.0 |
2uller/LotF | App/Lib/site-packages/numpy/oldnumeric/alter_code2.py | 101 | 4635 | """
This module converts code written for numpy.oldnumeric to work
with numpy
FIXME: Flesh this out.
Makes the following changes:
* Converts typecharacters '1swu' to 'bhHI' respectively
when used as typecodes
* Changes import statements
* Change typecode= to dtype=
* Eliminates savespace=xxx keyword arguments
* Removes it when keyword is not given as well
* replaces matrixmultiply with dot
* converts calls that omit the axis= keyword to functions whose default changed
* converts calls that omit the typecode= keyword to functions whose default changed
* converts use of capitalized type-names
* converts old function names in oldnumeric.linear_algebra,
oldnumeric.random_array, and oldnumeric.fft
"""
#__all__ = ['convertfile', 'convertall', 'converttree']
__all__ = []
import warnings
warnings.warn("numpy.oldnumeric.alter_code2 is not working yet.")
import sys
import os
import re
import glob
# To convert typecharacters we would need the replacements below.
# Not very safe, so it is disabled for now.
def replacetypechars(astr):
astr = astr.replace("'s'","'h'")
astr = astr.replace("'b'","'B'")
astr = astr.replace("'1'","'b'")
astr = astr.replace("'w'","'H'")
astr = astr.replace("'u'","'I'")
return astr
def changeimports(fstr, name, newname):
importstr = 'import %s' % name
importasstr = 'import %s as ' % name
fromstr = 'from %s import ' % name
fromall=0
fstr = fstr.replace(importasstr, 'import %s as ' % newname)
fstr = fstr.replace(importstr, 'import %s as %s' % (newname,name))
ind = 0
Nlen = len(fromstr)
Nlen2 = len("from %s import " % newname)
while 1:
found = fstr.find(fromstr,ind)
if (found < 0):
break
ind = found + Nlen
if fstr[ind] == '*':
continue
fstr = "%sfrom %s import %s" % (fstr[:found], newname, fstr[ind:])
ind += Nlen2 - Nlen
return fstr, fromall
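# Editor's note: hedged illustration (editor's addition); the source string is
# made up. The from-import branch rewrites the module path in place:
#
#   src = "from numpy.oldnumeric import array\nx = array([1, 2])\n"
#   out, _ = changeimports(src, 'numpy.oldnumeric', 'numpy')
#   # out == "from numpy import array\nx = array([1, 2])\n"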
def replaceattr(astr):
astr = astr.replace("matrixmultiply","dot")
return astr
def replaceother(astr):
astr = re.sub(r'typecode\s*=', 'dtype=', astr)
astr = astr.replace('ArrayType', 'ndarray')
astr = astr.replace('NewAxis', 'newaxis')
return astr
import datetime
def fromstr(filestr):
#filestr = replacetypechars(filestr)
filestr, fromall1 = changeimports(filestr, 'numpy.oldnumeric', 'numpy')
filestr, fromall1 = changeimports(filestr, 'numpy.core.multiarray', 'numpy')
filestr, fromall1 = changeimports(filestr, 'numpy.core.umath', 'numpy')
filestr, fromall3 = changeimports(filestr, 'LinearAlgebra',
'numpy.linalg.old')
filestr, fromall3 = changeimports(filestr, 'RNG', 'numpy.random.oldrng')
filestr, fromall3 = changeimports(filestr, 'RNG.Statistics', 'numpy.random.oldrngstats')
filestr, fromall3 = changeimports(filestr, 'RandomArray', 'numpy.random.oldrandomarray')
filestr, fromall3 = changeimports(filestr, 'FFT', 'numpy.fft.old')
filestr, fromall3 = changeimports(filestr, 'MA', 'numpy.core.ma')
fromall = fromall1 or fromall3 # fromall2 was never assigned above; avoid a NameError
filestr = replaceattr(filestr)
filestr = replaceother(filestr)
today = datetime.date.today().strftime('%b %d, %Y')
name = os.path.split(sys.argv[0])[-1]
filestr = '## Automatically adapted for '\
'numpy %s by %s\n\n%s' % (today, name, filestr)
return filestr
def makenewfile(name, filestr):
fid = file(name, 'w')
fid.write(filestr)
fid.close()
def getandcopy(name):
fid = file(name)
filestr = fid.read()
fid.close()
base, ext = os.path.splitext(name)
makenewfile(base+'.orig', filestr)
return filestr
def convertfile(filename):
"""Convert the filename given from using Numeric to using NumPy
Copies the file to filename.orig and then over-writes the file
with the updated code
"""
filestr = getandcopy(filename)
filestr = fromstr(filestr)
makenewfile(filename, filestr)
def fromargs(args):
filename = args[1]
convertfile(filename)
def convertall(direc=os.path.curdir):
"""Convert all .py files to use NumPy (from Numeric) in the directory given
For each file, a backup of <usesnumeric>.py is made as
<usesnumeric>.py.orig. A new file named <usesnumeric>.py
is then written with the updated code.
"""
files = glob.glob(os.path.join(direc,'*.py'))
for afile in files:
convertfile(afile)
def _func(arg, dirname, fnames):
convertall(dirname)
def converttree(direc=os.path.curdir):
"""Convert all .py files in the tree given
"""
os.path.walk(direc, _func, None)
if __name__ == '__main__':
fromargs(sys.argv)
| gpl-2.0 |
waile23/todo | models/pduser.py | 1 | 2906 | # -*- coding: utf-8 -*-
from basemodel import *
import md5
import math
import sys
class PDuser(BaseModel):
'''Model auto-generated by createModel.'''
table_name = 'pd_user'
#db_name = 'todo_local'
db_name = web.config.write_db_name
def _format_user(self, row):
if hasattr(row, 'u_logo'):
if not row.u_logo:
row.u_logo = "/static/img/default_logo.png"
return row
def load_by_id(self, id, iscache=True, isformat=True):
mkey = self.create_pri_cache_key(u_id=id)
ret = BaseModel.memget(mkey)
if not iscache or not ret:
rows = self.reader().select(self.table_name, where="u_id=$uid", vars={"uid":id})
for row in rows:
if isformat:
ret = self._format_user(row)
else:
ret = row
break
BaseModel.memset(mkey, ret)
return ret
def check_name(self, name,loginid=0):
ret = self.reader().select(self.table_name, where="u_name=$name and u_id not in ($loginid)", vars={"name":name,"loginid":loginid})
for v in ret:
return True
return False
def check_name_count(self, name):
ret = self.reader().select(self.table_name,what="count(1) as count", where="u_name=$name", vars={"name":name})
for v in ret:
return v.count
return 0
def check_email(self, email,loginid=0):
ret = self.reader().select(self.table_name, where="u_email=$email and u_id not in ($loginid)", vars={"email":email,"loginid":loginid})
for v in ret:
return True
return False
def user_list(self,page=0,size=15,iscache=True,isformat=True):
mkey=md5.new(self.__class__.__name__+"."+sys._getframe().f_code.co_name+"_page_"+str(page)+"_size_"+str(size)).hexdigest()
ret=BaseModel.memget(mkey)
if not iscache or not ret:
ret=[]
ret_i = self.reader().select(self.table_name,order="u_create_time desc",limit=size,offset=page*size)
for row in ret_i:
if isformat:
ret.append(self._format_user(row))
else:
ret.append(row)
BaseModel.memset(mkey,ret)
return ret
def loaduser_by_email(self, email):
rows = self.reader().select(self.table_name, where="u_email=$email", vars={"email":email})
ret = None
for row in rows:
ret = row
break
return ret
def loaduser_by_social(self, fr, auth):
rows = self.reader().select(self.table_name, where="u_from='" + fr + "' and u_auth='" + auth + "'")
ret = None
for row in rows:
ret = row
break
return ret
def insert_by_list(self, rows):
ret = self.writer().multiple_insert(self.table_name, rows)
for i in ret:
self.memdel(self.create_pri_cache_key(u_id=i))
return ret
def update_by_insert(self, row):
sql = ["update"]
sql.append(self.table_name)
sql.append("set")
tmp = []
for k in row:
tmp.append(k + "=$" + k)
sql.append(",".join(tmp))
sql.append("where u_id=$u_id")
sqlstr = " ".join(sql)
self.writer().query(sqlstr, row)
self.memdel(self.create_pri_cache_key(u_id=row.u_id))
pduser = PDuser() #public instance
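# Editor's note: hedged usage sketch (not part of the original module); the
# row values below are illustrative. update_by_insert builds
# "update pd_user set <k>=$<k>, ... where u_id=$u_id" from the row's keys,
# runs it with the row as SQL parameters, then drops the cached copy.
#
#   row = web.Storage({'u_id': 7, 'u_name': 'alice'})
#   pduser.update_by_insert(row)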
| mit |
Michal-Fularz/codingame_solutions | codingame_solutions/medium/medium_The_Paranoid_Android.py | 1 | 3099 | __author__ = 'Amin'
# COMPLETED
# PYTHON 3.x
import sys
import math
class Floor:
def __init__(self, width, contains_exit=False, exit_position=-1):
self.width = width
self.__contains_elevator = False
self.__elevator_position = -1
self.__contains_exit = contains_exit
self.__exit_position = exit_position
def add_exit(self, exit_position):
self.__contains_exit = True
self.__exit_position = exit_position
def add_elevator(self, elevator_position):
self.__contains_elevator = True
self.__elevator_position = elevator_position
def should_be_blocked(self, position, direction):
flag_should_be_blocked = False
if self.__contains_elevator:
if position > self.__elevator_position and direction == "RIGHT" or \
position < self.__elevator_position and direction == "LEFT":
flag_should_be_blocked = True
elif self.__contains_exit:
if position > self.__exit_position and direction == "RIGHT" or \
position < self.__exit_position and direction == "LEFT":
flag_should_be_blocked = True
return flag_should_be_blocked
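# Editor's note: illustrative check (editor's addition) of the blocking rule:
# a clone on the elevator's floor is blocked only when walking away from it.
#
#   f = Floor(width=10)
#   f.add_elevator(4)
#   assert f.should_be_blocked(position=6, direction="RIGHT")      # walking away
#   assert not f.should_be_blocked(position=2, direction="RIGHT")  # walking toward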
class Drive:
def __init__(self):
self.floors = []
self.load_from_input()
def load_from_input(self):
# nb_floors: number of floors
# width: width of the area
# nb_rounds: maximum number of rounds
# exit_floor: floor on which the exit is found
# exit_pos: position of the exit on its floor
# nb_total_clones: number of generated clones
# nb_additional_elevators: ignore (always zero)
# nb_elevators: number of elevators
nb_floors, width, nb_rounds, exit_floor, exit_pos, nb_total_clones, nb_additional_elevators, nb_elevators = [int(i) for i in input().split()]
for i in range(nb_floors):
self.floors.append(Floor(width))
self.floors[exit_floor].add_exit(exit_pos)
for i in range(nb_elevators):
# elevator_floor: floor on which this elevator is found
# elevator_pos: position of the elevator on its floor
elevator_floor, elevator_pos = [int(j) for j in input().split()]
self.floors[elevator_floor].add_elevator(elevator_pos)
if __name__ == '__main__':
drive = Drive()
flag_do_the_blocking = False
# game loop
while 1:
# clone_floor: floor of the leading clone
# clone_pos: position of the leading clone on its floor
# direction: direction of the leading clone: LEFT or RIGHT
clone_floor, clone_pos, direction = input().split()
clone_floor = int(clone_floor)
clone_pos = int(clone_pos)
flag_do_the_blocking = drive.floors[clone_floor].should_be_blocked(clone_pos, direction)
# Write an action using print
# To debug: print("Debug messages...", file=sys.stderr)
# action: WAIT or BLOCK
if flag_do_the_blocking:
print("BLOCK")
else:
print("WAIT")
| mit |
JingheZ/shogun | tests/integration/python_modular/distribution.py | 21 | 1835 | """
Test Distribution
"""
from numpy import inf, nan
from modshogun import *
import util
def _evaluate (indata):
prefix='distribution_'
feats=util.get_features(indata, prefix)
if indata[prefix+'name']=='HMM':
distribution=HMM(feats['train'], indata[prefix+'N'],
indata[prefix+'M'], indata[prefix+'pseudo'])
distribution.train()
distribution.baum_welch_viterbi_train(BW_NORMAL)
else:
dfun=eval(indata[prefix+'name'])
distribution=dfun(feats['train'])
distribution.train()
likelihood=distribution.get_log_likelihood_sample()
num_examples=feats['train'].get_num_vectors()
num_param=distribution.get_num_model_parameters()
derivatives=0
for i in range(num_param):
for j in range(num_examples):
val=distribution.get_log_derivative(i, j)
if val!=-inf and val==val: # NaN != NaN, so this skips NaN; only consider sparse matrix!
derivatives+=val
derivatives=abs(derivatives-indata[prefix+'derivatives'])
likelihood=abs(likelihood-indata[prefix+'likelihood'])
if indata[prefix+'name']=='HMM':
best_path=0
best_path_state=0
for i in range(indata[prefix+'num_examples']):
best_path+=distribution.best_path(i)
for j in range(indata[prefix+'N']):
best_path_state+=distribution.get_best_path_state(i, j)
best_path=abs(best_path-indata[prefix+'best_path'])
best_path_state=abs(best_path_state-\
indata[prefix+'best_path_state'])
return util.check_accuracy(indata[prefix+'accuracy'],
derivatives=derivatives, likelihood=likelihood,
best_path=best_path, best_path_state=best_path_state)
else:
return util.check_accuracy(indata[prefix+'accuracy'],
derivatives=derivatives, likelihood=likelihood)
########################################################################
# public
########################################################################
def test (indata):
return _evaluate(indata)
| gpl-3.0 |
johnkit/vtk-dev | ThirdParty/Twisted/twisted/mail/test/test_mail.py | 26 | 73828 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for large portions of L{twisted.mail}.
"""
import os
import errno
import shutil
import pickle
import StringIO
import rfc822
import tempfile
import signal
from hashlib import md5
from zope.interface.verify import verifyClass
from zope.interface import Interface, implements
from twisted.trial import unittest
from twisted.mail import smtp
from twisted.mail import pop3
from twisted.names import dns
from twisted.internet import protocol
from twisted.internet import defer
from twisted.internet.defer import Deferred
from twisted.internet import reactor
from twisted.internet import interfaces
from twisted.internet import task
from twisted.internet.error import DNSLookupError, CannotListenError
from twisted.internet.error import ProcessDone, ProcessTerminated
from twisted.internet import address
from twisted.python import failure
from twisted.python.filepath import FilePath
from twisted import mail
import twisted.mail.mail
import twisted.mail.maildir
import twisted.mail.relay
import twisted.mail.relaymanager
import twisted.mail.protocols
import twisted.mail.alias
from twisted.names.error import DNSNameError
from twisted.names.dns import RRHeader, Record_CNAME, Record_MX
from twisted import cred
import twisted.cred.credentials
import twisted.cred.checkers
import twisted.cred.portal
from twisted.test.proto_helpers import LineSendingProtocol
class DomainWithDefaultsTestCase(unittest.TestCase):
def testMethods(self):
d = dict([(x, x + 10) for x in range(10)])
d = mail.mail.DomainWithDefaultDict(d, 'Default')
self.assertEqual(len(d), 10)
self.assertEqual(list(iter(d)), range(10))
self.assertEqual(list(d.iterkeys()), list(iter(d)))
items = list(d.iteritems())
items.sort()
self.assertEqual(items, [(x, x + 10) for x in range(10)])
values = list(d.itervalues())
values.sort()
self.assertEqual(values, range(10, 20))
items = d.items()
items.sort()
self.assertEqual(items, [(x, x + 10) for x in range(10)])
values = d.values()
values.sort()
self.assertEqual(values, range(10, 20))
for x in range(10):
self.assertEqual(d[x], x + 10)
self.assertEqual(d.get(x), x + 10)
self.failUnless(x in d)
self.failUnless(d.has_key(x))
del d[2], d[4], d[6]
self.assertEqual(len(d), 7)
self.assertEqual(d[2], 'Default')
self.assertEqual(d[4], 'Default')
self.assertEqual(d[6], 'Default')
d.update({'a': None, 'b': (), 'c': '*'})
self.assertEqual(len(d), 10)
self.assertEqual(d['a'], None)
self.assertEqual(d['b'], ())
self.assertEqual(d['c'], '*')
d.clear()
self.assertEqual(len(d), 0)
self.assertEqual(d.setdefault('key', 'value'), 'value')
self.assertEqual(d['key'], 'value')
self.assertEqual(d.popitem(), ('key', 'value'))
self.assertEqual(len(d), 0)
dcopy = d.copy()
self.assertEqual(d.domains, dcopy.domains)
self.assertEqual(d.default, dcopy.default)
def _stringificationTest(self, stringifier):
"""
Assert that the class name of a L{mail.mail.DomainWithDefaultDict}
instance and the string-formatted underlying domain dictionary both
appear in the string produced by the given string-returning function.
@type stringifier: one-argument callable
@param stringifier: either C{str} or C{repr}, to be used to get a
string to make assertions against.
"""
domain = mail.mail.DomainWithDefaultDict({}, 'Default')
self.assertIn(domain.__class__.__name__, stringifier(domain))
domain['key'] = 'value'
self.assertIn(str({'key': 'value'}), stringifier(domain))
def test_str(self):
"""
L{DomainWithDefaultDict.__str__} should return a string including
the class name and the domain mapping held by the instance.
"""
self._stringificationTest(str)
def test_repr(self):
"""
L{DomainWithDefaultDict.__repr__} should return a string including
the class name and the domain mapping held by the instance.
"""
self._stringificationTest(repr)
class BounceTestCase(unittest.TestCase):
def setUp(self):
self.domain = mail.mail.BounceDomain()
def testExists(self):
self.assertRaises(smtp.AddressError, self.domain.exists, "any user")
def testRelay(self):
self.assertEqual(
self.domain.willRelay("random q emailer", "protocol"),
False
)
def testAddUser(self):
self.domain.addUser("bob", "password")
self.assertRaises(smtp.SMTPBadRcpt, self.domain.exists, "bob")
class FileMessageTestCase(unittest.TestCase):
def setUp(self):
self.name = "fileMessage.testFile"
self.final = "final.fileMessage.testFile"
self.f = file(self.name, 'w')
self.fp = mail.mail.FileMessage(self.f, self.name, self.final)
def tearDown(self):
try:
self.f.close()
except:
pass
try:
os.remove(self.name)
except:
pass
try:
os.remove(self.final)
except:
pass
def testFinalName(self):
return self.fp.eomReceived().addCallback(self._cbFinalName)
def _cbFinalName(self, result):
self.assertEqual(result, self.final)
self.failUnless(self.f.closed)
self.failIf(os.path.exists(self.name))
def testContents(self):
contents = "first line\nsecond line\nthird line\n"
for line in contents.splitlines():
self.fp.lineReceived(line)
self.fp.eomReceived()
self.assertEqual(file(self.final).read(), contents)
def testInterrupted(self):
contents = "first line\nsecond line\n"
for line in contents.splitlines():
self.fp.lineReceived(line)
self.fp.connectionLost()
self.failIf(os.path.exists(self.name))
self.failIf(os.path.exists(self.final))
class MailServiceTestCase(unittest.TestCase):
def setUp(self):
self.service = mail.mail.MailService()
def testFactories(self):
f = self.service.getPOP3Factory()
self.failUnless(isinstance(f, protocol.ServerFactory))
self.failUnless(f.buildProtocol(('127.0.0.1', 12345)), pop3.POP3)
f = self.service.getSMTPFactory()
self.failUnless(isinstance(f, protocol.ServerFactory))
self.failUnless(f.buildProtocol(('127.0.0.1', 12345)), smtp.SMTP)
f = self.service.getESMTPFactory()
self.failUnless(isinstance(f, protocol.ServerFactory))
self.failUnless(f.buildProtocol(('127.0.0.1', 12345)), smtp.ESMTP)
def testPortals(self):
o1 = object()
o2 = object()
self.service.portals['domain'] = o1
self.service.portals[''] = o2
self.failUnless(self.service.lookupPortal('domain') is o1)
self.failUnless(self.service.defaultPortal() is o2)
class StringListMailboxTests(unittest.TestCase):
"""
Tests for L{StringListMailbox}, an in-memory only implementation of
L{pop3.IMailbox}.
"""
def test_listOneMessage(self):
"""
L{StringListMailbox.listMessages} returns the length of the message at
the offset into the mailbox passed to it.
"""
mailbox = mail.maildir.StringListMailbox(["abc", "ab", "a"])
self.assertEqual(mailbox.listMessages(0), 3)
self.assertEqual(mailbox.listMessages(1), 2)
self.assertEqual(mailbox.listMessages(2), 1)
def test_listAllMessages(self):
"""
L{StringListMailbox.listMessages} returns a list of the lengths of all
messages if not passed an index.
"""
mailbox = mail.maildir.StringListMailbox(["a", "abc", "ab"])
self.assertEqual(mailbox.listMessages(), [1, 3, 2])
def test_getMessage(self):
"""
L{StringListMailbox.getMessage} returns a file-like object from which
the contents of the message at the given offset into the mailbox can be
read.
"""
mailbox = mail.maildir.StringListMailbox(["foo", "real contents"])
self.assertEqual(mailbox.getMessage(1).read(), "real contents")
def test_getUidl(self):
"""
L{StringListMailbox.getUidl} returns a unique identifier for the
message at the given offset into the mailbox.
"""
mailbox = mail.maildir.StringListMailbox(["foo", "bar"])
self.assertNotEqual(mailbox.getUidl(0), mailbox.getUidl(1))
def test_deleteMessage(self):
"""
L{StringListMailbox.deleteMessage} marks a message for deletion causing
further requests for its length to return 0.
"""
mailbox = mail.maildir.StringListMailbox(["foo"])
mailbox.deleteMessage(0)
self.assertEqual(mailbox.listMessages(0), 0)
self.assertEqual(mailbox.listMessages(), [0])
def test_undeleteMessages(self):
"""
L{StringListMailbox.undeleteMessages} causes any messages marked for
deletion to be returned to their original state.
"""
mailbox = mail.maildir.StringListMailbox(["foo"])
mailbox.deleteMessage(0)
mailbox.undeleteMessages()
self.assertEqual(mailbox.listMessages(0), 3)
self.assertEqual(mailbox.listMessages(), [3])
def test_sync(self):
"""
L{StringListMailbox.sync} causes any messages as marked for deletion to
be permanently deleted.
"""
mailbox = mail.maildir.StringListMailbox(["foo"])
mailbox.deleteMessage(0)
mailbox.sync()
mailbox.undeleteMessages()
self.assertEqual(mailbox.listMessages(0), 0)
self.assertEqual(mailbox.listMessages(), [0])
class FailingMaildirMailboxAppendMessageTask(mail.maildir._MaildirMailboxAppendMessageTask):
_openstate = True
_writestate = True
_renamestate = True
def osopen(self, fn, attr, mode):
if self._openstate:
return os.open(fn, attr, mode)
else:
raise OSError(errno.EPERM, "Faked Permission Problem")
def oswrite(self, fh, data):
if self._writestate:
return os.write(fh, data)
else:
raise OSError(errno.ENOSPC, "Faked Space problem")
def osrename(self, oldname, newname):
if self._renamestate:
return os.rename(oldname, newname)
else:
raise OSError(errno.EPERM, "Faked Permission Problem")
class _AppendTestMixin(object):
"""
Mixin for L{MaildirMailbox.appendMessage} test cases which defines a helper
for serially appending multiple messages to a mailbox.
"""
def _appendMessages(self, mbox, messages):
"""
Deliver the given messages one at a time. Delivery is serialized to
guarantee a predictable order in the mailbox (overlapped message delivery
makes no guarantees about which message will appear first).
"""
results = []
def append():
for m in messages:
d = mbox.appendMessage(m)
d.addCallback(results.append)
yield d
d = task.cooperate(append()).whenDone()
d.addCallback(lambda ignored: results)
return d
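# Editor's note: minimal sketch (editor's addition) of the task.cooperate
# pattern used above. Yielding a Deferred from the generator pauses the
# cooperative iteration until that Deferred fires, which is what serializes
# the appends; do_async_thing below is a hypothetical asynchronous call.
#
#   def work():
#       for item in items:
#           yield do_async_thing(item)    # iteration resumes when this fires
#   d = task.cooperate(work()).whenDone() # fires once the generator is done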
class MaildirAppendStringTestCase(unittest.TestCase, _AppendTestMixin):
"""
Tests for L{MaildirMailbox.appendMessage} when invoked with a C{str}.
"""
def setUp(self):
self.d = self.mktemp()
mail.maildir.initializeMaildir(self.d)
def _append(self, ignored, mbox):
d = mbox.appendMessage('TEST')
return self.assertFailure(d, Exception)
def _setState(self, ignored, mbox, rename=None, write=None, open=None):
"""
Change the behavior of future C{rename}, C{write}, or C{open} calls made
by the mailbox C{mbox}.
@param rename: If not C{None}, a new value for the C{_renamestate}
attribute of the mailbox's append factory. The original value will
be restored at the end of the test.
@param write: Like C{rename}, but for the C{_writestate} attribute.
@param open: Like C{rename}, but for the C{_openstate} attribute.
"""
if rename is not None:
self.addCleanup(
setattr, mbox.AppendFactory, '_renamestate',
mbox.AppendFactory._renamestate)
mbox.AppendFactory._renamestate = rename
if write is not None:
self.addCleanup(
setattr, mbox.AppendFactory, '_writestate',
mbox.AppendFactory._writestate)
mbox.AppendFactory._writestate = write
if open is not None:
self.addCleanup(
setattr, mbox.AppendFactory, '_openstate',
mbox.AppendFactory._openstate)
mbox.AppendFactory._openstate = open
def test_append(self):
"""
L{MaildirMailbox.appendMessage} returns a L{Deferred} which fires when
the message has been added to the end of the mailbox.
"""
mbox = mail.maildir.MaildirMailbox(self.d)
mbox.AppendFactory = FailingMaildirMailboxAppendMessageTask
d = self._appendMessages(mbox, ["X" * i for i in range(1, 11)])
d.addCallback(self.assertEqual, [None] * 10)
d.addCallback(self._cbTestAppend, mbox)
return d
def _cbTestAppend(self, ignored, mbox):
"""
Check that the mailbox has the expected number (ten) of messages in it,
and that each has the expected contents, and that they are in the same
order as that in which they were appended.
"""
self.assertEqual(len(mbox.listMessages()), 10)
self.assertEqual(
[len(mbox.getMessage(i).read()) for i in range(10)],
range(1, 11))
# test in the right order: last to first error location.
self._setState(None, mbox, rename=False)
d = self._append(None, mbox)
d.addCallback(self._setState, mbox, rename=True, write=False)
d.addCallback(self._append, mbox)
d.addCallback(self._setState, mbox, write=True, open=False)
d.addCallback(self._append, mbox)
d.addCallback(self._setState, mbox, open=True)
return d
class MaildirAppendFileTestCase(unittest.TestCase, _AppendTestMixin):
"""
Tests for L{MaildirMailbox.appendMessage} when invoked with a C{str}.
"""
def setUp(self):
self.d = self.mktemp()
mail.maildir.initializeMaildir(self.d)
def test_append(self):
"""
L{MaildirMailbox.appendMessage} returns a L{Deferred} which fires when
the message has been added to the end of the mailbox.
"""
mbox = mail.maildir.MaildirMailbox(self.d)
messages = []
for i in xrange(1, 11):
temp = tempfile.TemporaryFile()
temp.write("X" * i)
temp.seek(0, 0)
messages.append(temp)
self.addCleanup(temp.close)
d = self._appendMessages(mbox, messages)
d.addCallback(self._cbTestAppend, mbox)
return d
def _cbTestAppend(self, result, mbox):
"""
Check that the mailbox has the expected number (ten) of messages in it,
and that each has the expected contents, and that they are in the same
order as that in which they were appended.
"""
self.assertEqual(len(mbox.listMessages()), 10)
self.assertEqual(
[len(mbox.getMessage(i).read()) for i in range(10)],
range(1, 11))
class MaildirTestCase(unittest.TestCase):
def setUp(self):
self.d = self.mktemp()
mail.maildir.initializeMaildir(self.d)
def tearDown(self):
shutil.rmtree(self.d)
def testInitializer(self):
d = self.d
trash = os.path.join(d, '.Trash')
self.failUnless(os.path.exists(d) and os.path.isdir(d))
self.failUnless(os.path.exists(os.path.join(d, 'new')))
self.failUnless(os.path.exists(os.path.join(d, 'cur')))
self.failUnless(os.path.exists(os.path.join(d, 'tmp')))
self.failUnless(os.path.isdir(os.path.join(d, 'new')))
self.failUnless(os.path.isdir(os.path.join(d, 'cur')))
self.failUnless(os.path.isdir(os.path.join(d, 'tmp')))
self.failUnless(os.path.exists(os.path.join(trash, 'new')))
self.failUnless(os.path.exists(os.path.join(trash, 'cur')))
self.failUnless(os.path.exists(os.path.join(trash, 'tmp')))
self.failUnless(os.path.isdir(os.path.join(trash, 'new')))
self.failUnless(os.path.isdir(os.path.join(trash, 'cur')))
self.failUnless(os.path.isdir(os.path.join(trash, 'tmp')))
def test_nameGenerator(self):
"""
Each call to L{_MaildirNameGenerator.generate} returns a unique
string suitable for use as the basename of a new message file. The
names are ordered such that those generated earlier sort less than
those generated later.
"""
clock = task.Clock()
clock.advance(0.05)
generator = mail.maildir._MaildirNameGenerator(clock)
firstName = generator.generate()
clock.advance(0.05)
secondName = generator.generate()
self.assertTrue(firstName < secondName)
def test_mailbox(self):
"""
Exercise the methods of L{IMailbox} as implemented by
L{MaildirMailbox}.
"""
j = os.path.join
n = mail.maildir._generateMaildirName
msgs = [j(b, n()) for b in ('cur', 'new') for x in range(5)]
# Toss a few files into the mailbox
i = 1
for f in msgs:
fObj = file(j(self.d, f), 'w')
fObj.write('x' * i)
fObj.close()
i = i + 1
mb = mail.maildir.MaildirMailbox(self.d)
self.assertEqual(mb.listMessages(), range(1, 11))
self.assertEqual(mb.listMessages(1), 2)
self.assertEqual(mb.listMessages(5), 6)
self.assertEqual(mb.getMessage(6).read(), 'x' * 7)
self.assertEqual(mb.getMessage(1).read(), 'x' * 2)
d = {}
for i in range(10):
u = mb.getUidl(i)
self.failIf(u in d)
d[u] = None
p, f = os.path.split(msgs[5])
mb.deleteMessage(5)
self.assertEqual(mb.listMessages(5), 0)
self.failUnless(os.path.exists(j(self.d, '.Trash', 'cur', f)))
self.failIf(os.path.exists(j(self.d, msgs[5])))
mb.undeleteMessages()
self.assertEqual(mb.listMessages(5), 6)
self.failIf(os.path.exists(j(self.d, '.Trash', 'cur', f)))
self.failUnless(os.path.exists(j(self.d, msgs[5])))
class AbstractMaildirDomainTestCase(unittest.TestCase):
"""
Tests for L{twisted.mail.maildir.AbstractMaildirDomain}.
"""
def test_interface(self):
"""
L{maildir.AbstractMaildirDomain} implements L{mail.IAliasableDomain}.
"""
verifyClass(mail.mail.IAliasableDomain,
mail.maildir.AbstractMaildirDomain)
class MaildirDirdbmDomainTestCase(unittest.TestCase):
"""
Tests for L{MaildirDirdbmDomain}.
"""
def setUp(self):
"""
Create a temporary L{MaildirDirdbmDomain} and parent
L{MailService} before running each test.
"""
self.P = self.mktemp()
self.S = mail.mail.MailService()
self.D = mail.maildir.MaildirDirdbmDomain(self.S, self.P)
def tearDown(self):
"""
Remove the temporary C{maildir} directory when the test has
finished.
"""
shutil.rmtree(self.P)
def test_addUser(self):
"""
L{MaildirDirdbmDomain.addUser} accepts a user and password
argument. It stores those in a C{dbm} dictionary
attribute and creates a directory for each user.
"""
toAdd = (('user1', 'pwd1'), ('user2', 'pwd2'), ('user3', 'pwd3'))
for (u, p) in toAdd:
self.D.addUser(u, p)
for (u, p) in toAdd:
self.failUnless(u in self.D.dbm)
self.assertEqual(self.D.dbm[u], p)
self.failUnless(os.path.exists(os.path.join(self.P, u)))
def test_credentials(self):
"""
L{MaildirDirdbmDomain.getCredentialsCheckers} initializes and
returns one L{ICredentialsChecker} checker by default.
"""
creds = self.D.getCredentialsCheckers()
self.assertEqual(len(creds), 1)
self.failUnless(cred.checkers.ICredentialsChecker.providedBy(creds[0]))
self.failUnless(cred.credentials.IUsernamePassword in creds[0].credentialInterfaces)
def test_requestAvatar(self):
"""
L{MaildirDirdbmDomain.requestAvatar} raises L{NotImplementedError}
unless it is supplied with an L{pop3.IMailbox} interface.
When called with an L{pop3.IMailbox}, it returns a 3-tuple
containing L{pop3.IMailbox}, an implementation of that interface
and a NOOP callable.
"""
class ISomething(Interface):
pass
self.D.addUser('user', 'password')
self.assertRaises(
NotImplementedError,
self.D.requestAvatar, 'user', None, ISomething
)
t = self.D.requestAvatar('user', None, pop3.IMailbox)
self.assertEqual(len(t), 3)
self.failUnless(t[0] is pop3.IMailbox)
self.failUnless(pop3.IMailbox.providedBy(t[1]))
t[2]()
def test_requestAvatarId(self):
"""
L{DirdbmDatabase.requestAvatarId} raises L{UnauthorizedLogin} if
supplied with invalid user credentials.
When called with valid credentials, L{requestAvatarId} returns
the username associated with the supplied credentials.
"""
self.D.addUser('user', 'password')
database = self.D.getCredentialsCheckers()[0]
creds = cred.credentials.UsernamePassword('user', 'wrong password')
self.assertRaises(
cred.error.UnauthorizedLogin,
database.requestAvatarId, creds
)
creds = cred.credentials.UsernamePassword('user', 'password')
self.assertEqual(database.requestAvatarId(creds), 'user')
def test_userDirectory(self):
"""
L{MaildirDirdbmDomain.userDirectory} is supplied with a user name
and returns the path to that user's maildir subdirectory.
Calling L{MaildirDirdbmDomain.userDirectory} with a
non-existent user returns the 'postmaster' directory if there
is a postmaster or returns L{None} if there is no postmaster.
"""
self.D.addUser('user', 'password')
self.assertEqual(self.D.userDirectory('user'),
os.path.join(self.D.root, 'user'))
self.D.postmaster = False
self.assertIdentical(self.D.userDirectory('nouser'), None)
self.D.postmaster = True
self.assertEqual(self.D.userDirectory('nouser'),
os.path.join(self.D.root, 'postmaster'))
class StubAliasableDomain(object):
"""
Minimal testable implementation of IAliasableDomain.
"""
implements(mail.mail.IAliasableDomain)
def exists(self, user):
"""
No test coverage for invocations of this method on domain objects,
so we just won't implement it.
"""
raise NotImplementedError()
def addUser(self, user, password):
"""
No test coverage for invocations of this method on domain objects,
so we just won't implement it.
"""
raise NotImplementedError()
def getCredentialsCheckers(self):
"""
This needs to succeed in order for other tests to complete
successfully, but we don't actually assert anything about its
behavior. Return an empty list. Sometime later we should return
something else and assert that a portal got set up properly.
"""
return []
def setAliasGroup(self, aliases):
"""
Just record the value so the test can check it later.
"""
self.aliasGroup = aliases
class ServiceDomainTestCase(unittest.TestCase):
def setUp(self):
self.S = mail.mail.MailService()
self.D = mail.protocols.DomainDeliveryBase(self.S, None)
self.D.service = self.S
self.D.protocolName = 'TEST'
self.D.host = 'hostname'
self.tmpdir = self.mktemp()
domain = mail.maildir.MaildirDirdbmDomain(self.S, self.tmpdir)
domain.addUser('user', 'password')
self.S.addDomain('test.domain', domain)
def tearDown(self):
shutil.rmtree(self.tmpdir)
def testAddAliasableDomain(self):
"""
Test that adding an IAliasableDomain to a mail service properly sets
up alias group references and such.
"""
aliases = object()
domain = StubAliasableDomain()
self.S.aliases = aliases
self.S.addDomain('example.com', domain)
self.assertIdentical(domain.aliasGroup, aliases)
def testReceivedHeader(self):
hdr = self.D.receivedHeader(
('remotehost', '123.232.101.234'),
smtp.Address('<someguy@somplace>'),
['user@host.name']
)
fp = StringIO.StringIO(hdr)
m = rfc822.Message(fp)
self.assertEqual(len(m.items()), 1)
self.assertIn('Received', m)
def testValidateTo(self):
user = smtp.User('user@test.domain', 'helo', None, 'wherever@whatever')
return defer.maybeDeferred(self.D.validateTo, user
).addCallback(self._cbValidateTo
)
def _cbValidateTo(self, result):
self.failUnless(callable(result))
def testValidateToBadUsername(self):
user = smtp.User('resu@test.domain', 'helo', None, 'wherever@whatever')
return self.assertFailure(
defer.maybeDeferred(self.D.validateTo, user),
smtp.SMTPBadRcpt)
def testValidateToBadDomain(self):
user = smtp.User('user@domain.test', 'helo', None, 'wherever@whatever')
return self.assertFailure(
defer.maybeDeferred(self.D.validateTo, user),
smtp.SMTPBadRcpt)
def testValidateFrom(self):
helo = ('hostname', '127.0.0.1')
origin = smtp.Address('<user@hostname>')
self.failUnless(self.D.validateFrom(helo, origin) is origin)
helo = ('hostname', '1.2.3.4')
origin = smtp.Address('<user@hostname>')
self.failUnless(self.D.validateFrom(helo, origin) is origin)
helo = ('hostname', '1.2.3.4')
origin = smtp.Address('<>')
self.failUnless(self.D.validateFrom(helo, origin) is origin)
self.assertRaises(
smtp.SMTPBadSender,
self.D.validateFrom, None, origin
)
class VirtualPOP3TestCase(unittest.TestCase):
def setUp(self):
self.tmpdir = self.mktemp()
self.S = mail.mail.MailService()
self.D = mail.maildir.MaildirDirdbmDomain(self.S, self.tmpdir)
self.D.addUser('user', 'password')
self.S.addDomain('test.domain', self.D)
portal = cred.portal.Portal(self.D)
map(portal.registerChecker, self.D.getCredentialsCheckers())
self.S.portals[''] = self.S.portals['test.domain'] = portal
self.P = mail.protocols.VirtualPOP3()
self.P.service = self.S
self.P.magic = '<unit test magic>'
def tearDown(self):
shutil.rmtree(self.tmpdir)
def testAuthenticateAPOP(self):
resp = md5(self.P.magic + 'password').hexdigest()
return self.P.authenticateUserAPOP('user', resp
).addCallback(self._cbAuthenticateAPOP
)
def _cbAuthenticateAPOP(self, result):
self.assertEqual(len(result), 3)
self.assertEqual(result[0], pop3.IMailbox)
self.failUnless(pop3.IMailbox.providedBy(result[1]))
result[2]()
def testAuthenticateIncorrectUserAPOP(self):
resp = md5(self.P.magic + 'password').hexdigest()
return self.assertFailure(
self.P.authenticateUserAPOP('resu', resp),
cred.error.UnauthorizedLogin)
def testAuthenticateIncorrectResponseAPOP(self):
resp = md5('wrong digest').hexdigest()
return self.assertFailure(
self.P.authenticateUserAPOP('user', resp),
cred.error.UnauthorizedLogin)
def testAuthenticatePASS(self):
return self.P.authenticateUserPASS('user', 'password'
).addCallback(self._cbAuthenticatePASS
)
def _cbAuthenticatePASS(self, result):
self.assertEqual(len(result), 3)
self.assertEqual(result[0], pop3.IMailbox)
self.failUnless(pop3.IMailbox.providedBy(result[1]))
result[2]()
def testAuthenticateBadUserPASS(self):
return self.assertFailure(
self.P.authenticateUserPASS('resu', 'password'),
cred.error.UnauthorizedLogin)
def testAuthenticateBadPasswordPASS(self):
return self.assertFailure(
self.P.authenticateUserPASS('user', 'wrong password'),
cred.error.UnauthorizedLogin)
class empty(smtp.User):
def __init__(self):
pass
class RelayTestCase(unittest.TestCase):
def testExists(self):
service = mail.mail.MailService()
domain = mail.relay.DomainQueuer(service)
doRelay = [
address.UNIXAddress('/var/run/mail-relay'),
address.IPv4Address('TCP', '127.0.0.1', 12345),
]
dontRelay = [
address.IPv4Address('TCP', '192.168.2.1', 62),
address.IPv4Address('TCP', '1.2.3.4', 1943),
]
for peer in doRelay:
user = empty()
user.orig = 'user@host'
user.dest = 'tsoh@resu'
user.protocol = empty()
user.protocol.transport = empty()
user.protocol.transport.getPeer = lambda: peer
self.failUnless(callable(domain.exists(user)))
for peer in dontRelay:
user = empty()
user.orig = 'some@place'
user.protocol = empty()
user.protocol.transport = empty()
user.protocol.transport.getPeer = lambda: peer
user.dest = 'who@cares'
self.assertRaises(smtp.SMTPBadRcpt, domain.exists, user)
class RelayerTestCase(unittest.TestCase):
def setUp(self):
self.tmpdir = self.mktemp()
os.mkdir(self.tmpdir)
self.messageFiles = []
for i in range(10):
name = os.path.join(self.tmpdir, 'body-%d' % (i,))
f = file(name + '-H', 'w')
pickle.dump(['from-%d' % (i,), 'to-%d' % (i,)], f)
f.close()
f = file(name + '-D', 'w')
f.write(name)
f.seek(0, 0)
self.messageFiles.append(name)
self.R = mail.relay.RelayerMixin()
self.R.loadMessages(self.messageFiles)
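# Note on the layout built above: each queued message is stored as two
# files, '<name>-H' with a pickled [sender, recipient] envelope and
# '<name>-D' with the body (here just the file's own path, so the tests
# below can verify that getMailData() returns the expected body).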
def tearDown(self):
shutil.rmtree(self.tmpdir)
def testMailFrom(self):
for i in range(10):
self.assertEqual(self.R.getMailFrom(), 'from-%d' % (i,))
self.R.sentMail(250, None, None, None, None)
self.assertEqual(self.R.getMailFrom(), None)
def testMailTo(self):
for i in range(10):
self.assertEqual(self.R.getMailTo(), ['to-%d' % (i,)])
self.R.sentMail(250, None, None, None, None)
self.assertEqual(self.R.getMailTo(), None)
def testMailData(self):
for i in range(10):
name = os.path.join(self.tmpdir, 'body-%d' % (i,))
self.assertEqual(self.R.getMailData().read(), name)
self.R.sentMail(250, None, None, None, None)
self.assertEqual(self.R.getMailData(), None)
class Manager:
def __init__(self):
self.success = []
self.failure = []
self.done = []
def notifySuccess(self, factory, message):
self.success.append((factory, message))
def notifyFailure(self, factory, message):
self.failure.append((factory, message))
def notifyDone(self, factory):
self.done.append(factory)
class ManagedRelayerTestCase(unittest.TestCase):
def setUp(self):
self.manager = Manager()
self.messages = range(0, 20, 2)
self.factory = object()
self.relay = mail.relaymanager.ManagedRelayerMixin(self.manager)
self.relay.messages = self.messages[:]
self.relay.names = self.messages[:]
self.relay.factory = self.factory
def testSuccessfulSentMail(self):
for i in self.messages:
self.relay.sentMail(250, None, None, None, None)
self.assertEqual(
self.manager.success,
[(self.factory, m) for m in self.messages]
)
def testFailedSentMail(self):
for i in self.messages:
self.relay.sentMail(550, None, None, None, None)
self.assertEqual(
self.manager.failure,
[(self.factory, m) for m in self.messages]
)
def testConnectionLost(self):
self.relay.connectionLost(failure.Failure(Exception()))
self.assertEqual(self.manager.done, [self.factory])
class DirectoryQueueTestCase(unittest.TestCase):
def setUp(self):
# This is almost a test case itself.
self.tmpdir = self.mktemp()
os.mkdir(self.tmpdir)
self.queue = mail.relaymanager.Queue(self.tmpdir)
self.queue.noisy = False
for m in range(25):
hdrF, msgF = self.queue.createNewMessage()
pickle.dump(['header', m], hdrF)
hdrF.close()
msgF.lineReceived('body: %d' % (m,))
msgF.eomReceived()
self.queue.readDirectory()
def tearDown(self):
shutil.rmtree(self.tmpdir)
def testWaiting(self):
self.failUnless(self.queue.hasWaiting())
self.assertEqual(len(self.queue.getWaiting()), 25)
waiting = self.queue.getWaiting()
self.queue.setRelaying(waiting[0])
self.assertEqual(len(self.queue.getWaiting()), 24)
self.queue.setWaiting(waiting[0])
self.assertEqual(len(self.queue.getWaiting()), 25)
def testRelaying(self):
for m in self.queue.getWaiting():
self.queue.setRelaying(m)
self.assertEqual(
len(self.queue.getRelayed()),
25 - len(self.queue.getWaiting())
)
self.failIf(self.queue.hasWaiting())
relayed = self.queue.getRelayed()
self.queue.setWaiting(relayed[0])
self.assertEqual(len(self.queue.getWaiting()), 1)
self.assertEqual(len(self.queue.getRelayed()), 24)
def testDone(self):
msg = self.queue.getWaiting()[0]
self.queue.setRelaying(msg)
self.queue.done(msg)
self.assertEqual(len(self.queue.getWaiting()), 24)
self.assertEqual(len(self.queue.getRelayed()), 0)
self.failIf(msg in self.queue.getWaiting())
self.failIf(msg in self.queue.getRelayed())
def testEnvelope(self):
envelopes = []
for msg in self.queue.getWaiting():
envelopes.append(self.queue.getEnvelope(msg))
envelopes.sort()
for i in range(25):
self.assertEqual(
envelopes.pop(0),
['header', i]
)
from twisted.names import server
from twisted.names import client
from twisted.names import common
class TestAuthority(common.ResolverBase):
def __init__(self):
common.ResolverBase.__init__(self)
self.addresses = {}
def _lookup(self, name, cls, type, timeout = None):
if name in self.addresses and type == dns.MX:
results = []
for a in self.addresses[name]:
hdr = dns.RRHeader(
name, dns.MX, dns.IN, 60, dns.Record_MX(0, a)
)
results.append(hdr)
return defer.succeed((results, [], []))
return defer.fail(failure.Failure(dns.DomainError(name)))
def setUpDNS(self):
self.auth = TestAuthority()
factory = server.DNSServerFactory([self.auth])
protocol = dns.DNSDatagramProtocol(factory)
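# Keep trying until we find a port number that is free for both TCP and
# UDP on 127.0.0.1: listenTCP picks an ephemeral port, and if the
# matching UDP port is already taken the TCP port is closed and another
# attempt is made.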
while 1:
self.port = reactor.listenTCP(0, factory, interface='127.0.0.1')
portNumber = self.port.getHost().port
try:
self.udpPort = reactor.listenUDP(portNumber, protocol, interface='127.0.0.1')
except CannotListenError:
self.port.stopListening()
else:
break
self.resolver = client.Resolver(servers=[('127.0.0.1', portNumber)])
def tearDownDNS(self):
dl = []
dl.append(defer.maybeDeferred(self.port.stopListening))
dl.append(defer.maybeDeferred(self.udpPort.stopListening))
try:
self.resolver._parseCall.cancel()
except:
pass
return defer.DeferredList(dl)
class MXTestCase(unittest.TestCase):
"""
Tests for L{mail.relaymanager.MXCalculator}.
"""
def setUp(self):
setUpDNS(self)
self.clock = task.Clock()
self.mx = mail.relaymanager.MXCalculator(self.resolver, self.clock)
def tearDown(self):
return tearDownDNS(self)
def test_defaultClock(self):
"""
L{MXCalculator}'s default clock is C{twisted.internet.reactor}.
"""
self.assertIdentical(
mail.relaymanager.MXCalculator(self.resolver).clock,
reactor)
def testSimpleSuccess(self):
self.auth.addresses['test.domain'] = ['the.email.test.domain']
return self.mx.getMX('test.domain').addCallback(self._cbSimpleSuccess)
def _cbSimpleSuccess(self, mx):
self.assertEqual(mx.preference, 0)
self.assertEqual(str(mx.name), 'the.email.test.domain')
def testSimpleFailure(self):
self.mx.fallbackToDomain = False
return self.assertFailure(self.mx.getMX('test.domain'), IOError)
def testSimpleFailureWithFallback(self):
return self.assertFailure(self.mx.getMX('test.domain'), DNSLookupError)
def _exchangeTest(self, domain, records, correctMailExchange):
"""
Issue an MX request for the given domain and arrange for it to be
responded to with the given records. Verify that the resulting mail
exchange is the indicated host.
@type domain: C{str}
@type records: C{list} of L{RRHeader}
@type correctMailExchange: C{str}
@rtype: L{Deferred}
"""
class DummyResolver(object):
def lookupMailExchange(self, name):
if name == domain:
return defer.succeed((
records,
[],
[]))
return defer.fail(DNSNameError(domain))
self.mx.resolver = DummyResolver()
d = self.mx.getMX(domain)
def gotMailExchange(record):
self.assertEqual(str(record.name), correctMailExchange)
d.addCallback(gotMailExchange)
return d
def test_mailExchangePreference(self):
"""
The MX record with the lowest preference is returned by
L{MXCalculator.getMX}.
"""
domain = "example.com"
good = "good.example.com"
bad = "bad.example.com"
records = [
RRHeader(name=domain,
type=Record_MX.TYPE,
payload=Record_MX(1, bad)),
RRHeader(name=domain,
type=Record_MX.TYPE,
payload=Record_MX(0, good)),
RRHeader(name=domain,
type=Record_MX.TYPE,
payload=Record_MX(2, bad))]
return self._exchangeTest(domain, records, good)
def test_badExchangeExcluded(self):
"""
L{MXCalculator.getMX} returns the MX record with the lowest preference
which is not also marked as bad.
"""
domain = "example.com"
good = "good.example.com"
bad = "bad.example.com"
records = [
RRHeader(name=domain,
type=Record_MX.TYPE,
payload=Record_MX(0, bad)),
RRHeader(name=domain,
type=Record_MX.TYPE,
payload=Record_MX(1, good))]
self.mx.markBad(bad)
return self._exchangeTest(domain, records, good)
def test_fallbackForAllBadExchanges(self):
"""
L{MXCalculator.getMX} returns the MX record with the lowest preference
if all the MX records in the response have been marked bad.
"""
domain = "example.com"
bad = "bad.example.com"
worse = "worse.example.com"
records = [
RRHeader(name=domain,
type=Record_MX.TYPE,
payload=Record_MX(0, bad)),
RRHeader(name=domain,
type=Record_MX.TYPE,
payload=Record_MX(1, worse))]
self.mx.markBad(bad)
self.mx.markBad(worse)
return self._exchangeTest(domain, records, bad)
def test_badExchangeExpires(self):
"""
L{MXCalculator.getMX} returns the MX record with the lowest preference
if it was last marked bad longer than L{MXCalculator.timeOutBadMX}
seconds ago.
"""
domain = "example.com"
good = "good.example.com"
previouslyBad = "bad.example.com"
records = [
RRHeader(name=domain,
type=Record_MX.TYPE,
payload=Record_MX(0, previouslyBad)),
RRHeader(name=domain,
type=Record_MX.TYPE,
payload=Record_MX(1, good))]
self.mx.markBad(previouslyBad)
self.clock.advance(self.mx.timeOutBadMX)
return self._exchangeTest(domain, records, previouslyBad)
def test_goodExchangeUsed(self):
"""
L{MXCalculator.getMX} returns the MX record with the lowest preference
if it was marked good after it was marked bad.
"""
domain = "example.com"
good = "good.example.com"
previouslyBad = "bad.example.com"
records = [
RRHeader(name=domain,
type=Record_MX.TYPE,
payload=Record_MX(0, previouslyBad)),
RRHeader(name=domain,
type=Record_MX.TYPE,
payload=Record_MX(1, good))]
self.mx.markBad(previouslyBad)
self.mx.markGood(previouslyBad)
self.clock.advance(self.mx.timeOutBadMX)
return self._exchangeTest(domain, records, previouslyBad)
def test_successWithoutResults(self):
"""
If an MX lookup succeeds but the result set is empty,
L{MXCalculator.getMX} should try to look up an I{A} record for the
requested name and call back its returned Deferred with that
address.
"""
ip = '1.2.3.4'
domain = 'example.org'
class DummyResolver(object):
"""
Fake resolver which will respond to an MX lookup with an empty
result set.
@ivar mx: A dictionary mapping hostnames to three-tuples of
results to be returned from I{MX} lookups.
@ivar a: A dictionary mapping hostnames to addresses to be
returned from I{A} lookups.
"""
mx = {domain: ([], [], [])}
a = {domain: ip}
def lookupMailExchange(self, domain):
return defer.succeed(self.mx[domain])
def getHostByName(self, domain):
return defer.succeed(self.a[domain])
self.mx.resolver = DummyResolver()
d = self.mx.getMX(domain)
d.addCallback(self.assertEqual, Record_MX(name=ip))
return d
def test_failureWithSuccessfulFallback(self):
"""
Test that if the MX record lookup fails, fallback is enabled, and an A
record is available for the name, then the Deferred returned by
L{MXCalculator.getMX} ultimately fires with a Record_MX instance which
gives the address in the A record for the name.
"""
class DummyResolver(object):
"""
Fake resolver which will fail an MX lookup but then succeed a
getHostByName call.
"""
def lookupMailExchange(self, domain):
return defer.fail(DNSNameError())
def getHostByName(self, domain):
return defer.succeed("1.2.3.4")
self.mx.resolver = DummyResolver()
d = self.mx.getMX("domain")
d.addCallback(self.assertEqual, Record_MX(name="1.2.3.4"))
return d
def test_cnameWithoutGlueRecords(self):
"""
If an MX lookup returns a single CNAME record as a result, MXCalculator
will perform an MX lookup for the canonical name indicated and return
the MX record which results.
"""
alias = "alias.example.com"
canonical = "canonical.example.com"
exchange = "mail.example.com"
class DummyResolver(object):
"""
Fake resolver which will return a CNAME for an MX lookup of a name
which is an alias and an MX for an MX lookup of the canonical name.
"""
def lookupMailExchange(self, domain):
if domain == alias:
return defer.succeed((
[RRHeader(name=domain,
type=Record_CNAME.TYPE,
payload=Record_CNAME(canonical))],
[], []))
elif domain == canonical:
return defer.succeed((
[RRHeader(name=domain,
type=Record_MX.TYPE,
payload=Record_MX(0, exchange))],
[], []))
else:
return defer.fail(DNSNameError(domain))
self.mx.resolver = DummyResolver()
d = self.mx.getMX(alias)
d.addCallback(self.assertEqual, Record_MX(name=exchange))
return d
def test_cnameChain(self):
"""
If L{MXCalculator.getMX} encounters a CNAME chain which is longer than
the length specified, the returned L{Deferred} should errback with
L{CanonicalNameChainTooLong}.
"""
class DummyResolver(object):
"""
Fake resolver which generates a CNAME chain of infinite length in
response to MX lookups.
"""
chainCounter = 0
def lookupMailExchange(self, domain):
self.chainCounter += 1
name = 'x-%d.example.com' % (self.chainCounter,)
return defer.succeed((
[RRHeader(name=domain,
type=Record_CNAME.TYPE,
payload=Record_CNAME(name))],
[], []))
cnameLimit = 3
self.mx.resolver = DummyResolver()
d = self.mx.getMX("mail.example.com", cnameLimit)
self.assertFailure(
d, twisted.mail.relaymanager.CanonicalNameChainTooLong)
def cbChainTooLong(error):
self.assertEqual(error.args[0], Record_CNAME("x-%d.example.com" % (cnameLimit + 1,)))
self.assertEqual(self.mx.resolver.chainCounter, cnameLimit + 1)
d.addCallback(cbChainTooLong)
return d
def test_cnameWithGlueRecords(self):
"""
If an MX lookup returns a CNAME and the MX record for the CNAME, the
L{Deferred} returned by L{MXCalculator.getMX} should be called back
with the name from the MX record without further lookups being
attempted.
"""
lookedUp = []
alias = "alias.example.com"
canonical = "canonical.example.com"
exchange = "mail.example.com"
class DummyResolver(object):
def lookupMailExchange(self, domain):
if domain != alias or lookedUp:
# Don't give back any results for anything except the alias
# or on any request after the first.
return ([], [], [])
return defer.succeed((
[RRHeader(name=alias,
type=Record_CNAME.TYPE,
payload=Record_CNAME(canonical)),
RRHeader(name=canonical,
type=Record_MX.TYPE,
payload=Record_MX(name=exchange))],
[], []))
self.mx.resolver = DummyResolver()
d = self.mx.getMX(alias)
d.addCallback(self.assertEqual, Record_MX(name=exchange))
return d
def test_cnameLoopWithGlueRecords(self):
"""
If an MX lookup returns two CNAME records which point to each other,
the loop should be detected and the L{Deferred} returned by
L{MXCalculator.getMX} should be errbacked with L{CanonicalNameLoop}.
"""
firstAlias = "cname1.example.com"
secondAlias = "cname2.example.com"
class DummyResolver(object):
def lookupMailExchange(self, domain):
return defer.succeed((
[RRHeader(name=firstAlias,
type=Record_CNAME.TYPE,
payload=Record_CNAME(secondAlias)),
RRHeader(name=secondAlias,
type=Record_CNAME.TYPE,
payload=Record_CNAME(firstAlias))],
[], []))
self.mx.resolver = DummyResolver()
d = self.mx.getMX(firstAlias)
self.assertFailure(d, twisted.mail.relaymanager.CanonicalNameLoop)
return d
def testManyRecords(self):
self.auth.addresses['test.domain'] = [
'mx1.test.domain', 'mx2.test.domain', 'mx3.test.domain'
]
return self.mx.getMX('test.domain'
).addCallback(self._cbManyRecordsSuccessfulLookup
)
def _cbManyRecordsSuccessfulLookup(self, mx):
self.failUnless(str(mx.name).split('.', 1)[0] in ('mx1', 'mx2', 'mx3'))
self.mx.markBad(str(mx.name))
return self.mx.getMX('test.domain'
).addCallback(self._cbManyRecordsDifferentResult, mx
)
def _cbManyRecordsDifferentResult(self, nextMX, mx):
self.assertNotEqual(str(mx.name), str(nextMX.name))
self.mx.markBad(str(nextMX.name))
return self.mx.getMX('test.domain'
).addCallback(self._cbManyRecordsLastResult, mx, nextMX
)
def _cbManyRecordsLastResult(self, lastMX, mx, nextMX):
self.assertNotEqual(str(mx.name), str(lastMX.name))
self.assertNotEqual(str(nextMX.name), str(lastMX.name))
self.mx.markBad(str(lastMX.name))
self.mx.markGood(str(nextMX.name))
return self.mx.getMX('test.domain'
).addCallback(self._cbManyRecordsRepeatSpecificResult, nextMX
)
def _cbManyRecordsRepeatSpecificResult(self, againMX, nextMX):
self.assertEqual(str(againMX.name), str(nextMX.name))
class LiveFireExercise(unittest.TestCase):
if interfaces.IReactorUDP(reactor, None) is None:
skip = "UDP support is required to determining MX records"
def setUp(self):
setUpDNS(self)
self.tmpdirs = [
'domainDir', 'insertionDomain', 'insertionQueue',
'destinationDomain', 'destinationQueue'
]
def tearDown(self):
for d in self.tmpdirs:
if os.path.exists(d):
shutil.rmtree(d)
return tearDownDNS(self)
def testLocalDelivery(self):
service = mail.mail.MailService()
service.smtpPortal.registerChecker(cred.checkers.AllowAnonymousAccess())
domain = mail.maildir.MaildirDirdbmDomain(service, 'domainDir')
domain.addUser('user', 'password')
service.addDomain('test.domain', domain)
service.portals[''] = service.portals['test.domain']
map(service.portals[''].registerChecker, domain.getCredentialsCheckers())
service.setQueue(mail.relay.DomainQueuer(service))
manager = mail.relaymanager.SmartHostSMTPRelayingManager(service.queue, None)
helper = mail.relaymanager.RelayStateHelper(manager, 1)
f = service.getSMTPFactory()
self.smtpServer = reactor.listenTCP(0, f, interface='127.0.0.1')
client = LineSendingProtocol([
'HELO meson',
'MAIL FROM: <user@hostname>',
'RCPT TO: <user@test.domain>',
'DATA',
'This is the message',
'.',
'QUIT'
])
done = Deferred()
f = protocol.ClientFactory()
f.protocol = lambda: client
f.clientConnectionLost = lambda *args: done.callback(None)
reactor.connectTCP('127.0.0.1', self.smtpServer.getHost().port, f)
def finished(ign):
mbox = domain.requestAvatar('user', None, pop3.IMailbox)[1]
msg = mbox.getMessage(0).read()
self.failIfEqual(msg.find('This is the message'), -1)
return self.smtpServer.stopListening()
done.addCallback(finished)
return done
def testRelayDelivery(self):
# Here is the service we will connect to and send mail from
insServ = mail.mail.MailService()
insServ.smtpPortal.registerChecker(cred.checkers.AllowAnonymousAccess())
domain = mail.maildir.MaildirDirdbmDomain(insServ, 'insertionDomain')
insServ.addDomain('insertion.domain', domain)
os.mkdir('insertionQueue')
insServ.setQueue(mail.relaymanager.Queue('insertionQueue'))
insServ.domains.setDefaultDomain(mail.relay.DomainQueuer(insServ))
manager = mail.relaymanager.SmartHostSMTPRelayingManager(insServ.queue)
manager.fArgs += ('test.identity.hostname',)
helper = mail.relaymanager.RelayStateHelper(manager, 1)
# Yoink! Now the internet obeys OUR every whim!
manager.mxcalc = mail.relaymanager.MXCalculator(self.resolver)
# And this is our whim.
self.auth.addresses['destination.domain'] = ['127.0.0.1']
f = insServ.getSMTPFactory()
self.insServer = reactor.listenTCP(0, f, interface='127.0.0.1')
# Here is the service the previous one will connect to for final
# delivery
destServ = mail.mail.MailService()
destServ.smtpPortal.registerChecker(cred.checkers.AllowAnonymousAccess())
domain = mail.maildir.MaildirDirdbmDomain(destServ, 'destinationDomain')
domain.addUser('user', 'password')
destServ.addDomain('destination.domain', domain)
os.mkdir('destinationQueue')
destServ.setQueue(mail.relaymanager.Queue('destinationQueue'))
manager2 = mail.relaymanager.SmartHostSMTPRelayingManager(destServ.queue)
helper = mail.relaymanager.RelayStateHelper(manager, 1)
helper.startService()
f = destServ.getSMTPFactory()
self.destServer = reactor.listenTCP(0, f, interface='127.0.0.1')
# Update the port number the *first* relay will connect to, because we can't use
# port 25
manager.PORT = self.destServer.getHost().port
client = LineSendingProtocol([
'HELO meson',
'MAIL FROM: <user@wherever>',
'RCPT TO: <user@destination.domain>',
'DATA',
'This is the message',
'.',
'QUIT'
])
done = Deferred()
f = protocol.ClientFactory()
f.protocol = lambda: client
f.clientConnectionLost = lambda *args: done.callback(None)
reactor.connectTCP('127.0.0.1', self.insServer.getHost().port, f)
def finished(ign):
# First part of the delivery is done. Poke the queue manually now
# so we don't have to wait for the queue to be flushed.
delivery = manager.checkState()
def delivered(ign):
mbox = domain.requestAvatar('user', None, pop3.IMailbox)[1]
msg = mbox.getMessage(0).read()
self.failIfEqual(msg.find('This is the message'), -1)
self.insServer.stopListening()
self.destServer.stopListening()
helper.stopService()
delivery.addCallback(delivered)
return delivery
done.addCallback(finished)
return done
aliasFile = StringIO.StringIO("""\
# Here's a comment
# woop another one
testuser: address1,address2, address3,
continuation@address, |/bin/process/this
usertwo:thisaddress,thataddress, lastaddress
lastuser: :/includable, /filename, |/program, address
""")
class LineBufferMessage:
def __init__(self):
self.lines = []
self.eom = False
self.lost = False
def lineReceived(self, line):
self.lines.append(line)
def eomReceived(self):
self.eom = True
return defer.succeed('<Whatever>')
def connectionLost(self):
self.lost = True
class AliasTestCase(unittest.TestCase):
lines = [
'First line',
'Next line',
'',
'After a blank line',
'Last line'
]
def setUp(self):
aliasFile.seek(0)
def testHandle(self):
result = {}
lines = [
'user: another@host\n',
'nextuser: |/bin/program\n',
'user: me@again\n',
'moreusers: :/etc/include/filename\n',
'multiuser: first@host, second@host,last@anotherhost',
]
for l in lines:
mail.alias.handle(result, l, 'TestCase', None)
self.assertEqual(result['user'], ['another@host', 'me@again'])
self.assertEqual(result['nextuser'], ['|/bin/program'])
self.assertEqual(result['moreusers'], [':/etc/include/filename'])
self.assertEqual(result['multiuser'], ['first@host', 'second@host', 'last@anotherhost'])
def testFileLoader(self):
domains = {'': object()}
result = mail.alias.loadAliasFile(domains, fp=aliasFile)
self.assertEqual(len(result), 3)
group = result['testuser']
s = str(group)
for a in ('address1', 'address2', 'address3', 'continuation@address', '/bin/process/this'):
self.failIfEqual(s.find(a), -1)
self.assertEqual(len(group), 5)
group = result['usertwo']
s = str(group)
for a in ('thisaddress', 'thataddress', 'lastaddress'):
self.failIfEqual(s.find(a), -1)
self.assertEqual(len(group), 3)
group = result['lastuser']
s = str(group)
self.assertEqual(s.find('/includable'), -1)
for a in ('/filename', 'program', 'address'):
self.failIfEqual(s.find(a), -1, '%s not found' % a)
self.assertEqual(len(group), 3)
def testMultiWrapper(self):
msgs = LineBufferMessage(), LineBufferMessage(), LineBufferMessage()
msg = mail.alias.MultiWrapper(msgs)
for L in self.lines:
msg.lineReceived(L)
return msg.eomReceived().addCallback(self._cbMultiWrapper, msgs)
def _cbMultiWrapper(self, ignored, msgs):
for m in msgs:
self.failUnless(m.eom)
self.failIf(m.lost)
self.assertEqual(self.lines, m.lines)
def testFileAlias(self):
tmpfile = self.mktemp()
a = mail.alias.FileAlias(tmpfile, None, None)
m = a.createMessageReceiver()
for l in self.lines:
m.lineReceived(l)
return m.eomReceived().addCallback(self._cbTestFileAlias, tmpfile)
def _cbTestFileAlias(self, ignored, tmpfile):
lines = file(tmpfile).readlines()
self.assertEqual([L[:-1] for L in lines], self.lines)
class DummyDomain(object):
"""
Test domain for L{AddressAliasTests}.
"""
def __init__(self, address):
self.address = address
def exists(self, user, memo=None):
"""
@returns: When a C{memo} is passed in this raises a
L{smtp.SMTPBadRcpt} exception; otherwise it returns a
callable which evaluates to a boolean indicating whether
C{user} equals the string version of L{self.address}.
@rtype: callable
"""
if memo:
raise mail.smtp.SMTPBadRcpt('ham')
return lambda: user == str(self.address)
class AddressAliasTests(unittest.TestCase):
"""
Tests for L{twisted.mail.alias.AddressAlias}.
"""
def setUp(self):
"""
Setup an L{AddressAlias}.
"""
self.address = mail.smtp.Address('foo@bar')
domains = {self.address.domain: DummyDomain(self.address)}
self.alias = mail.alias.AddressAlias(self.address, domains,
self.address)
def test_createMessageReceiver(self):
"""
L{createMessageReceiver} calls C{exists()} on the domain object
whose key matches the C{alias} passed to L{AddressAlias}.
"""
self.assertTrue(self.alias.createMessageReceiver())
def test_str(self):
"""
The string representation of L{AddressAlias} includes the alias.
"""
self.assertEqual(str(self.alias), '<Address foo@bar>')
def test_resolve(self):
"""
L{resolve} will look for additional aliases when an C{aliasmap}
dictionary is passed, and returns C{None} if none were found.
"""
self.assertEqual(self.alias.resolve({self.address: 'bar'}), None)
def test_resolveWithoutAliasmap(self):
"""
L{resolve} returns C{None} when the alias could not be found in the
C{aliasmap} and no L{mail.smtp.User} with this alias exists either.
"""
self.assertEqual(self.alias.resolve({}), None)
class DummyProcess(object):
__slots__ = ['onEnd']
class MockProcessAlias(mail.alias.ProcessAlias):
"""
An alias processor that doesn't actually launch processes.
"""
def spawnProcess(self, proto, program, path):
"""
Don't spawn a process.
"""
class MockAliasGroup(mail.alias.AliasGroup):
"""
An alias group using C{MockProcessAlias}.
"""
processAliasFactory = MockProcessAlias
class StubProcess(object):
"""
Fake implementation of L{IProcessTransport}.
@ivar signals: A list of all the signals which have been sent to this fake
process.
"""
def __init__(self):
self.signals = []
def loseConnection(self):
"""
No-op implementation of disconnection.
"""
def signalProcess(self, signal):
"""
Record a signal sent to this process for later inspection.
"""
self.signals.append(signal)
class ProcessAliasTestCase(unittest.TestCase):
"""
Tests for alias resolution.
"""
if interfaces.IReactorProcess(reactor, None) is None:
skip = "IReactorProcess not supported"
lines = [
'First line',
'Next line',
'',
'After a blank line',
'Last line'
]
def exitStatus(self, code):
"""
Construct a status from the given exit code.
@type code: L{int} between 0 and 255 inclusive.
@param code: The exit status which the code will represent.
@rtype: L{int}
@return: A status integer for the given exit code.
"""
# /* Macros for constructing status values. */
# #define __W_EXITCODE(ret, sig) ((ret) << 8 | (sig))
status = (code << 8) | 0
# Sanity check
self.assertTrue(os.WIFEXITED(status))
self.assertEqual(os.WEXITSTATUS(status), code)
self.assertFalse(os.WIFSIGNALED(status))
return status
def signalStatus(self, signal):
"""
Construct a status from the given signal.
@type signal: L{int} between 0 and 255 inclusive.
@param signal: The signal number which the status will represent.
@rtype: L{int}
@return: A status integer for the given signal.
"""
# /* If WIFSIGNALED(STATUS), the terminating signal. */
# #define __WTERMSIG(status) ((status) & 0x7f)
# /* Nonzero if STATUS indicates termination by a signal. */
# #define __WIFSIGNALED(status) \
# (((signed char) (((status) & 0x7f) + 1) >> 1) > 0)
status = signal
# Sanity check
self.assertTrue(os.WIFSIGNALED(status))
self.assertEqual(os.WTERMSIG(status), signal)
self.assertFalse(os.WIFEXITED(status))
return status
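# Worked examples of the status arithmetic above, assuming the glibc
# macros quoted in the comments: exitStatus(1) yields (1 << 8) | 0 ==
# 256, for which os.WIFEXITED() is true and os.WEXITSTATUS() recovers
# 1; signalStatus(signal.SIGKILL) yields 9 on Linux, for which
# os.WIFSIGNALED() is true and os.WTERMSIG() recovers 9.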
def setUp(self):
"""
Replace L{smtp.DNSNAME} with a well-known value.
"""
self.DNSNAME = smtp.DNSNAME
smtp.DNSNAME = ''
def tearDown(self):
"""
Restore the original value of L{smtp.DNSNAME}.
"""
smtp.DNSNAME = self.DNSNAME
def test_processAlias(self):
"""
Standard call to C{mail.alias.ProcessAlias}: check that the specified
script is called, and that the input is correctly transferred to it.
"""
sh = FilePath(self.mktemp())
sh.setContent("""\
#!/bin/sh
rm -f process.alias.out
while read i; do
echo $i >> process.alias.out
done""")
os.chmod(sh.path, 0700)
a = mail.alias.ProcessAlias(sh.path, None, None)
m = a.createMessageReceiver()
for l in self.lines:
m.lineReceived(l)
def _cbProcessAlias(ignored):
lines = file('process.alias.out').readlines()
self.assertEqual([L[:-1] for L in lines], self.lines)
return m.eomReceived().addCallback(_cbProcessAlias)
def test_processAliasTimeout(self):
"""
If the alias child process does not exit within a particular period of
time, the L{Deferred} returned by L{MessageWrapper.eomReceived} should
fail with L{ProcessAliasTimeout} and send the I{KILL} signal to the
child process.
"""
reactor = task.Clock()
transport = StubProcess()
proto = mail.alias.ProcessAliasProtocol()
proto.makeConnection(transport)
receiver = mail.alias.MessageWrapper(proto, None, reactor)
d = receiver.eomReceived()
reactor.advance(receiver.completionTimeout)
def timedOut(ignored):
self.assertEqual(transport.signals, ['KILL'])
# Now that it has been killed, disconnect the protocol associated
# with it.
proto.processEnded(
ProcessTerminated(self.signalStatus(signal.SIGKILL)))
self.assertFailure(d, mail.alias.ProcessAliasTimeout)
d.addCallback(timedOut)
return d
def test_earlyProcessTermination(self):
"""
If the process associated with an L{mail.alias.MessageWrapper} exits
before I{eomReceived} is called, the L{Deferred} returned by
I{eomReceived} should fail.
"""
transport = StubProcess()
protocol = mail.alias.ProcessAliasProtocol()
protocol.makeConnection(transport)
receiver = mail.alias.MessageWrapper(protocol, None, None)
protocol.processEnded(failure.Failure(ProcessDone(0)))
return self.assertFailure(receiver.eomReceived(), ProcessDone)
def _terminationTest(self, status):
"""
Verify that if the process associated with an
L{mail.alias.MessageWrapper} exits with the given status, the
L{Deferred} returned by I{eomReceived} fails with L{ProcessTerminated}.
"""
transport = StubProcess()
protocol = mail.alias.ProcessAliasProtocol()
protocol.makeConnection(transport)
receiver = mail.alias.MessageWrapper(protocol, None, None)
protocol.processEnded(
failure.Failure(ProcessTerminated(status)))
return self.assertFailure(receiver.eomReceived(), ProcessTerminated)
def test_errorProcessTermination(self):
"""
If the process associated with an L{mail.alias.MessageWrapper} exits
with a non-zero exit code, the L{Deferred} returned by I{eomReceived}
should fail.
"""
return self._terminationTest(self.exitStatus(1))
def test_signalProcessTermination(self):
"""
If the process associated with an L{mail.alias.MessageWrapper} exits
because it received a signal, the L{Deferred} returned by
I{eomReceived} should fail.
"""
return self._terminationTest(self.signalStatus(signal.SIGHUP))
def test_aliasResolution(self):
"""
Check that the C{resolve} method of alias processors produces the
correct set of objects:
- a direct alias with L{mail.alias.AddressAlias} if a simple input is
passed
- aliases in a file with L{mail.alias.FileWrapper} if an input in the
format '/file' is given
- aliases resulting from a process call wrapped by
L{mail.alias.MessageWrapper} if the format is '|process'
"""
aliases = {}
domain = {'': TestDomain(aliases, ['user1', 'user2', 'user3'])}
A1 = MockAliasGroup(['user1', '|echo', '/file'], domain, 'alias1')
A2 = MockAliasGroup(['user2', 'user3'], domain, 'alias2')
A3 = mail.alias.AddressAlias('alias1', domain, 'alias3')
aliases.update({
'alias1': A1,
'alias2': A2,
'alias3': A3,
})
res1 = A1.resolve(aliases)
r1 = map(str, res1.objs)
r1.sort()
expected = map(str, [
mail.alias.AddressAlias('user1', None, None),
mail.alias.MessageWrapper(DummyProcess(), 'echo'),
mail.alias.FileWrapper('/file'),
])
expected.sort()
self.assertEqual(r1, expected)
res2 = A2.resolve(aliases)
r2 = map(str, res2.objs)
r2.sort()
expected = map(str, [
mail.alias.AddressAlias('user2', None, None),
mail.alias.AddressAlias('user3', None, None)
])
expected.sort()
self.assertEqual(r2, expected)
res3 = A3.resolve(aliases)
r3 = map(str, res3.objs)
r3.sort()
expected = map(str, [
mail.alias.AddressAlias('user1', None, None),
mail.alias.MessageWrapper(DummyProcess(), 'echo'),
mail.alias.FileWrapper('/file'),
])
expected.sort()
self.assertEqual(r3, expected)
def test_cyclicAlias(self):
"""
Check that a cycle in alias resolution is correctly handled.
"""
aliases = {}
domain = {'': TestDomain(aliases, [])}
A1 = mail.alias.AddressAlias('alias2', domain, 'alias1')
A2 = mail.alias.AddressAlias('alias3', domain, 'alias2')
A3 = mail.alias.AddressAlias('alias1', domain, 'alias3')
aliases.update({
'alias1': A1,
'alias2': A2,
'alias3': A3
})
self.assertEqual(aliases['alias1'].resolve(aliases), None)
self.assertEqual(aliases['alias2'].resolve(aliases), None)
self.assertEqual(aliases['alias3'].resolve(aliases), None)
A4 = MockAliasGroup(['|echo', 'alias1'], domain, 'alias4')
aliases['alias4'] = A4
res = A4.resolve(aliases)
r = map(str, res.objs)
r.sort()
expected = map(str, [
mail.alias.MessageWrapper(DummyProcess(), 'echo')
])
expected.sort()
self.assertEqual(r, expected)
class TestDomain:
def __init__(self, aliases, users):
self.aliases = aliases
self.users = users
def exists(self, user, memo=None):
user = user.dest.local
if user in self.users:
return lambda: mail.alias.AddressAlias(user, None, None)
try:
a = self.aliases[user]
except:
raise smtp.SMTPBadRcpt(user)
else:
aliases = a.resolve(self.aliases, memo)
if aliases:
return lambda: aliases
raise smtp.SMTPBadRcpt(user)
class SSLContextFactoryTests(unittest.TestCase):
"""
Tests for twisted.mail.protocols.SSLContextFactory.
"""
def test_deprecation(self):
"""
Accessing L{twisted.mail.protocols.SSLContextFactory} emits a
deprecation warning recommending the use of the more general SSL context
factory from L{twisted.internet.ssl}.
"""
mail.protocols.SSLContextFactory
warningsShown = self.flushWarnings([self.test_deprecation])
self.assertEqual(len(warningsShown), 1)
self.assertIdentical(warningsShown[0]['category'], DeprecationWarning)
self.assertEqual(
warningsShown[0]['message'],
'twisted.mail.protocols.SSLContextFactory was deprecated in '
'Twisted 12.2.0: Use twisted.internet.ssl.'
'DefaultOpenSSLContextFactory instead.')
from twisted.python.runtime import platformType
import types
if platformType != "posix":
for o in locals().values():
if isinstance(o, (types.ClassType, type)) and issubclass(o, unittest.TestCase):
o.skip = "twisted.mail only works on posix"
| bsd-3-clause |
nihilus/DIE | DIE/Plugins/DataParsers/SegmentParser/ModuleParser.py | 9 | 1225 | from DIE.Lib.DataPluginBase import DataPluginBase
import idc
import idaapi
class ModuleParser(DataPluginBase):
"""
A parser for module values
"""
def __init__(self):
super(ModuleParser, self).__init__()
self.setPluginType("Module")
def registerSupportedTypes(self):
"""
Register module types
@return:
"""
self.addSuportedType("HMODULE", 0)
def guessValues(self, rawValue):
"""
Guess module values
"""
module = idc.GetModuleName(rawValue)
if module == 0:
return False
self.addParsedvalue(module, 5, "Module", hex(rawValue))
return True
def matchType(self, type):
"""
Check if given type is a module type
@param type: IDA type_info_t object
@return: True if given type is a module type otherwise False
"""
return True
def parseValue(self, rawValue):
"""
Parse the module value
@return:
"""
module = idc.GetModuleName(rawValue)
if module == 0:
return False
self.addParsedvalue(module, 5, "Module", hex(rawValue))
return True
| mit |
garthylou/Libreosteo | libreosteoweb/api/file_integrator.py | 1 | 19791 | # This file is part of LibreOsteo.
#
# LibreOsteo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# LibreOsteo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with LibreOsteo. If not, see <http://www.gnu.org/licenses/>.
import logging
import csv
from django.utils.translation import ugettext_lazy as _
import random
from libreosteoweb.models import Patient, ExaminationType, ExaminationStatus
from datetime import date, datetime
from .utils import enum, Singleton, _unicode
logger = logging.getLogger(__name__)
_CSV_BUFFER_SIZE = 1024 * 1024 * 10
class Extractor(object):
def extract(self, instance):
"""
Return a dict with keys 'patient' and 'examination', each giving an
extract of the file content as a mapping from line number to row content.
"""
result = {}
extract_patient = self.extract_file(instance.file_patient)
extract_examination = self.extract_file(instance.file_examination)
result['patient'] = extract_patient
result['examination'] = extract_examination
return result
def analyze(self, instance):
"""
Return a dict with keys 'patient' and 'examination', each indicating:
- whether the expected file has the correct type,
- whether the file is valid,
- whether the file is empty,
- a list of errors, if any were found.
"""
logger.info("* Analyze the instance")
result = {}
(type_file, is_valid, is_empty,
errors) = self.analyze_file(instance.file_patient)
result['patient'] = (type_file, is_valid, is_empty, errors)
(type_file, is_valid, is_empty,
errors) = self.analyze_file(instance.file_examination)
result['examination'] = (type_file, is_valid, is_empty, errors)
return result
def analyze_file(self, internal_file):
if not bool(internal_file):
return ('', False, True, [])
try:
handler = AnalyzerHandler()
report = handler.analyze(internal_file)
except:
logger.exception('Analyze failed.')
return ('', False, True, [_('Analyze failed on this file')])
if report.type == FileCsvType.PATIENT:
return ('patient', report.is_valid, report.is_empty, [])
if report.type == FileCsvType.EXAMINATION:
return ('examination', report.is_valid, report.is_empty, [])
else:
return ('patient', False, True,
[_('Cannot recognize the patient file')])
def extract_file(self, internal_file):
if not bool(internal_file):
return {}
result = {}
try:
content = FileContentProxy().get_content(internal_file,
line_filter=filter)
nb_row = content['nb_row'] - 1
if nb_row > 0:
idx = sorted(
random.sample(range(1, nb_row + 1), min(5, nb_row)))
logger.info("indexes = %s " % idx)
for i in idx:
result['%s' % (i + 1)] = content['content'][i - 1]
except:
logger.exception('Extractor failed.')
logger.info("result is %s" % result)
return result
def get_content(self, internal_file):
return FileContentProxy().get_content(internal_file,
line_filter=filter)
def unproxy(self, internal_file):
FileContentProxy().unproxy(internal_file, line_filter=filter)
def filter(line):
logger.debug("filtering ...")
if not hasattr(line, 'decode'):
logger.debug("no decode available")
return line
result_line = None
try:
logger.debug("Try to decode against utf-8")
result_line = line.decode('utf-8')
except:
logger.debug("Fail to decode against utf-8")
pass
if result_line is None:
try:
logger.debug("Try to decode against iso-8859-1")
result_line = line.decode('iso-8859-1')
except:
logger.info("Fail to decode against iso-8859-1")
result_line = _(
'Cannot read the content file. Check the encoding.')
return result_line
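# A minimal sketch of the fallback above, with hypothetical input:
# filter(b'caf\xc3\xa9') decodes as UTF-8 to u'caf\xe9', while
# filter(b'caf\xe9') fails the UTF-8 pass and is decoded as ISO-8859-1
# instead; since ISO-8859-1 assigns a character to every byte value,
# that second pass cannot fail on byte strings.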
FileCsvType = enum('FileCsvType', 'PATIENT', 'EXAMINATION')
class AnalyzeReport(object):
def __init__(self, is_empty, is_valid, internal_type):
self.is_empty = is_empty
self.is_valid = is_valid
self.type = internal_type
def is_empty(self):
return self.is_empty
def is_valid(self):
return self.is_valid
def type(self):
return self.type
class Analyzer(object):
"""
Performs the analyze on the content.
It should be inherited.
"""
identifier = None
type = None
def __init__(self, content=None):
self.content = content
def is_instance(self):
if self.content is not None:
try:
self._parse_header(self.content['header'])
return True
except ValueError:
return False
return False
def _parse_header(self, header):
_unicode(header[:]).lower().index(self.__class__.identifier)
def get_report(self):
is_empty = self.content.nb_row <= 1
# is_valid should check the number of columns
is_valid = len(self.content.header) == self.__class__.field_number
return AnalyzeReport(is_empty, is_valid, self.__class__.type)
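# For illustration: a CSV whose header contains 'nom de famille'
# (matched case-insensitively in _parse_header) and has 24 columns is
# classified as a patient file by AnalyzerPatientFile below, while a
# header containing 'conclusion' with 14 columns is classified as an
# examination file by AnalyzerExaminationFile.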
class AnalyzerPatientFile(Analyzer):
identifier = 'nom de famille'
type = FileCsvType.PATIENT
field_number = 24
def __init__(self, content=None):
super(self.__class__, self).__init__(content=content)
class AnalyzerExaminationFile(Analyzer):
identifier = 'conclusion'
type = FileCsvType.EXAMINATION
field_number = 14
def __init__(self, content=None):
super(self.__class__, self).__init__(content=content)
class FileContentAdapter(dict):
def __init__(self, ourfile, line_filter=None):
self.file = ourfile
self['content'] = None
self.filter = line_filter
if self.filter is None:
self.filter = self.passthrough
def __getattr__(self, attr):
return self[attr]
def get_content(self):
if self['content'] is None:
reader = self._get_reader()
rownum = 0
header = None
content = []
for row in reader:
# Save header row.
if rownum == 0:
header = [self.filter(c) for c in row]
else:
content.append([self.filter(c) for c in row])
rownum += 1
self.file.close()
self['content'] = content
self['nb_row'] = rownum
self['header'] = header
return self
def _get_reader(self):
if not bool(self.file):
return None
self.file.open(mode='r')
logger.info("* Try to guess the dialect on csv")
csv_buffer = self.file.read(_CSV_BUFFER_SIZE)
# Compatibility with python2 and python3
dialect = csv.Sniffer().sniff(csv_buffer)
self.file.seek(0)
reader = csv.reader(self.file, dialect)
return reader
def passthrough(self, line):
return line
class DecodeCsvReader(object):
def __init__(self, underlying_instance, decode_filter):
self.reader_instance = underlying_instance
self.filter = decode_filter
def __next__(self):
return self.filter(next(self.reader_instance))
def __iter__(self):
return self
class FileContentKey(object):
def __init__(self, ourfile, line_filter):
self.file = ourfile
self.line_filter = line_filter
def __hash__(self):
return hash((self.file, self.line_filter))
def __eq__(self, other):
return (self.file, self.line_filter) == (other.file, other.line_filter)
def __ne__(self, other):
# Not strictly necessary, but to avoid having both x==y and x!=y
# True at the same time
return not (self == other)
class FileContentProxy(object):
__metaclass__ = Singleton
file_content = {}
def get_content(self, ourfile, line_filter=None):
key = FileContentKey(ourfile, line_filter)
try:
return self.file_content[key]
except KeyError:
self.file_content[key] = FileContentAdapter(
ourfile, line_filter).get_content()
return self.file_content[key]
def unproxy(self, ourfile, line_filter=None):
key = FileContentKey(ourfile, line_filter)
try:
self.file_content[key] = None
except:
pass
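# Rough usage sketch, assuming 'some_file' is an uploaded file object:
# proxy = FileContentProxy()
# first = proxy.get_content(some_file, line_filter=filter)
# again = proxy.get_content(some_file, line_filter=filter)
# # 'first is again': the CSV is parsed once and cached under a
# # FileContentKey(file, filter) until unproxy() drops the entry.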
class AnalyzerHandler(object):
analyzers = [AnalyzerPatientFile, AnalyzerExaminationFile]
def analyze(self, ourfile):
if not bool(ourfile):
return AnalyzeReport(False, False, None)
content = self.get_content(ourfile)
for analyzer in self.analyzers:
instance = analyzer(content)
if instance.is_instance():
return instance.get_report()
logger.warning("No Analyzer found")
return AnalyzeReport(False, False, None)
def get_content(self, ourfile):
return FileContentProxy().get_content(ourfile, line_filter=filter)
def filter(self, line):
result_line = None
try:
result_line = line.decode('utf-8')
except:
pass
if result_line is None:
try:
result_line = line.decode('iso-8859-1')
except:
result_line = _(
'Cannot read the content file. Check the encoding.')
return result_line
class InvalidIntegrationFile(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class IntegratorHandler(object):
def integrate(self, file, file_additional=None, user=None):
integrator = IntegratorFactory().get_instance(file)
if integrator is None:
raise InvalidIntegrationFile(
"This file %s is not valid to be integrated." % (file))
result = integrator.integrate(file,
file_additional=file_additional,
user=user)
return result
def post_processing(self, files):
extractor = Extractor()
for f in files:
extractor.unproxy(f)
class IntegratorFactory(object):
def __init__(self, serializer_class=None):
self.extractor = Extractor()
self.serializer_class = serializer_class
def get_instance(self, file):
result = self.extractor.analyze_file(file)
if not result[1]:
return None
if result[0] == 'patient':
from .serializers import PatientSerializer
return IntegratorPatient(serializer_class=PatientSerializer)
elif result[0] == 'examination':
from .serializers import ExaminationSerializer
return IntegratorExamination(
serializer_class=ExaminationSerializer)
class FilePatientFactory(object):
def __init__(self):
from .serializers import PatientSerializer
self.serializer_class = PatientSerializer
def get_serializer(self, row):
try:
data = {
'family_name': row[1],
'original_name': row[2],
'first_name': row[3],
'birth_date': self.get_date(row[4]),
'sex': self.get_sex_value(row[5]),
'address_street': row[6],
'address_complement': row[7],
'address_zipcode': row[8],
'address_city': row[9],
'email': row[10],
'phone': row[11],
'mobile_phone': row[12],
'job': row[13],
'hobbies': row[14],
'smoker': self.get_boolean_value(row[15]),
'laterality': self.get_laterality_value(row[16]),
'important_info': row[17],
'current_treatment': row[18],
'surgical_history': row[19],
'medical_history': row[20],
'family_history': row[21],
'trauma_history': row[22],
'medical_reports': row[23],
'creation_date': self.get_default_date(),
'consent_check': False
}
serializer = self.serializer_class(data=data)
except ValueError as e:
logger.exception("Exception when creating examination.")
serializer = {'errors': ["%s" % e]}
except:
logger.exception("Exception when creating examination.")
return serializer
def get_sex_value(self, value):
if value.upper() == 'F':
return 'F'
else:
return 'M'
def get_laterality_value(self, value):
if value.upper() == 'G' or value.upper() == 'L':
return 'L'
else:
return 'R'
def get_boolean_value(self, value):
if value.lower() == 'o' or value.lower() == 'oui' or value.lower(
) == 'true' or value.lower() == 't':
return True
else:
return False
def get_default_date(self):
return date(2011, 1, 1)
def get_date(self, value):
f = "%d/%m/%Y"
return datetime.strptime(value, f).date()
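# For example, get_date('25/12/1980') parses with '%d/%m/%Y' and
# returns date(1980, 12, 25); malformed input raises ValueError, which
# get_serializer() catches and reports in its 'errors' entry.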
class AbstractIntegrator(object):
def integrate(self, file, file_additional=None, user=None):
pass
class IntegratorPatient(AbstractIntegrator):
def __init__(self, serializer_class=None):
self.extractor = Extractor()
self.serializer_class = serializer_class
def integrate(self, file, file_additional=None, user=None):
content = self.extractor.get_content(file)
nb_line = 0
errors = []
factory = FilePatientFactory()
for idx, r in enumerate(content['content']):
serializer = factory.get_serializer(r)
try:
serializer['errors']
errors.append((idx + 2, serializer['errors']))
except KeyError:
if serializer.is_valid():
serializer.save()
nb_line += 1
else:
# idx + 2 because : we have header and the index start from 0
# To have the line number we have to add 2 to the index....
errors.append((idx + 2, serializer.errors))
logger.info("errors detected, data is = %s " %
serializer.initial_data)
return (nb_line, errors)
class IntegratorExamination(AbstractIntegrator):
def __init__(self, serializer_class=None):
self.extractor = Extractor()
self.serializer_class = serializer_class
self.patient_table = None
def integrate(self, file, file_additional=None, user=None):
if file_additional is None:
return (0, [_('Missing patient file to integrate it.')])
content = self.extractor.get_content(file)
nb_line = 0
errors = []
for idx, r in enumerate(content['content']):
logger.info("* Load line from content")
try:
patient = self.get_patient(int(r[0]), file_additional)
data = {
'date': self.get_date(r[1], with_time=True),
'reason': r[2],
'reason_description': r[3],
'orl': r[4],
'visceral': r[5],
'pulmo': r[6],
'uro_gyneco': r[7],
'periphery': r[8],
'general_state': r[9],
'medical_examination': r[10],
'diagnosis': r[11],
'treatments': r[12],
'conclusion': r[13],
'patient': patient.id,
'therapeut': user.id,
'type': ExaminationType.NORMAL,
'status': ExaminationStatus.NOT_INVOICED,
'status_reason': u'%s' % _('Imported examination'),
}
serializer = self.serializer_class(data=data)
if serializer.is_valid():
serializer.save()
nb_line += 1
else:
# idx + 2 because : we have header and the index start from 0
# To have the line number we have to add 2 to the index....
errors.append((idx + 2, serializer.errors))
logger.info("errors detected, data is = %s, errors = %s " %
(data, serializer.errors))
except ValueError as e:
logger.exception("Exception when creating examination.")
errors.append((idx + 2, {
'general_problem':
_('There is a problem when reading this line :') +
_unicode(e)
}))
except:
logger.exception("Exception when creating examination.")
errors.append((idx + 2, {
'general_problem':
_('There is a problem when reading this line.')
}))
return (nb_line, errors)
def get_date(self, value, with_time=False):
f = "%d/%m/%Y"
if with_time:
return datetime.strptime(value, f)
return datetime.strptime(value, f).date()
def get_patient(self, numero, file_patient):
if not bool(file_patient):
return None
if self.patient_table is None:
self._build_patient_table(file_patient)
return self.patient_table[numero]
def _build_patient_table(self, file_patient):
content = self.extractor.get_content(file_patient)
self.patient_table = {}
factory = FilePatientFactory()
for c in content['content']:
serializer = factory.get_serializer(c)
# remove validators to get a validated data through filters
serializer.validators = []
serializer.is_valid()
self.patient_table[int(c[0])] = Patient.objects.filter(
family_name=serializer.validated_data['family_name'],
first_name=serializer.validated_data['first_name'],
birth_date=serializer.validated_data['birth_date']).first()
logger.info("found patient %s " % self.patient_table[int(c[0])])
| gpl-3.0 |
spiceqa/virt-test | libvirt/tests/src/virsh_cmd/snapshot/virsh_snapshot_create_as.py | 1 | 18414 | import re
import os
import commands
import logging
from autotest.client.shared import error
from virttest import virsh, utils_misc, xml_utils, libvirt_xml
from virttest.libvirt_xml import vm_xml, xcepts
def xml_recover(vmxml):
"""
Recover older xml config with backup vmxml.
:params: vmxml: VMXML object
"""
try:
options = "--snapshots-metadata"
vmxml.undefine(options)
vmxml.define()
return True
except xcepts.LibvirtXMLError, detail:
logging.error("Recover older xml failed:%s.", detail)
return False
def check_snap_in_image(vm_name, snap_name):
"""
check the snapshot info in image
:params: vm_name: VM name
:params: snap_name: Snapshot name
"""
domxml = virsh.dumpxml(vm_name).stdout.strip()
xtf_dom = xml_utils.XMLTreeFile(domxml)
cmd = "qemu-img info " + xtf_dom.find("devices/disk/source").get("file")
img_info = commands.getoutput(cmd).strip()
if re.search(snap_name, img_info):
logging.info("Find snapshot info in image")
return True
else:
return False
def compose_disk_options(test, params, opt_names):
"""
Compose the {disk,mem}spec options
The diskspec file needs to be placed under a suitable directory, with
a name that is configured individually. The 'value' after 'file=' is a
parameter that also needs to be read from the cfg.
:params: test & params: system parameters
:params: opt_names: params get from cfg of {disk,mem}spec options
"""
if opt_names.find("file=") >= 0:
opt_disk = opt_names.split("file=")
opt_list = opt_disk[1].split(",")
if len(opt_list) > 1:
left_opt = opt_list[1]
else:
left_opt = ""
if params.get("bad_disk") is not None or \
params.get("external_disk") is not None:
spec_disk = os.path.join(test.virtdir, params.get(opt_list[0]))
else:
spec_disk = os.path.join(test.virtdir, opt_list[0])
return opt_disk[0] + "file=" + spec_disk + left_opt
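# Rough illustration with a hypothetical cfg value: for
# opt_names = 'vda,file=snap.img' the function splits on 'file=', joins
# test.virtdir onto the disk path, and returns
# 'vda,file=<test.virtdir>/snap.img'.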
def check_snapslist(vm_name, options, option_dict, output,
snaps_before, snaps_list):
no_metadata = options.find("--no-metadata")
fdisks = "disks"
# command with print-xml will not really create snapshot
if options.find("print-xml") >= 0:
xtf = xml_utils.XMLTreeFile(output)
# With --print-xml there isn't new snapshot created
if len(snaps_before) != len(snaps_list):
raise error.TestFail("--print-xml create new snapshot")
else:
# The following does not check with print-xml
get_sname = output.split()[2]
# check domain/snapshot xml depends on if have metadata
if no_metadata < 0:
output_dump = virsh.snapshot_dumpxml(vm_name,
get_sname).stdout.strip()
else:
output_dump = virsh.dumpxml(vm_name).stdout.strip()
fdisks = "devices"
xtf = xml_utils.XMLTreeFile(output_dump)
find = 0
for snap in snaps_list:
if snap == get_sname:
find = 1
break
# Should find snap in snaplist without --no-metadata
if (find == 0 and no_metadata < 0):
raise error.TestFail("Can not find snapshot %s!"
% get_sname)
# Should not find snap in list without metadata
elif (find == 1 and no_metadata >= 0):
raise error.TestFail("Can find snapshot metadata even "
"if have --no-metadata")
elif (find == 0 and no_metadata >= 0):
logging.info("Can not find snapshot %s as no-metadata "
"is given" % get_sname)
# Check snapshot only in qemu-img
if (options.find("--disk-only") < 0 and
options.find("--memspec") < 0):
ret = check_snap_in_image(vm_name, get_sname)
if ret is False:
raise error.TestFail("No snap info in image")
else:
logging.info("Find snapshot %s in snapshot list."
% get_sname)
# Check if the disk file exist when disk-only is given
if options.find("disk-only") >= 0:
for disk in xtf.find(fdisks).findall('disk'):
diskpath = disk.find('source').get('file')
if os.path.isfile(diskpath):
logging.info("disk file %s exist" % diskpath)
os.remove(diskpath)
else:
# Didn't find <source file="path to disk"/>
# in output - this could leave a file around
# wherever the main OS image file is found
logging.debug("output_dump=%s", output_dump)
raise error.TestFail("Can not find disk %s"
% diskpath)
# Check if the guest is halted when 'halt' is given
if options.find("halt") >= 0:
domstate = virsh.domstate(vm_name)
if re.match("shut off", domstate.stdout):
logging.info("Domain is halted after create "
"snapshot")
else:
raise error.TestFail("Domain is not halted after "
"snapshot created")
# Check the snapshot xml regardless of having print-xml or not
if (options.find("name") >= 0 and no_metadata < 0):
if xtf.findtext('name') == option_dict["name"]:
logging.info("get snapshot name same as set")
else:
raise error.TestFail("Get wrong snapshot name %s" %
xtf.findtext('name'))
if (options.find("description") >= 0 and no_metadata < 0):
desc = xtf.findtext('description')
if desc == option_dict["description"]:
logging.info("get snapshot description same as set")
else:
raise error.TestFail("Get wrong description on xml")
if options.find("diskspec") >= 0:
if isinstance(option_dict['diskspec'], list):
index = len(option_dict['diskspec'])
else:
index = 1
disks = xtf.find(fdisks).findall('disk')
for num in range(index):
if isinstance(option_dict['diskspec'], list):
option_disk = option_dict['diskspec'][num]
else:
option_disk = option_dict['diskspec']
option_disk = "name=" + option_disk
disk_dict = utils_misc.valued_option_dict(option_disk,
",", 0, "=")
logging.debug("disk_dict is %s", disk_dict)
# For no metadata snapshot do not check name and
# snapshot
if no_metadata < 0:
dname = disks[num].get('name')
logging.debug("dname is %s", dname)
if dname == disk_dict['name']:
logging.info("get disk%d name same as set in "
"diskspec", num)
else:
raise error.TestFail("Get wrong disk%d name %s"
% (num, dname))
if option_disk.find('snapshot=') >= 0:
dsnap = disks[num].get('snapshot')
logging.debug("dsnap is %s", dsnap)
if dsnap == disk_dict['snapshot']:
logging.info("get disk%d snapshot type same"
" as set in diskspec", num)
else:
raise error.TestFail("Get wrong disk%d "
"snapshot type %s" %
(num, dsnap))
if option_disk.find('driver=') >= 0:
dtype = disks[num].find('driver').get('type')
if dtype == disk_dict['driver']:
logging.info("get disk%d driver type same as "
"set in diskspec", num)
else:
raise error.TestFail("Get wrong disk%d driver "
"type %s" % num, dtype)
if option_disk.find('file=') >= 0:
sfile = disks[num].find('source').get('file')
if sfile == disk_dict['file']:
logging.info("get disk%d source file same as "
"set in diskspec", num)
else:
raise error.TestFail("Get wrong disk%d source "
"file %s" % num, sfile)
# For memspec check if the xml is same as setting
# Also check if the mem file exists
if options.find("memspec") >= 0:
memspec = option_dict['memspec']
if not re.search('file=', option_dict['memspec']):
memspec = 'file=' + option_dict['memspec']
mem_dict = utils_misc.valued_option_dict(memspec, ",", 0,
"=")
logging.debug("mem_dict is %s", mem_dict)
if no_metadata < 0:
if memspec.find('snapshot=') >= 0:
snap = xtf.find('memory').get('snapshot')
if snap == mem_dict['snapshot']:
logging.info("get memory snapshot type same as"
" set in diskspec")
else:
raise error.TestFail("Get wrong memory snapshot"
" type on print xml")
memfile = xtf.find('memory').get('file')
if memfile == mem_dict['file']:
logging.info("get memory file same as set in "
"diskspec")
else:
raise error.TestFail("Get wrong memory file on "
"print xml %s", memfile)
if options.find("print-xml") < 0:
if os.path.isfile(mem_dict['file']):
logging.info("memory file generated")
os.remove(mem_dict['file'])
else:
raise error.TestFail("Fail to generate memory file"
" %s", mem_dict['file'])
def run_virsh_snapshot_create_as(test, params, env):
"""
Test the snapshot-create-as command.
Make sure that a clean repo is available, because qemu-guest-agent needs
to be installed in the guest.
The command creates a snapshot (disk and RAM) from arguments; the covered
cases are listed below (representative command lines follow the docstring):
* virsh snapshot-create-as --print-xml --diskspec --name --description
* virsh snapshot-create-as --print-xml with multi --diskspec
* virsh snapshot-create-as --print-xml --memspec
* virsh snapshot-create-as --description
* virsh snapshot-create-as --no-metadata
* virsh snapshot-create-as --no-metadata --print-xml (negative test)
* virsh snapshot-create-as --atomic --disk-only
* virsh snapshot-create-as --quiesce --disk-only (positive and negative)
* virsh snapshot-create-as --reuse-external
* virsh snapshot-create-as --disk-only --diskspec
* virsh snapshot-create-as --memspec --reuse-external --atomic(negative)
* virsh snapshot-create-as --disk-only and --memspec (negative)
* Create multi snapshots with snapshot-create-as
* Create snapshot with name a--a a--a--snap1
"""
if not virsh.has_help_command('snapshot-create-as'):
raise error.TestNAError("This version of libvirt does not support "
"the snapshot-create-as test")
vm_name = params.get("main_vm")
status_error = params.get("status_error", "no")
options = params.get("snap_createas_opts")
multi_num = params.get("multi_num", "1")
diskspec_num = params.get("diskspec_num", "1")
bad_disk = params.get("bad_disk")
external_disk = params.get("external_disk")
start_ga = params.get("start_ga", "yes")
domain_state = params.get("domain_state")
memspec_opts = params.get("memspec_opts")
diskspec_opts = params.get("diskspec_opts")
opt_names = locals()
if memspec_opts is not None:
mem_options = compose_disk_options(test, params, memspec_opts)
# if the memspec parameter has no "file=" part, we only need to
# prefix it with the test directory.
if mem_options is None:
mem_options = os.path.join(test.virtdir, memspec_opts)
options += " --memspec " + mem_options
tag_diskspec = 0
dnum = int(diskspec_num)
if diskspec_opts is not None:
tag_diskspec = 1
opt_names['diskopts_1'] = diskspec_opts
# diskspec_opts[n] is used in cfg when more than 1 --diskspec is used
if dnum > 1:
tag_diskspec = 1
for i in range(1, dnum + 1):
opt_names["diskopts_%s" % i] = params.get("diskspec_opts%s" % i)
if tag_diskspec == 1:
for i in range(1, dnum + 1):
disk_options = compose_disk_options(test, params,
opt_names["diskopts_%s" % i])
options += " --diskspec " + disk_options
logging.debug("options are %s", options)
vm = env.get_vm(vm_name)
option_dict = {}
option_dict = utils_misc.valued_option_dict(options, r' --(?!-)')
logging.debug("option_dict is %s", option_dict)
# A backup of original vm
vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
logging.debug("original xml is %s", vmxml_backup)
# Generate empty image for negative test
if bad_disk is not None:
bad_disk = os.path.join(test.virtdir, bad_disk)
os.close(os.open(bad_disk, os.O_RDWR | os.O_CREAT))  # don't leak the fd
# Generate external disk
if external_disk is not None:
external_disk = os.path.join(test.virtdir, external_disk)
commands.getoutput("qemu-img create -f qcow2 %s 1G" % external_disk)
try:
# Start qemu-ga in the guest if --quiesce is given
if options.find("quiesce") >= 0:
if vm.is_alive():
vm.destroy()
virt_xml_obj = libvirt_xml.VMXML(virsh_instance=virsh)
virt_xml_obj.set_agent_channel(vm_name)
vm.start()
if start_ga == "yes":
session = vm.wait_for_login()
# Install qemu-guest-agent if it is not already present
cmd = "rpm -q qemu-guest-agent || yum install -y qemu-guest-agent"
stat_install = session.cmd_status(cmd, 300)
if stat_install != 0:
raise error.TestFail("Fail to install qemu-guest-agent, make"
"sure that you have usable repo in guest")
# Check if qemu-ga already started
stat_ps = session.cmd_status("ps aux |grep [q]emu-ga")
if stat_ps != 0:
session.cmd("qemu-ga -d")
# Check if the qemu-ga really started
stat_ps = session.cmd_status("ps aux |grep [q]emu-ga")
if stat_ps != 0:
raise error.TestFail("Fail to run qemu-ga in guest")
if domain_state == "paused":
virsh.suspend(vm_name)
# Record the previous snapshot-list
snaps_before = virsh.snapshot_list(vm_name)
# Run virsh command
# May create several snapshots, according to configuration
for count in range(int(multi_num)):
cmd_result = virsh.snapshot_create_as(vm_name, options,
ignore_status=True, debug=True)
output = cmd_result.stdout.strip()
status = cmd_result.exit_status
# check status_error
if status_error == "yes":
if status == 0:
raise error.TestFail("Run successfully with wrong command!")
else:
# Check memspec file should be removed if failed
if (options.find("memspec") >= 0
and options.find("atomic") >= 0):
if os.path.isfile(option_dict['memspec']):
os.remove(option_dict['memspec'])
raise error.TestFail("Run failed but file %s exist"
% option_dict['memspec'])
else:
logging.info("Run failed as expected and memspec file"
" already beed removed")
else:
logging.info("Run failed as expected")
elif status_error == "no":
if status != 0:
raise error.TestFail("Run failed with right command: %s"
% output)
else:
# Check the special options
snaps_list = virsh.snapshot_list(vm_name)
logging.debug("snaps_list is %s", snaps_list)
check_snapslist(vm_name, options, option_dict, output,
snaps_before, snaps_list)
finally:
# Environment cleanup
if options.find("quiesce") >= 0 and start_ga == "yes":
session.cmd("rpm -e qemu-guest-agent")
# recover domain xml
xml_recover(vmxml_backup)
path = "/var/lib/libvirt/qemu/snapshot/" + vm_name
if os.path.isfile(path):
raise error.TestFail("Still can find snapshot metadata")
# rm bad disks
if bad_disk is not None:
os.remove(bad_disk)
| gpl-2.0 |
chitr/neutron | neutron/db/migration/alembic_migrations/versions/57086602ca0a_scrap_nsx_adv_svcs_models.py | 47 | 1156 | # Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""scrap_nsx_adv_svcs_models
Revision ID: 57086602ca0a
Revises: 28c0ffb8ebbd
Create Date: 2014-12-17 22:33:30.465392
"""
# revision identifiers, used by Alembic.
revision = '57086602ca0a'
down_revision = '28c0ffb8ebbd'
from alembic import op
def upgrade():
op.drop_table('vcns_edge_pool_bindings')
op.drop_table('vcns_firewall_rule_bindings')
op.drop_table('vcns_edge_monitor_bindings')
op.drop_table('vcns_edge_vip_bindings')
op.drop_table(u'routerservicetypebindings')
op.drop_table(u'servicerouterbindings')
| apache-2.0 |
victoredwardocallaghan/xen | tools/xm-test/tests/restore/01_restore_basic_pos.py | 42 | 2022 | #!/usr/bin/python
# Copyright (C) International Business Machines Corp., 2005
# Author: Dan Smith <danms@us.ibm.com>
# Save a domain and attempt to restore it
#
# Since we don't want to depend on the fact that save/01_basic_pos.py
# ran successfully, we try to save the domain here again
import time
from XmTestLib import *
if ENABLE_HVM_SUPPORT:
SKIP("Restore currently not supported for HVM domains")
domain = XmTestDomain()
try:
console = domain.start()
except DomainError, e:
if verbose:
print "Failed to create test domain because:"
print e.extra
FAIL(str(e))
# Make sure the domain isn't DOA
try:
console.runCmd("foo=bar")
except ConsoleError, e:
FAIL(str(e))
domain.closeConsole()
# Save it out
try:
s, o = traceCommand("xm save %s /tmp/test.state" % domain.getName(),
timeout=30)
except TimeoutError, e:
FAIL(str(e))
if s != 0:
FAIL("save command exited %i != 0" % s)
# FIXME: Give the system some time to update the internal state
traceCommand("xm list")
# Make sure it's gone
if isDomainRunning(domain.getName()):
FAIL("Domain still running after save!")
# Let things settle
time.sleep(2)
# Restore it in
status, output = traceCommand("xm restore /tmp/test.state",
timeout=30)
if status != 0:
FAIL("restore command exited %i != 0" % status)
# Make sure it's running
if not isDomainRunning(domain.getName()):
FAIL("Restore didn't result in a running %s domain!" % domain.getName())
# Make sure it's alive
try:
newConsole = domain.getConsole()
# Enable debug dumping because this generates an Oops on x86_64
newConsole.debugMe = True
newConsole.sendInput("ls")
run = newConsole.runCmd("echo xx$foo")
if not re.search("bar", run["output"]):
FAIL("Restored domain has been reset")
except ConsoleError, e:
FAIL("Restored domain is dead (%s)" % str(e))
domain.closeConsole()
# This only works because the domain
# still has the same name
domain.stop()
| gpl-2.0 |
fzimmermann89/pyload | module/setup.py | 35 | 18929 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License,
or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, see <http://www.gnu.org/licenses/>.
@author: RaNaN
"""
from getpass import getpass
import module.common.pylgettext as gettext
import os
from os import makedirs
from os.path import abspath
from os.path import dirname
from os.path import exists
from os.path import join
from subprocess import PIPE
from subprocess import call
import sys
from sys import exit
from module.utils import get_console_encoding
class Setup():
"""
pyLoad's initial setup configuration assistant
"""
def __init__(self, path, config):
self.path = path
self.config = config
self.stdin_encoding = get_console_encoding(sys.stdin.encoding)
def start(self):
langs = self.config.getMetaData("general", "language")["type"].split(";")
lang = self.ask(u"Choose your Language / Wähle deine Sprache", "en", langs)
gettext.setpaths([join(os.sep, "usr", "share", "pyload", "locale"), None])
translation = gettext.translation("setup", join(self.path, "locale"), languages=[lang, "en"], fallback=True)
translation.install(True)
#Input shorthand for yes
self.yes = _("y")
#Input shorthand for no
self.no = _("n")
# print ""
# print _("Would you like to configure pyLoad via Webinterface?")
# print _("You need a Browser and a connection to this PC for it.")
# viaweb = self.ask(_("Start initial webinterface for configuration?"), "y", bool=True)
# if viaweb:
# try:
# from module.web import ServerThread
# ServerThread.setup = self
# from module.web import webinterface
# webinterface.run_simple()
# return False
# except Exception, e:
# print "Setup failed with this error: ", e
# print "Falling back to commandline setup."
print ""
print _("Welcome to the pyLoad Configuration Assistent.")
print _("It will check your system and make a basic setup in order to run pyLoad.")
print ""
print _("The value in brackets [] always is the default value,")
print _("in case you don't want to change it or you are unsure what to choose, just hit enter.")
print _(
"Don't forget: You can always rerun this assistent with --setup or -s parameter, when you start pyLoadCore.")
print _("If you have any problems with this assistent hit STRG-C,")
print _("to abort and don't let him start with pyLoadCore automatically anymore.")
print ""
print _("When you are ready for system check, hit enter.")
raw_input()
basic, ssl, captcha, gui, web, js = self.system_check()
print ""
if not basic:
print _("You need pycurl, sqlite and python 2.5, 2.6 or 2.7 to run pyLoad.")
print _("Please correct this and re-run pyLoad.")
print _("Setup will now close.")
raw_input()
return False
raw_input(_("System check finished, hit enter to see your status report."))
print ""
print _("## Status ##")
print ""
avail = []
if self.check_module("Crypto"): avail.append(_("container decrypting"))
if ssl: avail.append(_("ssl connection"))
if captcha: avail.append(_("automatic captcha decryption"))
if gui: avail.append(_("GUI"))
if web: avail.append(_("Webinterface"))
if js: avail.append(_("extended Click'N'Load"))
string = ""
for av in avail:
string += ", " + av
print _("Features available:") + string[1:]
print ""
if len(avail) < 5:
print _("Featues missing: ")
print
if not self.check_module("Crypto"):
print _("no py-crypto available")
print _("You need this if you want to decrypt container files.")
print ""
if not ssl:
print _("no SSL available")
print _("This is needed if you want to establish a secure connection to core or webinterface.")
print _("If you only want to access locally to pyLoad ssl is not usefull.")
print ""
if not captcha:
print _("no Captcha Recognition available")
print _("Only needed for some hosters and as freeuser.")
print ""
if not gui:
print _("Gui not available")
print _("The Graphical User Interface.")
print ""
if not js:
print _("no JavaScript engine found")
print _("You will need this for some Click'N'Load links. Install Spidermonkey, ossp-js, pyv8 or rhino")
print _("You can abort the setup now and fix some dependicies if you want.")
con = self.ask(_("Continue with setup?"), self.yes, bool=True)
if not con:
return False
print ""
print _("Do you want to change the config path? Current is %s") % abspath("")
print _(
"If you use pyLoad on a server or the home partition lives on an iternal flash it may be a good idea to change it.")
path = self.ask(_("Change config path?"), self.no, bool=True)
if path:
self.conf_path()
#calls exit when changed
print ""
print _("Do you want to configure login data and basic settings?")
print _("This is recommend for first run.")
con = self.ask(_("Make basic setup?"), self.yes, bool=True)
if con:
self.conf_basic()
if ssl:
print ""
print _("Do you want to configure ssl?")
ssl = self.ask(_("Configure ssl?"), self.no, bool=True)
if ssl:
self.conf_ssl()
if web:
print ""
print _("Do you want to configure webinterface?")
web = self.ask(_("Configure webinterface?"), self.yes, bool=True)
if web:
self.conf_web()
print ""
print _("Setup finished successfully.")
print _("Hit enter to exit and restart pyLoad")
raw_input()
return True
def system_check(self):
""" make a systemcheck and return the results"""
print _("## System Check ##")
if sys.version_info[:2] > (2, 7):
print _("Your python version is to new, Please use Python 2.6/2.7")
python = False
elif sys.version_info[:2] < (2, 5):
print _("Your python version is to old, Please use at least Python 2.5")
python = False
else:
print _("Python Version: OK")
python = True
curl = self.check_module("pycurl")
self.print_dep("pycurl", curl)
sqlite = self.check_module("sqlite3")
self.print_dep("sqlite3", sqlite)
basic = python and curl and sqlite
print ""
crypto = self.check_module("Crypto")
self.print_dep("pycrypto", crypto)
ssl = self.check_module("OpenSSL")
self.print_dep("py-OpenSSL", ssl)
print ""
pil = self.check_module("Image")
self.print_dep("py-imaging", pil)
if os.name == "nt":
tesser = self.check_prog([join(pypath, "tesseract", "tesseract.exe"), "-v"])
else:
tesser = self.check_prog(["tesseract", "-v"])
self.print_dep("tesseract", tesser)
captcha = pil and tesser
print ""
gui = self.check_module("PyQt4")
self.print_dep("PyQt4", gui)
print ""
jinja = True
try:
import jinja2
v = jinja2.__version__
if v and "unknown" not in v:
if not v.startswith("2.5") and not v.startswith("2.6"):
print _("Your installed jinja2 version %s seems too old.") % jinja2.__version__
print _("You can safely continue but if the webinterface is not working,")
print _("please upgrade or deinstall it, pyLoad includes a sufficient jinja2 libary.")
print
jinja = False
except:
pass
self.print_dep("jinja2", jinja)
beaker = self.check_module("beaker")
self.print_dep("beaker", beaker)
web = sqlite and beaker
from module.common import JsEngine
js = True if JsEngine.ENGINE else False
self.print_dep(_("JS engine"), js)
return basic, ssl, captcha, gui, web, js
def conf_basic(self):
print ""
print _("## Basic Setup ##")
print ""
print _("The following logindata is valid for CLI, GUI and webinterface.")
from module.database import DatabaseBackend
db = DatabaseBackend(None)
db.setup()
username = self.ask(_("Username"), "User")
password = self.ask("", "", password=True)
db.addUser(username, password)
db.shutdown()
print ""
print _("External clients (GUI, CLI or other) need remote access to work over the network.")
print _("However, if you only want to use the webinterface you may disable it to save ram.")
self.config["remote"]["activated"] = self.ask(_("Enable remote access"), self.yes, bool=True)
print ""
langs = self.config.getMetaData("general", "language")
self.config["general"]["language"] = self.ask(_("Language"), "en", langs["type"].split(";"))
self.config["general"]["download_folder"] = self.ask(_("Downloadfolder"), "Downloads")
self.config["download"]["max_downloads"] = self.ask(_("Max parallel downloads"), "3")
#print _("You should disable checksum proofing, if you have low hardware requirements.")
#self.config["general"]["checksum"] = self.ask(_("Proof checksum?"), "y", bool=True)
reconnect = self.ask(_("Use Reconnect?"), self.no, bool=True)
self.config["reconnect"]["activated"] = reconnect
if reconnect:
self.config["reconnect"]["method"] = self.ask(_("Reconnect script location"), "./reconnect.sh")
def conf_web(self):
print ""
print _("## Webinterface Setup ##")
print ""
self.config["webinterface"]["activated"] = self.ask(_("Activate webinterface?"), self.yes, bool=True)
print ""
print _("Listen address, if you use 127.0.0.1 or localhost, the webinterface will only accessible locally.")
self.config["webinterface"]["host"] = self.ask(_("Address"), "0.0.0.0")
self.config["webinterface"]["port"] = self.ask(_("Port"), "8000")
print ""
print _("pyLoad offers several server backends, now following a short explanation.")
print "builtin:", _("Default server, best choice if you dont know which one to choose.")
print "threaded:", _("This server offers SSL and is a good alternative to builtin.")
print "fastcgi:", _(
"Can be used by apache, lighttpd, requires you to configure them, which is not too easy job.")
print "lightweight:", _("Very fast alternative written in C, requires libev and linux knowlegde.")
print "\t", _("Get it from here: https://github.com/jonashaag/bjoern, compile it")
print "\t", _("and copy bjoern.so to module/lib")
print
print _(
"Attention: In some rare cases the builtin server is not working, if you notice problems with the webinterface")
print _("come back here and change the builtin server to the threaded one here.")
self.config["webinterface"]["server"] = self.ask(_("Server"), "builtin",
["builtin", "threaded", "fastcgi", "lightweight"])
def conf_ssl(self):
print ""
print _("## SSL Setup ##")
print ""
print _("Execute these commands from pyLoad config folder to make ssl certificates:")
print ""
print "openssl genrsa -out ssl.key 1024"
print "openssl req -new -key ssl.key -out ssl.csr"
print "openssl req -days 36500 -x509 -key ssl.key -in ssl.csr > ssl.crt "
print ""
print _("If you're done and everything went fine, you can activate ssl now.")
self.config["ssl"]["activated"] = self.ask(_("Activate SSL?"), self.yes, bool=True)
def set_user(self):
gettext.setpaths([join(os.sep, "usr", "share", "pyload", "locale"), None])
translation = gettext.translation("setup", join(self.path, "locale"),
languages=[self.config["general"]["language"], "en"], fallback=True)
translation.install(True)
from module.database import DatabaseBackend
db = DatabaseBackend(None)
db.setup()
noaction = True
try:
while True:
print _("Select action")
print _("1 - Create/Edit user")
print _("2 - List users")
print _("3 - Remove user")
print _("4 - Quit")
action = raw_input("[1]/2/3/4: ")
if not action in ("1", "2", "3", "4"):
continue
elif action == "1":
print ""
username = self.ask(_("Username"), "User")
password = self.ask("", "", password=True)
db.addUser(username, password)
noaction = False
elif action == "2":
print ""
print _("Users")
print "-----"
users = db.listUsers()
noaction = False
for user in users:
print user
print "-----"
print ""
elif action == "3":
print ""
username = self.ask(_("Username"), "")
if username:
db.removeUser(username)
noaction = False
elif action == "4":
break
finally:
if not noaction:
db.shutdown()
def conf_path(self, trans=False):
if trans:
gettext.setpaths([join(os.sep, "usr", "share", "pyload", "locale"), None])
translation = gettext.translation("setup", join(self.path, "locale"),
languages=[self.config["general"]["language"], "en"], fallback=True)
translation.install(True)
print _("Setting new configpath, current configuration will not be transfered!")
path = self.ask(_("Configpath"), abspath(""))
try:
path = join(pypath, path)
if not exists(path):
makedirs(path)
f = open(join(pypath, "module", "config", "configdir"), "wb")
f.write(path)
f.close()
print _("Configpath changed, setup will now close, please restart to go on.")
print _("Press Enter to exit.")
raw_input()
exit()
except Exception, e:
print _("Setting config path failed: %s") % str(e)
def print_dep(self, name, value):
"""Print Status of dependency"""
if value:
print _("%s: OK") % name
else:
print _("%s: missing") % name
def check_module(self, module):
try:
__import__(module)
return True
except:
return False
def check_prog(self, command):
pipe = PIPE
try:
call(command, stdout=pipe, stderr=pipe)
return True
except:
return False
def ask(self, qst, default, answers=[], bool=False, password=False):
"""produce one line to asking for input"""
if answers:
info = "("
for i, answer in enumerate(answers):
info += (", " if i != 0 else "") + str((answer == default and "[%s]" % answer) or answer)
info += ")"
elif bool:
if default == self.yes:
info = "([%s]/%s)" % (self.yes, self.no)
else:
info = "(%s/[%s])" % (self.yes, self.no)
else:
info = "[%s]" % default
if password:
p1 = True
p2 = False
while p1 != p2:
# getpass(_("Password: ")) will crash on systems with broken locales (Win, NAS)
sys.stdout.write(_("Password: "))
p1 = getpass("")
if len(p1) < 4:
print _("Password too short. Use at least 4 symbols.")
continue
sys.stdout.write(_("Password (again): "))
p2 = getpass("")
if p1 == p2:
return p1
else:
print _("Passwords did not match.")
while True:
try:
input = raw_input(qst + " %s: " % info)
except KeyboardInterrupt:
print "\nSetup interrupted"
exit()
input = input.decode(self.stdin_encoding)
if input.strip() == "":
input = default
if bool:
# yes/true/t are accepted inputs for boolean True
if input.lower().strip() in [self.yes, _("yes"), _("true"), _("t"), "yes"]:
return True
# no/false/f are accepted inputs for boolean False
elif input.lower().strip() in [self.no, _("no"), _("false"), _("f"), "no"]:
return False
else:
print _("Invalid Input")
continue
if not answers:
return input
else:
if input in answers:
return input
else:
print _("Invalid Input")
if __name__ == "__main__":
test = Setup(join(abspath(dirname(__file__)), ".."), None)
test.start()
| gpl-3.0 |
anguoyang/SMQTK | python/EventContentDescriptor/kernels.py | 3 | 12623 | """
LICENCE
-------
Copyright 2013 by Kitware, Inc. All Rights Reserved. Please refer to
KITWARE_LICENSE.TXT for licensing information, or contact General Counsel,
Kitware, Inc., 28 Corporate Drive, Clifton Park, NY 12065.
"""
import numpy as np
import math
import timeit
#import homkermap_wrapper as homkermap
#import multiprocessing as mp
__all__ = ['hik', 'compute_hik_matrix_help', 'compute_hik_matrix']
# Do not change this
IDXS_KERNELS = []
IDXS_KERNELS.append(('linear', 0))
IDXS_KERNELS.append(('ngd', 1))
IDXS_KERNELS.append(('hik', 2))
def compute_kernel_matrix(data1, data2=None, func_kernel=None,
recounting=False,
diag_zero=False, verbose=-1, mirror=False):
"""
Given two numpy matrices as inputs (with row-wise d-dimensional data),
compute kernel values between row-wise data1 & data2, and return the matrix.
By default, diagonal is computed too, but, can be optionally turned off to be set to be zeros
@param data1: n1-by-d row-wise data matrix in numpy format
@param data2: n2-by-d row-wise data matrix in numpy format
@param func_kernel: kernel function (e.g., functions from kernels library)
@param recounting: if True, also output an n1-by-n2-by-DIM matrix showing the contribution of each bin towards the kernel computation. DIM is the feature dimension.
@type recounting: bool
@param diag_zero: if True, set all the diagonal entries to zero, default = False
@param verbose: integer, generate more message
@param mirror: if True (should be set only when kernel matrix is square), compute lower left, then, copy to top right
@return: a dictionary with 'kernel_matrix' (n1-by-n2), and (optional) 'kernel_matrix_recounting' (n1-by-n2-by-dim)
"""
n1 = data1.shape[0]
if data2 is None:  # 'is None' avoids numpy's elementwise == comparison
data2 = data1
n2 = data2.shape[0]
dim = len(data1[0])
#print 'Kernel Matrix size = (%d,%d)'%(n1,n2)
mat = np.zeros((n1,n2), dtype=np.float32)
mat_recounting = None
if recounting:
mat_recounting = np.zeros((n1,n2,dim))
if mirror:
for i in range(n1):
if verbose!=-1 and (i%verbose)==0:
print '\t row %d ..'%i
for j in range(i+1):
if j==i and diag_zero:
continue
# consider using inc_bin_kernel here
output = func_kernel(data1[i], data2[j], recounting = recounting)
mat[i,j] = output['kernel']
mat[j,i] = mat[i,j]
if recounting:
mat_recounting[i,j] = output['recounting']
mat_recounting[j,i] = mat_recounting[i,j]
else:
for i in range(n1):
if verbose!=-1 and (i%verbose)==0:
print '\t row %d ..'%i
for j in range(n2):
if j==i and diag_zero:
continue
# consider using inc_bin_kernel here
output = func_kernel(data1[i], data2[j], recounting = recounting)
mat[i,j] = output['kernel']
if recounting:
mat_recounting[i,j] = output['recounting']
outputs = dict()
outputs['kernel_matrix'] = mat
outputs['kernel_matrix_recounting'] = mat_recounting
return outputs
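# Minimal usage sketch (hypothetical data; the '_example' helper below is
# illustrative and not part of the original module). It exercises
# compute_kernel_matrix with the hik kernel defined further down and checks
# the documented output shapes.
def _compute_kernel_matrix_example():
    data = np.random.rand(3, 4)  # 3 samples, 4-dimensional features
    out = compute_kernel_matrix(data, func_kernel=hik, recounting=True)
    assert out['kernel_matrix'].shape == (3, 3)
    assert out['kernel_matrix_recounting'].shape == (3, 3, 4)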
######################################
# HIK: histogram intersection kernels
######################################
def hik(hist1, hist2, recounting=False):
"""histogram intersection kernel between two histograms
@param hist1: vector
@type hist1: 1D numpy.array
@param hist2: vector
@type hist2: 1D numpy.array
@param recounting: if True, return bin-wise outputs as well
@type recounting: bool
@return: dictionary of 'kernel' value and (optional) 'recounting'
@rtype: dict of (float, (optional) np.array)
"""
bins = (hist1 + hist2 - np.abs(hist1 - hist2)) * 0.5
output = dict()
output['kernel'] = bins.sum()
if recounting:
output['recounting'] = bins
return output
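# Worked example (hypothetical histograms): for h1 = [0.2, 0.5, 0.3] and
# h2 = [0.4, 0.4, 0.2], the per-bin minima are [0.2, 0.4, 0.2], so
# hik(h1, h2)['kernel'] == 0.8; with recounting=True those per-bin minima
# are returned under 'recounting'.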
def hik1(hist1, hist2, recounting = False):
"""histogram intersection kernel between two histograms
An alternative HIK implementation.
NOTE: much slower than hik, don't use it.
@param hist1: vector
@type hist1: 1D numpy.array
@param hist2: vector
@type hist2: 1D numpy.array
@param recounting: if True, return bin-wise outputs as well
@type recounting: bool
@return: dictionary of 'kernel' value and (optional) 'recounting'
@rtype: dict of (float, (optional) np.array)
"""
bins = np.amin(np.vstack((hist1, hist2)), axis=0)
output = dict()
output['kernel'] = bins.sum()
if recounting:
output['recounting'] = bins
return output
def compare_hik_hik1(n=500, d=100, repeat=5):
"""
Compare speed of hik vs hik1 implementation,
by computing HIK matrix on randomly generated data
@param n: number of data points
@param d: dimension of every vector
"""
import time
for i in range(repeat):
print '------------------------------------------------------'
print 'testing %d / %d'%(i+1, repeat)
print '------------------------------------------------------'
data = np.random.rand(n, d)
print '... data sampled'
print '... computing matrix 1'
start1 = time.clock()
kernel_matrix1 = compute_kernel_matrix(data, func_kernel=hik)['kernel_matrix']
time1 = time.clock() - start1
print '... computing matrix 2'
start2 = time.clock()
kernel_matrix2 = compute_kernel_matrix(data, func_kernel=hik1)['kernel_matrix']
time2 = time.clock() - start2
print 'time by hik = ', time1
print 'time by hik1 = ', time2
print 'results match each other = ', np.array_equal(kernel_matrix1, kernel_matrix2)
#
#def compute_hik_matrix_help(data1, data2=None, diag_zero=False):
# """
# Given two numpy matrices as inputs,
# compute HIK matrix between row-wise data1 & data2,
# and return the matrix.
# By default, diagonal is computed too, but, can be optionally turned off to be set to be zeros
# """
#
# return compute_kernel_matrix(data1, data2, func_kernel=hik, diag_zero=diag_zero)
# The code below is very slow, not worth it unless massively (>100 threads) perhaps.
# n1 = data1.shape[0]
#
# if data2 == None:
# data2 = data1
#
# n2 = data2.shape[0]
#
# print 'HIK matrix size = (%d,%d)'%(n1,n2)
#
# nr_processor = 7
# pool = mp.Pool(nr_processor)
#
# def compute_ij(job):
# (data1_, data2_, mat_, i, j) = job
# mat_[i,j] = hik(data1_[i], data2_[j])
#
# mat = np.zeros((n1,n2), dtype=np.float32)
#
# jobs = []
# for i in range(n1):
# for j in range(n2):
# jobs.append((data1, data2, mat, i, j))
#
# pool.map(compute_ij, jobs)
# return mat
#def compute_hik_matrix(filein1, filein2, fileout):
# """
# Compute full HIK kernel matrix between two sets or itself
# Possibly for pagerank algorithm
# """
# data1 = np.loadtxt(filein1)
# print 'compute_hik_matrix: loaded data1 = %s'%filein1
# data2 = None
# if filein2 == None:
# data2 = data1
# else:
# data2 = np.loadtxt(filein2)
#
# print 'compute_hik_matrix: loaded data2 = %s'%filein2
# mat = compute_hik_matrix_help(data1, data2, True)
#
# # make each row into a markov transition probabilities summing to one
# n1 = data1.shape[0]
# for i in range(n1):
# sumi = mat[i,:].sum()
# if sumi > 0:
# mat[i,:] /= sumi
#
# np.savetxt(fileout, mat, fmt='%g')
######################################
# Linear kernel (very basic)
######################################
def linear(data1, data2=None, recounting=False):
"""
Simple linear kernel
@param recounting: if True, return bin-wise outputs as well
@type recounting: bool
@return: dictionary of 'kernel' value and (optional) 'recounting'
@rtype: dict of (float, (optional) np.array)
"""
if data2 is None:  # 'is None' avoids numpy's elementwise == comparison
data2 = data1
bins = data1 * data2
kernel = bins.sum()
output = dict()
output['kernel'] = kernel
if recounting:
output['recounting'] = bins
return output
def L1norm_linear(data1, data2=None, recounting=False):
"""
Linear kernel on L1-normalized inputs.
It is assumed that L1 normalization has already been applied,
so this simply computes the 'linear' kernel.
@param recounting: if True, return bin-wise outputs as well
@type recounting: bool
@return: dictionary of 'kernel' value and (optional) 'recounting'
@rtype: dict of (float, (optional) np.array)
"""
return linear(data1, data2=data2, recounting = recounting)
######################################
# Homogeneous kernel map
# based on Andrea Vedaldi's work
######################################
#def compute_kernel_matrix_homkermap(data1, data2=None,
# N=3, kernelType = homkermap.VlHomogeneousKernelIntersection,
# gamma = 1.0, period = -1.0,
# windowType = homkermap.VlHomogeneousKernelMapWindowRectangular,
# dtype = np.float32):
# """
# Approximate kernel matrix computation using Homogeneous kernel map.
# Supports HIK, Chi2, Jensen-Shannon
# For detailed parameter specification, please look at homkermap_wrapper.homkermap
#
# @param data1: row-wise data array in numpy array format
# @param data2: row-wise data array in numpy array format
# @return: kernel matrix between data1 & data2.
# """
#
# data1_map = homkermap.homkermap(data1, N=N, kernelType=kernelType,
# gamma=gamma, period=period, windowType=windowType,
# dtype = dtype)
# data1_map = np.mat(data1_map)
#
# if data2 == None:
# data2_map = data1_map
# else:
# data2_map = homkermap.homkermap(data2, N=N, kernelType=kernelType,
# gamma=gamma, period=period, windowType=windowType,
# dtype = dtype)
#
# n1 = data1_map.shape[0]
# n2 = data2_map.shape[0]
#
# mat = data1_map * data2_map.T
#
#
## #print 'Kernel Matrix size = (%d,%d)'%(n1,n2)
## mat = np.zeros((n1,n2), dtype=np.float32)
##
## if data2 == None:
## for i in range(n1):
## for j in range(i+1):
## mat[i,j] = np.inner(data1_map[i], data2_map[j])
## mat[j,i] = mat[i,j]
## else:
## for i in range(n1):
## for j in range(n2):
## mat[i,j] = np.inner(data1[i], data2[j])
# return mat
####################################################################################################
# NGD: negative geodesic distance kernel
# ala Dell Zhang's SIGIR 2005 paper "Text Classification with Kernels on the Multinomial Manifold
####################################################################################################
def ngd(mult1_sqrt, mult2_sqrt, recounting=False):
"""
negative geodesic distance kernel for normalized histograms (equivalently multinomial)
ala Dell Zhang's SIGIR 2005 paper
@param mult1_sqrt: square root vector of an L1-normalized multinomial (histogram), numpy format
@param mult2_sqrt: same as above for mult1_sqrt
@param recounting: if True, return bin-wise outputs as well
@type recounting: bool
@return: dictionary of 'kernel' value and (optional) 'recounting'
@rtype: dict of (float, (optional) np.array)
"""
bins = mult1_sqrt * mult2_sqrt
tmp1 = bins.sum()
if tmp1 > 1.0:
tmp1 = 1.0
kernel = (-2.0 * math.acos(tmp1))
output = dict()
output['kernel'] = kernel
if recounting:
output['recounting'] = bins
return output
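# Sanity checks (hedged examples): for identical L1-normalized histograms
# the inner product of the square-root vectors is 1, so the kernel is
# -2*acos(1) = 0, its maximum; for histograms with disjoint support the
# inner product is 0 and the kernel is -2*acos(0) = -pi.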
def ngd_dist(mult1_sqrt, mult2_sqrt):
"""
negative geodesic distance (not a kernel!) for normalized histograms (equivalently multinomial)
ala Dell Zhang's SIGIR 2005 paper
@param mult1_sqrt: square root vector of an L1-normalized multinomial (histogram), numpy format
@param mult2_sqrt: same as above for mult1_sqrt
@return: ngd distance
"""
tmp1 = np.inner(mult1_sqrt, mult2_sqrt)
if tmp1 > 1.0:
tmp1 = 1.0
return math.acos(tmp1)
if __name__ == "__main__":
# tests the speed of different HIK implementations
compare_hik_hik1()
| bsd-3-clause |
vadimtk/chrome4sdp | third_party/mojo/src/mojo/public/tools/mojom_fetcher/mojom_gn.py | 26 | 2651 | #!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""BUILD file generator for mojoms."""
import argparse
import errno
import imp
import logging
import os
import sys
import urllib2
# Local library
sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)),
"pylib"))
# Bindings library
sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)),
"..", "bindings", "pylib"))
# Jinja2 library
sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)),
"..", "..", "third_party"))
import jinja2
from fetcher.repository import Repository
class BuildGNGenerator(object):
def __init__(self, repository, template_dir):
self._repository = repository
self._import_dirs = []
self.environment = jinja2.Environment(
loader=jinja2.FileSystemLoader(template_dir))
def generate(self):
build_gn_tmpl = self.environment.get_template('build_gn.tmpl')
directories = self._repository.get_all_external_mojom_directories()
for directory in directories:
logging.debug("Generating %s", directory.get_build_gn_path())
params = directory.get_jinja_parameters(self._import_dirs)
f = self._open(directory.get_build_gn_path(), "w")
f.write(build_gn_tmpl.render(**params))
def add_import_dirs(self, import_dirs):
self._import_dirs.extend(import_dirs)
def _open(self, filename, mode):
return open(filename, mode)
def _main(args):
repository_path = os.path.abspath(args.repository_path)
repository = Repository(repository_path, args.external_dir)
gn_generator = BuildGNGenerator(
repository, os.path.dirname(os.path.abspath(__file__)))
if args.extra_import_dirs:
gn_generator.add_import_dirs(args.extra_import_dirs)
gn_generator.generate()
def main():
logging.basicConfig(level=logging.WARNING)
parser = argparse.ArgumentParser(
description='Generate BUILD.gn files for mojoms.')
parser.add_argument('--repository-path', type=str, default='.',
help='The path to the client repository.')
parser.add_argument('--external-dir', type=str, default='external',
help='Directory for external interfaces')
parser.add_argument(
'--extra-import-dirs', type=str, action='append',
help='Additional directories to search for imported mojoms.')
args = parser.parse_args()
return _main(args)
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause |
valsson/MD-MC-Codes-2016 | HarmonicOscillator-MD/HarmonicOscillator-MD-Verlet.py | 1 | 4262 | #! /usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
from DataTools import writeDataToFile
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--time-step',dest='time_step',required=False)
parser.add_argument('--output-file',dest='fn_out',required=False)
args = parser.parse_args()
# Parameters of potential
m = 1.0
k = (2.0*np.pi)**2
angular_freq = np.sqrt(k/m)
freq = angular_freq/(2.0*np.pi)
period = 1.0/freq
# MD Parameters
if(args.time_step):
time_step = np.float64(args.time_step)
else:
time_step = 0.01*period
if(args.fn_out):
fn_out = args.fn_out
else:
fn_out = 'results.data'
showPlots = False
#num_periods = 20
#num_steps = np.int(np.rint( (num_periods*period)/time_step ))
num_steps = 10000
# initial postion and velocity at t=0
initial_position = 2.0
initial_velocity = 0.0
def getPotentialEnergy(x):
potential_ener = 0.5*k*x**2
return potential_ener
#-------------------------------
def getForce(x):
force = -k*x
return force
#-------------------------------
def getAcceleration(x):
return getForce(x)/m
#-------------------------------
def getPotentialAndForce(x):
return ( getPotentialEnergy(x), getForce(x) )
#-------------------------------
def getKineticEnergy(v):
kinetic_ener = 0.5*m*v**2
return kinetic_ener
#-------------------------------
def getTotalEnergy(x,v):
return getPotentialEnergy(x)+getKineticEnergy(v)
#-------------------------------
# analytical solution:
phi = np.arctan(-initial_velocity/(initial_position*angular_freq))
amplitude = initial_position/np.cos(phi)
conserved_energy = getPotentialEnergy(amplitude)
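# Derivation of the constants above (standard SHO algebra): with
# x(t) = A*cos(w*t + phi) and v(t) = -A*w*sin(w*t + phi), the t=0
# conditions x(0) = x0 and v(0) = v0 give
#   x0 = A*cos(phi),  v0 = -A*w*sin(phi)
# so tan(phi) = -v0/(x0*w) and A = x0/cos(phi), matching the two lines above.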
# ----------------------
times = []
positions = []
velocities = []
pot_energies = []
kin_energies = []
tot_energies = []
time = 0.0
curr_position = initial_position
prev_position = curr_position - initial_velocity*time_step + 0.5*getAcceleration(curr_position)*time_step**2
curr_velocity = initial_velocity
for i in range(num_steps):
if (i+1) % (num_steps/10) == 0:
print 'MD step {0:6d} of {1:6d}'.format(i+1,num_steps)
# get force at t
acceleration = getAcceleration(curr_position)
# get new position at t+dt
new_position = 2.0*curr_position - prev_position + acceleration*time_step**2
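# (The update above is the standard position-Verlet recurrence: adding the
# forward and backward Taylor expansions of x(t +/- dt) cancels the velocity
# term and leaves x(t+dt) = 2*x(t) - x(t-dt) + a(t)*dt**2, with O(dt**4)
# local error; prev_position was seeded with the backward Taylor step above.)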
# get velocity at t
curr_velocity = (new_position - prev_position) / (2.0*time_step)
# get energies at t
curr_pot_ener = getPotentialEnergy(curr_position)
curr_kin_ener = getKineticEnergy(curr_velocity)
curr_tot_ener = curr_pot_ener + curr_kin_ener
#
times.append( time )
positions.append( curr_position )
velocities.append( curr_velocity )
pot_energies.append( curr_pot_ener )
kin_energies.append( curr_kin_ener )
tot_energies.append( curr_tot_ener )
#
prev_position = curr_position
curr_position = new_position
time += time_step
#
#----------------------------------------
times = np.array(times)
positions = np.array(positions)
velocities = np.array(velocities)
pot_energies = np.array(pot_energies)
kin_energies = np.array(kin_energies)
tot_energies = np.array(tot_energies)
positions_analytical = amplitude*np.cos(angular_freq*times+phi)
velocities_analytical = -angular_freq*amplitude*np.sin(angular_freq*times+phi)
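# A quick diagnostic one could add here (assumption: Verlet keeps the total
# energy near the analytical value, with bounded oscillation rather than
# secular drift):
#   max_drift = np.max(np.abs(tot_energies - conserved_energy))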
writeDataToFile(fn_out,
[times,positions,velocities,pot_energies,kin_energies,tot_energies,positions_analytical,velocities_analytical],
['time','pos','vel','pot_ene','kin_ene','tot_ene','pos_an','vel_an'],
constantsNames=['time_step','period','amplitude','k','m','phi','conserved_energy'],
constantsValues=[time_step,period,amplitude,k,m,phi,conserved_energy],
dataFormat='%15.8f')
if showPlots:
plt.figure(1)
plt.plot(times,tot_energies)
plt.plot(times,pot_energies)
plt.plot(times,kin_energies)
plt.show()
plt.figure(2)
plt.plot(times,pot_energies)
plt.show()
plt.figure(3)
plt.plot(times,kin_energies)
plt.show()
plt.figure(4)
plt.plot(times,velocities)
plt.show()
plt.figure(5)
plt.plot(times,positions)
plt.plot(times,positions_analytical)
plt.show()
plt.figure(6)
plt.plot(times,positions-positions_analytical)
plt.show()
#
| mit |
Sweetgrassbuffalo/ReactionSweeGrass-v2 | .meteor/local/dev_bundle/python/Lib/site-packages/pip/utils/__init__.py | 124 | 27531 | from __future__ import absolute_import
from collections import deque
import contextlib
import errno
import io
import locale
# we have a submodule named 'logging' which would shadow this if we used the
# regular name:
import logging as std_logging
import re
import os
import posixpath
import shutil
import stat
import subprocess
import sys
import tarfile
import zipfile
from pip.exceptions import InstallationError
from pip.compat import console_to_str, expanduser, stdlib_pkgs
from pip.locations import (
site_packages, user_site, running_under_virtualenv, virtualenv_no_global,
write_delete_marker_file,
)
from pip._vendor import pkg_resources
from pip._vendor.six.moves import input
from pip._vendor.six import PY2
from pip._vendor.retrying import retry
if PY2:
from io import BytesIO as StringIO
else:
from io import StringIO
__all__ = ['rmtree', 'display_path', 'backup_dir',
'ask', 'splitext',
'format_size', 'is_installable_dir',
'is_svn_page', 'file_contents',
'split_leading_dir', 'has_leading_dir',
'normalize_path',
'renames', 'get_terminal_size', 'get_prog',
'unzip_file', 'untar_file', 'unpack_file', 'call_subprocess',
'captured_stdout', 'remove_tracebacks', 'ensure_dir',
'ARCHIVE_EXTENSIONS', 'SUPPORTED_EXTENSIONS',
'get_installed_version']
logger = std_logging.getLogger(__name__)
BZ2_EXTENSIONS = ('.tar.bz2', '.tbz')
XZ_EXTENSIONS = ('.tar.xz', '.txz', '.tlz', '.tar.lz', '.tar.lzma')
ZIP_EXTENSIONS = ('.zip', '.whl')
TAR_EXTENSIONS = ('.tar.gz', '.tgz', '.tar')
ARCHIVE_EXTENSIONS = (
ZIP_EXTENSIONS + BZ2_EXTENSIONS + TAR_EXTENSIONS + XZ_EXTENSIONS)
SUPPORTED_EXTENSIONS = ZIP_EXTENSIONS + TAR_EXTENSIONS
try:
import bz2 # noqa
SUPPORTED_EXTENSIONS += BZ2_EXTENSIONS
except ImportError:
logger.debug('bz2 module is not available')
try:
# Only for Python 3.3+
import lzma # noqa
SUPPORTED_EXTENSIONS += XZ_EXTENSIONS
except ImportError:
logger.debug('lzma module is not available')
def import_or_raise(pkg_or_module_string, ExceptionType, *args, **kwargs):
try:
return __import__(pkg_or_module_string)
except ImportError:
raise ExceptionType(*args, **kwargs)
def ensure_dir(path):
"""os.path.makedirs without EEXIST."""
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def get_prog():
try:
if os.path.basename(sys.argv[0]) in ('__main__.py', '-c'):
return "%s -m pip" % sys.executable
except (AttributeError, TypeError, IndexError):
pass
return 'pip'
# Retry every half second for up to 3 seconds
@retry(stop_max_delay=3000, wait_fixed=500)
def rmtree(dir, ignore_errors=False):
shutil.rmtree(dir, ignore_errors=ignore_errors,
onerror=rmtree_errorhandler)
def rmtree_errorhandler(func, path, exc_info):
"""On Windows, the files in .svn are read-only, so when rmtree() tries to
remove them, an exception is thrown. We catch that here, remove the
read-only attribute, and hopefully continue without problems."""
# if file type currently read only
if os.stat(path).st_mode & stat.S_IREAD:
# convert to read/write
os.chmod(path, stat.S_IWRITE)
# use the original function to repeat the operation
func(path)
return
else:
raise
def display_path(path):
"""Gives the display value for a given path, making it relative to cwd
if possible."""
path = os.path.normcase(os.path.abspath(path))
if sys.version_info[0] == 2:
path = path.decode(sys.getfilesystemencoding(), 'replace')
path = path.encode(sys.getdefaultencoding(), 'replace')
if path.startswith(os.getcwd() + os.path.sep):
path = '.' + path[len(os.getcwd()):]
return path
def backup_dir(dir, ext='.bak'):
"""Figure out the name of a directory to back up the given dir to
(adding .bak, .bak2, etc)"""
n = 1
extension = ext
while os.path.exists(dir + extension):
n += 1
extension = ext + str(n)
return dir + extension
def ask_path_exists(message, options):
for action in os.environ.get('PIP_EXISTS_ACTION', '').split():
if action in options:
return action
return ask(message, options)
def ask(message, options):
"""Ask the message interactively, with the given possible responses"""
while 1:
if os.environ.get('PIP_NO_INPUT'):
raise Exception(
'No input was expected ($PIP_NO_INPUT set); question: %s' %
message
)
response = input(message)
response = response.strip().lower()
if response not in options:
print(
'Your response (%r) was not one of the expected responses: '
'%s' % (response, ', '.join(options))
)
else:
return response
def format_size(bytes):
if bytes > 1000 * 1000:
return '%.1fMB' % (bytes / 1000.0 / 1000)
elif bytes > 10 * 1000:
return '%ikB' % (bytes / 1000)
elif bytes > 1000:
return '%.1fkB' % (bytes / 1000.0)
else:
return '%ibytes' % bytes
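# e.g. format_size(500) == '500bytes', format_size(2048) == '2.0kB',
# format_size(15000) == '15kB', format_size(3 * 1000 * 1000) == '3.0MB'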
def is_installable_dir(path):
"""Return True if `path` is a directory containing a setup.py file."""
if not os.path.isdir(path):
return False
setup_py = os.path.join(path, 'setup.py')
if os.path.isfile(setup_py):
return True
return False
def is_svn_page(html):
"""
Returns true if the page appears to be the index page of an svn repository
"""
return (re.search(r'<title>[^<]*Revision \d+:', html) and
re.search(r'Powered by (?:<a[^>]*?>)?Subversion', html, re.I))
def file_contents(filename):
with open(filename, 'rb') as fp:
return fp.read().decode('utf-8')
def read_chunks(file, size=io.DEFAULT_BUFFER_SIZE):
"""Yield pieces of data from a file-like object until EOF."""
while True:
chunk = file.read(size)
if not chunk:
break
yield chunk
def split_leading_dir(path):
path = path.lstrip('/').lstrip('\\')
if '/' in path and (('\\' in path and path.find('/') < path.find('\\')) or
'\\' not in path):
return path.split('/', 1)
elif '\\' in path:
return path.split('\\', 1)
else:
return path, ''
def has_leading_dir(paths):
"""Returns true if all the paths have the same leading path name
(i.e., everything is in one subdirectory in an archive)"""
common_prefix = None
for path in paths:
prefix, rest = split_leading_dir(path)
if not prefix:
return False
elif common_prefix is None:
common_prefix = prefix
elif prefix != common_prefix:
return False
return True
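# e.g. split_leading_dir('pkg/setup.py') == ['pkg', 'setup.py'] and
# has_leading_dir(['pkg/a.py', 'pkg/b/c.py']) is True, which is how
# archives that wrap everything in one top-level directory get flattened.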
def normalize_path(path, resolve_symlinks=True):
"""
Convert a path to its canonical, case-normalized, absolute version.
"""
path = expanduser(path)
if resolve_symlinks:
path = os.path.realpath(path)
else:
path = os.path.abspath(path)
return os.path.normcase(path)
def splitext(path):
"""Like os.path.splitext, but take off .tar too"""
base, ext = posixpath.splitext(path)
if base.lower().endswith('.tar'):
ext = base[-4:] + ext
base = base[:-4]
return base, ext
def renames(old, new):
"""Like os.renames(), but handles renaming across devices."""
# Implementation borrowed from os.renames().
head, tail = os.path.split(new)
if head and tail and not os.path.exists(head):
os.makedirs(head)
shutil.move(old, new)
head, tail = os.path.split(old)
if head and tail:
try:
os.removedirs(head)
except OSError:
pass
def is_local(path):
"""
Return True if path is within sys.prefix, if we're running in a virtualenv.
If we're not in a virtualenv, all paths are considered "local."
"""
if not running_under_virtualenv():
return True
return normalize_path(path).startswith(normalize_path(sys.prefix))
def dist_is_local(dist):
"""
Return True if given Distribution object is installed locally
(i.e. within current virtualenv).
Always True if we're not in a virtualenv.
"""
return is_local(dist_location(dist))
def dist_in_usersite(dist):
"""
Return True if given Distribution is installed in user site.
"""
norm_path = normalize_path(dist_location(dist))
return norm_path.startswith(normalize_path(user_site))
def dist_in_site_packages(dist):
"""
Return True if given Distribution is installed in
distutils.sysconfig.get_python_lib().
"""
return normalize_path(
dist_location(dist)
).startswith(normalize_path(site_packages))
def dist_is_editable(dist):
"""Is distribution an editable install?"""
for path_item in sys.path:
egg_link = os.path.join(path_item, dist.project_name + '.egg-link')
if os.path.isfile(egg_link):
return True
return False
def get_installed_distributions(local_only=True,
skip=stdlib_pkgs,
include_editables=True,
editables_only=False,
user_only=False):
"""
Return a list of installed Distribution objects.
If ``local_only`` is True (default), only return installations
local to the current virtualenv, if in a virtualenv.
``skip`` argument is an iterable of lower-case project names to
ignore; defaults to stdlib_pkgs
If ``editables`` is False, don't report editables.
If ``editables_only`` is True , only report editables.
If ``user_only`` is True , only report installations in the user
site directory.
"""
if local_only:
local_test = dist_is_local
else:
def local_test(d):
return True
if include_editables:
def editable_test(d):
return True
else:
def editable_test(d):
return not dist_is_editable(d)
if editables_only:
def editables_only_test(d):
return dist_is_editable(d)
else:
def editables_only_test(d):
return True
if user_only:
user_test = dist_in_usersite
else:
def user_test(d):
return True
return [d for d in pkg_resources.working_set
if local_test(d) and
d.key not in skip and
editable_test(d) and
editables_only_test(d) and
user_test(d)
]
def egg_link_path(dist):
"""
Return the path for the .egg-link file if it exists, otherwise, None.
There's 3 scenarios:
1) not in a virtualenv
try to find in site.USER_SITE, then site_packages
2) in a no-global virtualenv
try to find in site_packages
3) in a yes-global virtualenv
try to find in site_packages, then site.USER_SITE
(don't look in global location)
For #1 and #3, there could be odd cases, where there's an egg-link in 2
locations.
This method will just return the first one found.
"""
sites = []
if running_under_virtualenv():
if virtualenv_no_global():
sites.append(site_packages)
else:
sites.append(site_packages)
if user_site:
sites.append(user_site)
else:
if user_site:
sites.append(user_site)
sites.append(site_packages)
for site in sites:
egglink = os.path.join(site, dist.project_name) + '.egg-link'
if os.path.isfile(egglink):
return egglink
def dist_location(dist):
"""
Get the site-packages location of this distribution. Generally
this is dist.location, except in the case of develop-installed
packages, where dist.location is the source code location, and we
want to know where the egg-link file is.
"""
egg_link = egg_link_path(dist)
if egg_link:
return egg_link
return dist.location
def get_terminal_size():
"""Returns a tuple (x, y) representing the width(x) and the height(x)
in characters of the terminal window."""
def ioctl_GWINSZ(fd):
try:
import fcntl
import termios
import struct
cr = struct.unpack(
'hh',
fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234')
)
except:
return None
if cr == (0, 0):
return None
return cr
cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
if not cr:
try:
fd = os.open(os.ctermid(), os.O_RDONLY)
cr = ioctl_GWINSZ(fd)
os.close(fd)
except:
pass
if not cr:
cr = (os.environ.get('LINES', 25), os.environ.get('COLUMNS', 80))
return int(cr[1]), int(cr[0])
def current_umask():
"""Get the current umask which involves having to set it temporarily."""
mask = os.umask(0)
os.umask(mask)
return mask
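# Used below as (0o777 - current_umask()) | 0o111: start from the widest
# permissions the umask permits, then force the execute bits for
# user/group/world (note '-' binds tighter than '|').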
def unzip_file(filename, location, flatten=True):
"""
Unzip the file (with path `filename`) to the destination `location`. All
files are written based on system defaults and umask (i.e. permissions are
not preserved), except that regular file members with any execute
permissions (user, group, or world) have "chmod +x" applied after being
written. Note that for windows, any execute changes using os.chmod are
no-ops per the python docs.
"""
ensure_dir(location)
zipfp = open(filename, 'rb')
try:
zip = zipfile.ZipFile(zipfp, allowZip64=True)
leading = has_leading_dir(zip.namelist()) and flatten
for info in zip.infolist():
name = info.filename
data = zip.read(name)
fn = name
if leading:
fn = split_leading_dir(name)[1]
fn = os.path.join(location, fn)
dir = os.path.dirname(fn)
if fn.endswith('/') or fn.endswith('\\'):
# A directory
ensure_dir(fn)
else:
ensure_dir(dir)
fp = open(fn, 'wb')
try:
fp.write(data)
finally:
fp.close()
mode = info.external_attr >> 16
# if mode and regular file and any execute permissions for
# user/group/world?
if mode and stat.S_ISREG(mode) and mode & 0o111:
# make dest file have execute for user/group/world
# (chmod +x) no-op on windows per python docs
os.chmod(fn, (0o777 - current_umask() | 0o111))
finally:
zipfp.close()
def untar_file(filename, location):
"""
Untar the file (with path `filename`) to the destination `location`.
All files are written based on system defaults and umask (i.e. permissions
are not preserved), except that regular file members with any execute
permissions (user, group, or world) have "chmod +x" applied after being
written. Note that for windows, any execute changes using os.chmod are
no-ops per the python docs.
"""
ensure_dir(location)
if filename.lower().endswith('.gz') or filename.lower().endswith('.tgz'):
mode = 'r:gz'
elif filename.lower().endswith(BZ2_EXTENSIONS):
mode = 'r:bz2'
elif filename.lower().endswith(XZ_EXTENSIONS):
mode = 'r:xz'
elif filename.lower().endswith('.tar'):
mode = 'r'
else:
logger.warning(
'Cannot determine compression type for file %s', filename,
)
mode = 'r:*'
tar = tarfile.open(filename, mode)
try:
# note: python<=2.5 doesn't seem to know about pax headers, filter them
leading = has_leading_dir([
member.name for member in tar.getmembers()
if member.name != 'pax_global_header'
])
for member in tar.getmembers():
fn = member.name
if fn == 'pax_global_header':
continue
if leading:
fn = split_leading_dir(fn)[1]
path = os.path.join(location, fn)
if member.isdir():
ensure_dir(path)
elif member.issym():
try:
tar._extract_member(member, path)
except Exception as exc:
# Some corrupt tar files seem to produce this
# (specifically bad symlinks)
logger.warning(
'In the tar file %s the member %s is invalid: %s',
filename, member.name, exc,
)
continue
else:
try:
fp = tar.extractfile(member)
except (KeyError, AttributeError) as exc:
# Some corrupt tar files seem to produce this
# (specifically bad symlinks)
logger.warning(
'In the tar file %s the member %s is invalid: %s',
filename, member.name, exc,
)
continue
ensure_dir(os.path.dirname(path))
with open(path, 'wb') as destfp:
shutil.copyfileobj(fp, destfp)
fp.close()
# Update the timestamp (useful for cython compiled files)
tar.utime(member, path)
# member have any execute permissions for user/group/world?
if member.mode & 0o111:
# make dest file have execute for user/group/world
# no-op on windows per python docs
os.chmod(path, (0o777 - current_umask() | 0o111))
finally:
tar.close()
def unpack_file(filename, location, content_type, link):
filename = os.path.realpath(filename)
if (content_type == 'application/zip' or
filename.lower().endswith(ZIP_EXTENSIONS) or
zipfile.is_zipfile(filename)):
unzip_file(
filename,
location,
flatten=not filename.endswith('.whl')
)
elif (content_type == 'application/x-gzip' or
tarfile.is_tarfile(filename) or
filename.lower().endswith(
TAR_EXTENSIONS + BZ2_EXTENSIONS + XZ_EXTENSIONS)):
untar_file(filename, location)
elif (content_type and content_type.startswith('text/html') and
is_svn_page(file_contents(filename))):
# We don't really care about this
from pip.vcs.subversion import Subversion
Subversion('svn+' + link.url).unpack(location)
else:
# FIXME: handle?
# FIXME: magic signatures?
logger.critical(
'Cannot unpack file %s (downloaded from %s, content-type: %s); '
'cannot detect archive format',
filename, location, content_type,
)
raise InstallationError(
'Cannot determine archive format of %s' % location
)
def remove_tracebacks(output):
pattern = (r'(?:\W+File "(?:.*)", line (?:.*)\W+(?:.*)\W+\^\W+)?'
r'Syntax(?:Error|Warning): (?:.*)')
output = re.sub(pattern, '', output)
if PY2:
return output
# compileall.compile_dir() prints different messages to stdout
# in Python 3
return re.sub(r"\*\*\* Error compiling (?:.*)", '', output)
def call_subprocess(cmd, show_stdout=True, cwd=None,
on_returncode='raise',
command_level=std_logging.DEBUG, command_desc=None,
extra_environ=None, spinner=None):
# This function's handling of subprocess output is confusing and I
# previously broke it terribly, so as penance I will write a long comment
# explaining things.
#
# The obvious thing that affects output is the show_stdout=
# kwarg. show_stdout=True means, let the subprocess write directly to our
# stdout. Even though it is nominally the default, it is almost never used
# inside pip (and should not be used in new code without a very good
# reason); as of 2016-02-22 it is only used in a few places inside the VCS
# wrapper code. Ideally we should get rid of it entirely, because it
# creates a lot of complexity here for a rarely used feature.
#
# Most places in pip set show_stdout=False. What this means is:
# - We connect the child stdout to a pipe, which we read.
# - By default, we hide the output but show a spinner -- unless the
# subprocess exits with an error, in which case we show the output.
# - If the --verbose option was passed (= loglevel is DEBUG), then we show
# the output unconditionally. (But in this case we don't want to show
# the output a second time if it turns out that there was an error.)
#
# stderr is always merged with stdout (even if show_stdout=True).
if show_stdout:
stdout = None
else:
stdout = subprocess.PIPE
if command_desc is None:
cmd_parts = []
for part in cmd:
if ' ' in part or '\n' in part or '"' in part or "'" in part:
part = '"%s"' % part.replace('"', '\\"')
cmd_parts.append(part)
command_desc = ' '.join(cmd_parts)
logger.log(command_level, "Running command %s", command_desc)
env = os.environ.copy()
if extra_environ:
env.update(extra_environ)
try:
proc = subprocess.Popen(
cmd, stderr=subprocess.STDOUT, stdin=None, stdout=stdout,
cwd=cwd, env=env)
except Exception as exc:
logger.critical(
"Error %s while executing command %s", exc, command_desc,
)
raise
if stdout is not None:
all_output = []
while True:
line = console_to_str(proc.stdout.readline())
if not line:
break
line = line.rstrip()
all_output.append(line + '\n')
if logger.getEffectiveLevel() <= std_logging.DEBUG:
# Show the line immediately
logger.debug(line)
else:
# Update the spinner
if spinner is not None:
spinner.spin()
proc.wait()
if spinner is not None:
if proc.returncode:
spinner.finish("error")
else:
spinner.finish("done")
if proc.returncode:
if on_returncode == 'raise':
if (logger.getEffectiveLevel() > std_logging.DEBUG and
not show_stdout):
logger.info(
'Complete output from command %s:', command_desc,
)
logger.info(
''.join(all_output) +
'\n----------------------------------------'
)
raise InstallationError(
'Command "%s" failed with error code %s in %s'
% (command_desc, proc.returncode, cwd))
elif on_returncode == 'warn':
logger.warning(
'Command "%s" had error code %s in %s',
command_desc, proc.returncode, cwd,
)
elif on_returncode == 'ignore':
pass
else:
raise ValueError('Invalid value: on_returncode=%s' %
repr(on_returncode))
if not show_stdout:
return remove_tracebacks(''.join(all_output))
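A hedged usage sketch of call_subprocess; the command is illustrative and assumes git is on PATH:

output = call_subprocess(
    ['git', '--version'],
    show_stdout=False,     # capture through a pipe, per the comment above
    on_returncode='warn',  # log a warning instead of raising
)
# With show_stdout=False the captured, traceback-filtered output is
# returned as a single string.
print(output)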
def read_text_file(filename):
"""Return the contents of *filename*.
Try to decode the file contents with utf-8, the preferred system encoding
(e.g., cp1252 on some Windows machines), and latin1, in that order.
Decoding a byte string with latin1 will never raise an error. In the worst
case, the returned string will contain some garbage characters.
"""
with open(filename, 'rb') as fp:
data = fp.read()
encodings = ['utf-8', locale.getpreferredencoding(False), 'latin1']
for enc in encodings:
try:
data = data.decode(enc)
except UnicodeDecodeError:
continue
break
assert type(data) != bytes # Latin1 should have worked.
return data
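A small sketch of the fallback order, using a throwaway file whose bytes are valid cp1252 but invalid UTF-8:

import tempfile

with tempfile.NamedTemporaryFile(suffix='.txt', delete=False) as tmp:
    tmp.write(u'caf\xe9'.encode('cp1252'))  # b'caf\xe9', not valid UTF-8

text = read_text_file(tmp.name)
# utf-8 fails on 0xE9; the preferred encoding or, at worst, latin1
# then succeeds, so a text (unicode) string always comes back.
assert not isinstance(text, bytes)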
def _make_build_dir(build_dir):
os.makedirs(build_dir)
write_delete_marker_file(build_dir)
class FakeFile(object):
"""Wrap a list of lines in an object with readline() to make
ConfigParser happy."""
def __init__(self, lines):
self._gen = (l for l in lines)
def readline(self):
try:
try:
return next(self._gen)
except NameError:
return self._gen.next()
except StopIteration:
return ''
def __iter__(self):
return self._gen
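A sketch of the intended use; on Python 3 the equivalent entry point is read_file(), which only needs the iteration protocol FakeFile provides:

from configparser import ConfigParser  # ConfigParser.readfp() on Python 2

lines = ['[server]\n', 'host = localhost\n', 'port = 8080\n']
parser = ConfigParser()
parser.read_file(FakeFile(lines))
print(parser.get('server', 'port'))  # -> 8080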
class StreamWrapper(StringIO):
@classmethod
def from_stream(cls, orig_stream):
cls.orig_stream = orig_stream
return cls()
# compileall.compile_dir() needs stdout.encoding to print to stdout
@property
def encoding(self):
return self.orig_stream.encoding
@contextlib.contextmanager
def captured_output(stream_name):
"""Return a context manager used by captured_stdout/stdin/stderr
that temporarily replaces the sys stream *stream_name* with a StringIO.
Taken from Lib/support/__init__.py in the CPython repo.
"""
orig_stdout = getattr(sys, stream_name)
setattr(sys, stream_name, StreamWrapper.from_stream(orig_stdout))
try:
yield getattr(sys, stream_name)
finally:
setattr(sys, stream_name, orig_stdout)
def captured_stdout():
"""Capture the output of sys.stdout:
with captured_stdout() as stdout:
print('hello')
self.assertEqual(stdout.getvalue(), 'hello\n')
Taken from Lib/support/__init__.py in the CPython repo.
"""
return captured_output('stdout')
class cached_property(object):
"""A property that is only computed once per instance and then replaces
itself with an ordinary attribute. Deleting the attribute resets the
property.
Source: https://github.com/bottlepy/bottle/blob/0.11.5/bottle.py#L175
"""
def __init__(self, func):
self.__doc__ = getattr(func, '__doc__')
self.func = func
def __get__(self, obj, cls):
if obj is None:
# We're being accessed from the class itself, not from an object
return self
value = obj.__dict__[self.func.__name__] = self.func(obj)
return value
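A brief usage sketch of the descriptor; the expensive computation is a stand-in:

class Dataset(object):
    def __init__(self, path):
        self.path = path

    @cached_property
    def checksum(self):
        print('hashing %s once' % self.path)
        return hash(self.path)  # stand-in for an expensive computation

d = Dataset('/tmp/data.bin')
d.checksum       # computes and stores the value in d.__dict__
d.checksum       # plain attribute lookup now; __get__ is bypassed
del d.checksum   # deleting the attribute resets the property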
def get_installed_version(dist_name):
"""Get the installed version of dist_name avoiding pkg_resources cache"""
# Create a requirement that we'll look for inside of setuptools.
req = pkg_resources.Requirement.parse(dist_name)
# We want to avoid having this cached, so we need to construct a new
# working set each time.
working_set = pkg_resources.WorkingSet()
# Get the installed distribution from our working set
dist = working_set.find(req)
# Check to see if we got an installed distribution or not; if we
# did, we want to return its version.
return dist.version if dist else None
def consume(iterator):
"""Consume an iterable at C speed."""
deque(iterator, maxlen=0)
| gpl-3.0 |
takaakiaoki/PyFoam | PyFoam/Applications/CommonLibFunctionTrigger.py | 3 | 2804 | """Implements a trigger that removes the libs and/or functions
entry from the controlDict"""
import sys
from os import path
from optparse import OptionGroup
from PyFoam.RunDictionary.ParsedParameterFile import ParsedParameterFile
from PyFoam.Error import warning
class CommonLibFunctionTrigger(object):
""" The class that does the actual triggering
"""
def addOptions(self):
grp=OptionGroup(self.parser,
"Manipulating controlDict",
"Temporarily remove entries from the controlDict that are incompatible with some applications")
grp.add_option("--remove-libs",
action="store_true",
dest="removeLibs",
default=False,
help="Remove the libs entry from the controlDict for the duration of the application run")
grp.add_option("--remove-functions",
action="store_true",
dest="removeFunctions",
default=False,
help="Remove the functions entry from the controlDict for the duration of the application run")
self.parser.add_option_group(grp)
def addLibFunctionTrigger(self,run,sol):
if self.opts.removeLibs or self.opts.removeFunctions:
warning("Adding Trigger to reset lib/function at end")
trig=LibFunctionTrigger(sol,self.opts.removeLibs,self.opts.removeFunctions)
run.addEndTrigger(trig.resetIt)
class LibFunctionTrigger:
def __init__(self,sol,libs,funs):
self.control=ParsedParameterFile(path.join(sol.systemDir(),"controlDict"),
backup=True,
doMacroExpansion=True)
self.fresh=False
try:
if libs and ("libs" in self.control):
warning("Temporarily removing the libs-entry from the controlDict")
del self.control["libs"]
self.fresh=True
if funs and ("functions" in self.control):
warning("Temporarily removing the functions-entry from the controlDict")
del self.control["functions"]
self.fresh=True
if self.fresh:
self.control.writeFile()
else:
self.control.restore()
except Exception:
e = sys.exc_info()[1] # Needed because python 2.5 does not support 'as e'
warning("Restoring defaults")
self.control.restore()
raise e
def resetIt(self):
if self.fresh:
warning("Trigger called: Resetting controlDict")
self.control.restore()
self.fresh=False
# Should work with Python3 and Python2
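A rough sketch of how an application would mix this class in; the run and sol objects (a PyFoam runner and solution directory) are hypothetical stand-ins, not actual PyFoam wiring:

from optparse import OptionParser

class MyApp(CommonLibFunctionTrigger):
    def __init__(self, run, sol, argv):
        self.parser = OptionParser()
        self.addOptions()  # installs --remove-libs / --remove-functions
        self.opts, _args = self.parser.parse_args(argv)
        self.addLibFunctionTrigger(run, sol)  # registers the end trigger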
| gpl-2.0 |
houzhenggang/hiwifi-openwrt-HC5661-HC5761 | staging_dir/target-mipsel_r2_uClibc-0.9.33.2/usr/lib/python2.7/test/fork_wait.py | 129 | 2291 | """This test case provides support for checking forking and wait behavior.
To test different wait behavior, override the wait_impl method.
We want fork1() semantics -- only the forking thread survives in the
child after a fork().
On some systems (e.g. Solaris without posix threads) we find that all
active threads survive in the child after a fork(); this is an error.
While BeOS doesn't officially support fork and native threading in
the same application, the present example should work just fine. DC
"""
import os, sys, time, unittest
import test.test_support as test_support
thread = test_support.import_module('thread')
LONGSLEEP = 2
SHORTSLEEP = 0.5
NUM_THREADS = 4
class ForkWait(unittest.TestCase):
def setUp(self):
self.alive = {}
self.stop = 0
def f(self, id):
while not self.stop:
self.alive[id] = os.getpid()
try:
time.sleep(SHORTSLEEP)
except IOError:
pass
def wait_impl(self, cpid):
for i in range(10):
# waitpid() shouldn't hang, but some of the buildbots seem to hang
# in the forking tests. This is an attempt to fix the problem.
spid, status = os.waitpid(cpid, os.WNOHANG)
if spid == cpid:
break
time.sleep(2 * SHORTSLEEP)
self.assertEqual(spid, cpid)
self.assertEqual(status, 0, "cause = %d, exit = %d" % (status&0xff, status>>8))
def test_wait(self):
for i in range(NUM_THREADS):
thread.start_new(self.f, (i,))
time.sleep(LONGSLEEP)
a = self.alive.keys()
a.sort()
self.assertEqual(a, range(NUM_THREADS))
prefork_lives = self.alive.copy()
if sys.platform in ['unixware7']:
cpid = os.fork1()
else:
cpid = os.fork()
if cpid == 0:
# Child
time.sleep(LONGSLEEP)
n = 0
for key in self.alive:
if self.alive[key] != prefork_lives[key]:
n += 1
os._exit(n)
else:
# Parent
self.wait_impl(cpid)
# Tell threads to die
self.stop = 1
time.sleep(2*SHORTSLEEP) # Wait for threads to die
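As the docstring notes, concrete test cases override wait_impl; a minimal blocking variant might look like this:

class BlockingForkWait(ForkWait):
    def wait_impl(self, cpid):
        # Block until the child exits instead of polling with WNOHANG.
        spid, status = os.waitpid(cpid, 0)
        self.assertEqual(spid, cpid)
        self.assertEqual(status, 0)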
| gpl-2.0 |
bsmr-ansible/ansible-modules-core | cloud/openstack/os_image.py | 68 | 6384 | #!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2013, Benno Joy <benno@ansible.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
#TODO(mordred): we need to support "location"(v1) and "locations"(v2)
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
DOCUMENTATION = '''
---
module: os_image
short_description: Add/Delete images from OpenStack Cloud
extends_documentation_fragment: openstack
version_added: "2.0"
author: "Monty Taylor (@emonty)"
description:
- Add or Remove images from the OpenStack Image Repository
options:
name:
description:
- Name that has to be given to the image
required: true
default: None
disk_format:
description:
- The format of the disk that is getting uploaded
required: false
default: qcow2
container_format:
description:
- The format of the container
required: false
default: bare
owner:
description:
- The owner of the image
required: false
default: None
min_disk:
description:
- The minimum disk space (in GB) required to boot this image
required: false
default: None
min_ram:
description:
- The minimum ram (in MB) required to boot this image
required: false
default: None
is_public:
description:
- Whether the image can be accessed publicly. Note that publicizing an image requires admin role by default.
required: false
default: 'yes'
filename:
description:
- The path to the file which has to be uploaded
required: false
default: None
ramdisk:
description:
- The name of an existing ramdisk image that will be associated with this image
required: false
default: None
kernel:
description:
- The name of an existing kernel image that will be associated with this image
required: false
default: None
properties:
description:
- Additional properties to be associated with this image
required: false
default: {}
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
requirements: ["shade"]
'''
EXAMPLES = '''
# Upload an image from a local file named cirros-0.3.0-x86_64-disk.img
- os_image:
auth:
auth_url: http://localhost/auth/v2.0
username: admin
password: passme
project_name: admin
name: cirros
container_format: bare
disk_format: qcow2
state: present
filename: cirros-0.3.0-x86_64-disk.img
kernel: cirros-vmlinuz
ramdisk: cirros-initrd
properties:
cpu_arch: x86_64
distro: ubuntu
'''
def main():
argument_spec = openstack_full_argument_spec(
name = dict(required=True),
disk_format = dict(default='qcow2', choices=['ami', 'ari', 'aki', 'vhd', 'vmdk', 'raw', 'qcow2', 'vdi', 'iso']),
container_format = dict(default='bare', choices=['ami', 'aki', 'ari', 'bare', 'ovf', 'ova']),
owner = dict(default=None),
min_disk = dict(type='int', default=0),
min_ram = dict(type='int', default=0),
is_public = dict(type='bool', default=False),
filename = dict(default=None),
ramdisk = dict(default=None),
kernel = dict(default=None),
properties = dict(type='dict', default={}),
state = dict(default='present', choices=['absent', 'present']),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec, **module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
try:
cloud = shade.openstack_cloud(**module.params)
changed = False
image = cloud.get_image(name_or_id=module.params['name'])
if module.params['state'] == 'present':
if not image:
image = cloud.create_image(
name=module.params['name'],
filename=module.params['filename'],
disk_format=module.params['disk_format'],
container_format=module.params['container_format'],
wait=module.params['wait'],
timeout=module.params['timeout'],
is_public=module.params['is_public'],
min_disk=module.params['min_disk'],
min_ram=module.params['min_ram']
)
changed = True
if not module.params['wait']:
module.exit_json(changed=changed, image=image, id=image.id)
cloud.update_image_properties(
image=image,
kernel=module.params['kernel'],
ramdisk=module.params['ramdisk'],
**module.params['properties'])
image = cloud.get_image(name_or_id=image.id)
module.exit_json(changed=changed, image=image, id=image.id)
elif module.params['state'] == 'absent':
if not image:
changed = False
else:
cloud.delete_image(
name_or_id=module.params['name'],
wait=module.params['wait'],
timeout=module.params['timeout'])
changed = True
module.exit_json(changed=changed)
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e), extra_data=e.extra_data)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == "__main__":
main()
| gpl-3.0 |
OCA/business-requirement | business_requirement_sale/models/business_requirement.py | 1 | 1458 | # Copyright 2019 Tecnativa Victor M.M. Torres>
# Copyright 2019 Tecnativa - Pedro M. Baeza
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from odoo import api, fields, models
class BusinessRequirement(models.Model):
_inherit = 'business.requirement'
sale_order_ids = fields.One2many(
comodel_name='sale.order',
inverse_name='business_requirement_id',
string='Sales Orders',
)
sale_order_count = fields.Integer(
string='Sales Orders Count',
compute='_compute_sale_order_count',
)
@api.multi
@api.depends('sale_order_ids')
def _compute_sale_order_count(self):
groups = self.env['sale.order'].read_group(
domain=[('business_requirement_id', 'in', self.ids)],
fields=['business_requirement_id'],
groupby=['business_requirement_id'],
)
data = {
x['business_requirement_id'][0]: x['business_requirement_id_count']
for x in groups
}
for rec in self:
rec.sale_order_count = data.get(rec.id, 0)
@api.multi
def open_orders(self):
action = self.env.ref('sale.action_quotations').read()[0]
if len(self) == 1:
action['context'] = {
'search_default_business_requirement_id': self.id,
}
else:
action['domain'] = [('business_requirement_id', 'in', self.ids)]
return action
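For reference, read_group returns one dict per group; with the grouping above, data is built from entries shaped roughly like this (values illustrative):

groups = [
    {'business_requirement_id': (7, 'BR-0007'),  # (id, display_name)
     'business_requirement_id_count': 3},
]
data = {g['business_requirement_id'][0]: g['business_requirement_id_count']
        for g in groups}
assert data == {7: 3}  # each record then reads data.get(rec.id, 0)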
| agpl-3.0 |
deepmind/open_spiel | open_spiel/python/algorithms/external_sampling_mccfr_test.py | 1 | 4567 | # Copyright 2019 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.algorithms.cfr."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
import numpy as np
from open_spiel.python.algorithms import exploitability
from open_spiel.python.algorithms import external_sampling_mccfr
import pyspiel
SEED = 39823987
class ExternalSamplingMCCFRTest(absltest.TestCase):
def test_external_sampling_leduc_2p_simple(self):
np.random.seed(SEED)
game = pyspiel.load_game("leduc_poker")
es_solver = external_sampling_mccfr.ExternalSamplingSolver(
game, external_sampling_mccfr.AverageType.SIMPLE)
for _ in range(10):
es_solver.iteration()
conv = exploitability.nash_conv(game, es_solver.average_policy())
print("Leduc2P, conv = {}".format(conv))
self.assertLess(conv, 5)
# ensure that to_tabular() works on the returned policy and
# the tabular policy is equivalent
tabular_policy = es_solver.average_policy().to_tabular()
conv2 = exploitability.nash_conv(game, tabular_policy)
self.assertEqual(conv, conv2)
def test_external_sampling_leduc_2p_full(self):
np.random.seed(SEED)
game = pyspiel.load_game("leduc_poker")
es_solver = external_sampling_mccfr.ExternalSamplingSolver(
game, external_sampling_mccfr.AverageType.FULL)
for _ in range(10):
es_solver.iteration()
conv = exploitability.nash_conv(game, es_solver.average_policy())
print("Leduc2P, conv = {}".format(conv))
self.assertLess(conv, 5)
def test_external_sampling_kuhn_2p_simple(self):
np.random.seed(SEED)
game = pyspiel.load_game("kuhn_poker")
es_solver = external_sampling_mccfr.ExternalSamplingSolver(
game, external_sampling_mccfr.AverageType.SIMPLE)
for _ in range(10):
es_solver.iteration()
conv = exploitability.nash_conv(game, es_solver.average_policy())
print("Kuhn2P, conv = {}".format(conv))
self.assertLess(conv, 1)
def test_external_sampling_kuhn_2p_full(self):
np.random.seed(SEED)
game = pyspiel.load_game("kuhn_poker")
es_solver = external_sampling_mccfr.ExternalSamplingSolver(
game, external_sampling_mccfr.AverageType.FULL)
for _ in range(10):
es_solver.iteration()
conv = exploitability.nash_conv(game, es_solver.average_policy())
print("Kuhn2P, conv = {}".format(conv))
self.assertLess(conv, 1)
# Liar's dice takes too long, so disable this test. Leave code for reference.
# pylint: disable=g-unreachable-test-method
def disabled_test_external_sampling_liars_dice_2p_simple(self):
np.random.seed(SEED)
game = pyspiel.load_game("liars_dice")
es_solver = external_sampling_mccfr.ExternalSamplingSolver(
game, external_sampling_mccfr.AverageType.SIMPLE)
for _ in range(1):
es_solver.iteration()
conv = exploitability.nash_conv(game, es_solver.average_policy())
print("Liar's dice, conv = {}".format(conv))
self.assertLess(conv, 2)
def test_external_sampling_kuhn_3p_simple(self):
np.random.seed(SEED)
game = pyspiel.load_game("kuhn_poker", {"players": 3})
es_solver = external_sampling_mccfr.ExternalSamplingSolver(
game, external_sampling_mccfr.AverageType.SIMPLE)
for _ in range(10):
es_solver.iteration()
conv = exploitability.nash_conv(game, es_solver.average_policy())
print("Kuhn3P, conv = {}".format(conv))
self.assertLess(conv, 2)
def test_external_sampling_kuhn_3p_full(self):
np.random.seed(SEED)
game = pyspiel.load_game("kuhn_poker", {"players": 3})
es_solver = external_sampling_mccfr.ExternalSamplingSolver(
game, external_sampling_mccfr.AverageType.FULL)
for _ in range(10):
es_solver.iteration()
conv = exploitability.nash_conv(game, es_solver.average_policy())
print("Kuhn3P, conv = {}".format(conv))
self.assertLess(conv, 2)
if __name__ == "__main__":
absltest.main()
| apache-2.0 |
wbushey/santaclaus | tests/test_santa_v0.py | 1 | 1789 | # -*- coding: utf8 -*-
from __future__ import absolute_import, unicode_literals
import json
from santaclaus import db
from santaclaus.models import Person
from santa_test_helper import SantaClausTestCase
class SantaClausV0Test(SantaClausTestCase):
def test_request_with_a_name(self):
r = self.app.get('/?name=%s' % self.fake.name().replace(" ", "%20"))
self.assertEqual(r.status_code, 200)
self.assert_json_response(r)
data_js = json.loads(r.data)
self.assertTrue('name' in data_js)
self.assertTrue('status' in data_js)
def test_request_without_a_name(self):
r = self.app.get('/')
self.assertEqual(r.status_code, 400)
self.assert_json_response(r)
data_js = json.loads(r.data)
self.assertTrue('error' in data_js)
def test_valid_list_requests(self):
persons = {
'Naughty': [],
'Nice': []
}
for i in range(10):
p = Person(self.fake.name())
persons[p.status].append(p.name)
db.session.add(p)
db.session.commit()
naught_r = self.app.get('/lists/naughty')
self.assert_json_response(naught_r)
self.assertEqual(naught_r.status_code, 200)
nice_r = self.app.get('/lists/nice')
self.assert_json_response(nice_r)
self.assertEqual(nice_r.status_code, 200)
naughty_list = json.loads(naught_r.data)['list']
nice_list = json.loads(nice_r.data)['list']
naughty_list_complete = all(
name in naughty_list for name in persons['Naughty'])
nice_list_complete = all(
name in nice_list for name in persons['Nice'])
self.assertTrue(naughty_list_complete)
self.assertTrue(nice_list_complete)
| gpl-3.0 |
akhilari7/pa-dude | lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/contrib/appengine.py | 195 | 7531 | from __future__ import absolute_import
import logging
import os
import warnings
from ..exceptions import (
HTTPError,
HTTPWarning,
MaxRetryError,
ProtocolError,
TimeoutError,
SSLError
)
from ..packages.six import BytesIO
from ..request import RequestMethods
from ..response import HTTPResponse
from ..util.timeout import Timeout
from ..util.retry import Retry
try:
from google.appengine.api import urlfetch
except ImportError:
urlfetch = None
log = logging.getLogger(__name__)
class AppEnginePlatformWarning(HTTPWarning):
pass
class AppEnginePlatformError(HTTPError):
pass
class AppEngineManager(RequestMethods):
"""
Connection manager for Google App Engine sandbox applications.
This manager uses the URLFetch service directly instead of using the
emulated httplib, and is subject to URLFetch limitations as described in
the App Engine documentation here:
https://cloud.google.com/appengine/docs/python/urlfetch
Notably it will raise an AppEnginePlatformError if:
* URLFetch is not available.
* If you attempt to use this on GAEv2 (Managed VMs), as full socket
support is available.
* If a request size is more than 10 megabytes.
* If a response size is more than 32 megabytes.
* If you use an unsupported request method such as OPTIONS.
Beyond those cases, it will raise normal urllib3 errors.
"""
def __init__(self, headers=None, retries=None, validate_certificate=True):
if not urlfetch:
raise AppEnginePlatformError(
"URLFetch is not available in this environment.")
if is_prod_appengine_mvms():
raise AppEnginePlatformError(
"Use normal urllib3.PoolManager instead of AppEngineManager"
"on Managed VMs, as using URLFetch is not necessary in "
"this environment.")
warnings.warn(
"urllib3 is using URLFetch on Google App Engine sandbox instead "
"of sockets. To use sockets directly instead of URLFetch see "
"https://urllib3.readthedocs.org/en/latest/contrib.html.",
AppEnginePlatformWarning)
RequestMethods.__init__(self, headers)
self.validate_certificate = validate_certificate
self.retries = retries or Retry.DEFAULT
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
# Return False to re-raise any potential exceptions
return False
def urlopen(self, method, url, body=None, headers=None,
retries=None, redirect=True, timeout=Timeout.DEFAULT_TIMEOUT,
**response_kw):
retries = self._get_retries(retries, redirect)
try:
response = urlfetch.fetch(
url,
payload=body,
method=method,
headers=headers or {},
allow_truncated=False,
follow_redirects=(
redirect and
retries.redirect != 0 and
retries.total),
deadline=self._get_absolute_timeout(timeout),
validate_certificate=self.validate_certificate,
)
except urlfetch.DeadlineExceededError as e:
raise TimeoutError(self, e)
except urlfetch.InvalidURLError as e:
if 'too large' in str(e):
raise AppEnginePlatformError(
"URLFetch request too large, URLFetch only "
"supports requests up to 10mb in size.", e)
raise ProtocolError(e)
except urlfetch.DownloadError as e:
if 'Too many redirects' in str(e):
raise MaxRetryError(self, url, reason=e)
raise ProtocolError(e)
except urlfetch.ResponseTooLargeError as e:
raise AppEnginePlatformError(
"URLFetch response too large, URLFetch only supports"
"responses up to 32mb in size.", e)
except urlfetch.SSLCertificateError as e:
raise SSLError(e)
except urlfetch.InvalidMethodError as e:
raise AppEnginePlatformError(
"URLFetch does not support method: %s" % method, e)
http_response = self._urlfetch_response_to_http_response(
response, **response_kw)
# Check for redirect response
if (http_response.get_redirect_location() and
retries.raise_on_redirect and redirect):
raise MaxRetryError(self, url, "too many redirects")
# Check if we should retry the HTTP response.
if retries.is_forced_retry(method, status_code=http_response.status):
retries = retries.increment(
method, url, response=http_response, _pool=self)
log.info("Forced retry: %s" % url)
retries.sleep()
return self.urlopen(
method, url,
body=body, headers=headers,
retries=retries, redirect=redirect,
timeout=timeout, **response_kw)
return http_response
def _urlfetch_response_to_http_response(self, urlfetch_resp, **response_kw):
if is_prod_appengine():
# Production GAE handles deflate encoding automatically, but does
# not remove the encoding header.
content_encoding = urlfetch_resp.headers.get('content-encoding')
if content_encoding == 'deflate':
del urlfetch_resp.headers['content-encoding']
return HTTPResponse(
# In order for decoding to work, we must present the content as
# a file-like object.
body=BytesIO(urlfetch_resp.content),
headers=urlfetch_resp.headers,
status=urlfetch_resp.status_code,
**response_kw
)
def _get_absolute_timeout(self, timeout):
if timeout is Timeout.DEFAULT_TIMEOUT:
return 5 # 5s is the default timeout for URLFetch.
if isinstance(timeout, Timeout):
if timeout.read is not timeout.connect:
warnings.warn(
"URLFetch does not support granular timeout settings, "
"reverting to total timeout.", AppEnginePlatformWarning)
return timeout.total
return timeout
def _get_retries(self, retries, redirect):
if not isinstance(retries, Retry):
retries = Retry.from_int(
retries, redirect=redirect, default=self.retries)
if retries.connect or retries.read or retries.redirect:
warnings.warn(
"URLFetch only supports total retries and does not "
"recognize connect, read, or redirect retry parameters.",
AppEnginePlatformWarning)
return retries
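A hedged usage sketch; it only runs inside a real App Engine sandbox, where the urlfetch import above succeeds:

manager = AppEngineManager()
response = manager.urlopen('GET', 'https://example.com/api')
# response is a normal urllib3 HTTPResponse built from the
# URLFetch result above.
print(response.status, response.data[:80])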
def is_appengine():
return (is_local_appengine() or
is_prod_appengine() or
is_prod_appengine_mvms())
def is_appengine_sandbox():
return is_appengine() and not is_prod_appengine_mvms()
def is_local_appengine():
return ('APPENGINE_RUNTIME' in os.environ and
'Development/' in os.environ['SERVER_SOFTWARE'])
def is_prod_appengine():
return ('APPENGINE_RUNTIME' in os.environ and
'Google App Engine/' in os.environ['SERVER_SOFTWARE'] and
not is_prod_appengine_mvms())
def is_prod_appengine_mvms():
return os.environ.get('GAE_VM', False) == 'true'
| mit |
QuLogic/specfem1d | Python_version/grid.py | 2 | 2988 | # -*- coding: utf-8 -*-
'''
Definitions of the grid.
'''
from __future__ import (absolute_import, division, print_function)
import numpy as np
import functions
import gll
class OneDimensionalGrid(object):
"""Contains the grid properties"""
def __init__(self, param):
"""Init"""
self.param = param
self.z = np.zeros(param.nGlob)
self.rho = np.zeros((param.nSpec, param.nGLL))
self.mu = np.zeros((param.nSpec, param.nGLL))
self.ticks = np.zeros(param.nSpec + 1)
if param.gridType == 'homogeneous':
self.ticks = np.linspace(0, param.length, param.nSpec + 1)
self.rho.fill(param.meanRho)
self.mu.fill(param.meanMu)
self.z[1:param.nGLJ] = functions.project_inverse(
param.ksiGLJ[1:param.nGLJ],
0,
self.ticks)
ksiGLL = param.ksiGLL[1:]
for i in range(param.nGLL, param.nGlob, param.N):
self.z[i:i + param.N] = functions.project_inverse(ksiGLL,
i // param.N,
self.ticks)
self.z[-1] = self.ticks[-1]
elif param.gridType == 'gradient':
msg = "typeOfGrid == 'gradient' has not been implemented yet"
raise NotImplementedError(msg)
elif param.gridType == 'miscellaneous':
msg = "typeOfGrid == 'miscellaneous' has not been implemented yet"
raise NotImplementedError(msg)
elif param.gridType == 'file':
self.z, self.rho, self.mu = np.loadtxt(param.gridFile, unpack=True)
self.ticks = np.loadtxt(param.ticksFile)
else:
raise ValueError('Unknown grid type: %s' % (param.gridType, ))
# Jacobians at the GLL (and GLJ for the first element in axisym)
# points (arrays nSpec*(N+1) elements)
self.dXdKsi = gll.jacobian(self.ticks, param)
self.dKsiDx = gll.jacobian_inverse(self.ticks, param)
def plot(self):
"""Plot the grid
my_ticks gives the abscissa of the borders
TODO I should test : _the types of the parameters
_their sizes"""
import matplotlib.pyplot as plt
from matplotlib.ticker import FixedLocator
fig, ax = plt.subplots(2, 1, sharex=True)
ax[0].plot(self.z[self.param.ibool].flat, self.rho.flat, 'b+')
ax[0].set_title(r'$\rho(z)$')
ax[0].xaxis.set_minor_locator(FixedLocator(self.ticks))
ax[0].xaxis.grid(True, which='minor', alpha=0.5)
ax[0].yaxis.grid(True)
ax[1].plot(self.z[self.param.ibool].flat, self.mu.flat, 'r+')
ax[1].set_title(r'$\mu(z)$')
ax[1].xaxis.set_minor_locator(FixedLocator(self.ticks))
ax[1].xaxis.grid(True, which='minor', alpha=0.5)
ax[1].yaxis.grid(True)
plt.suptitle('Grid')
plt.show()
| gpl-2.0 |