| code (string, 3–1.05M) | repo_name (string, 5–104) | path (string, 4–251) | language (1 class) | license (15 classes) | size (int64, 3–1.05M) |
|---|---|---|---|---|---|
import unittest
from .case_board import BoardTestCase
from .helpers import C, WHITE, BLACK
class TestBoardConstruction(BoardTestCase):
def get_board(self, *args, **kwargs):
from chess.models import Board
return Board(*args, **kwargs)
def test_empty(self):
board = self.get_board()
for row in xrange(8):
for col in xrange(8):
piece = board[(row, col)]
self.assertEqual(piece, None)
def test_pawnbattle(self):
config = 'r......rpppppppp................................PPPPPPPPR......R'
board = self.get_board(config)
i = 0
for row in xrange(7, -1, -1):
for col in xrange(0, 8):
piece = board[(row, col)]
if config[i] == '.':
self.assertEqual(piece, None)
else:
self.assertEqual(piece.repr, config[i])
i += 1
state = board.get_state()
self.assertEqual(state['board'], config)
if __name__ == '__main__':
unittest.main()
| renatopp/liac-chess | tests/test_board_construction.py | Python | mit | 1,081 |
import unittest
import mock
import DIRT
from models import match_set_factory
import utilities.path
def iter_match_passages(match_set):
for match in match_set.matches:
yield match.alpha_passage
yield match.beta_passage
def contains_contains(l, search_for):
for item in l:
if search_for in item or item in search_for:
return True
return False
class SmokeTest(unittest.TestCase):
pre_dir = 'test_preprocessed'
out_dir = 'test_output'
def _reset_dirs(self):
utilities.path.reset_folder(self.pre_dir)
utilities.path.reset_folder(self.out_dir)
def setUp(self):
self._reset_dirs()
def tearDown(self):
self._reset_dirs()
def _no_matchset_dupes(self, ms):
found = set()
for match in ms:
self.assertNotIn(match, found)
found.add(match)
def full_test(self):
args = mock.Mock()
args.input = 'test_data/full_test/files_to_process.txt'
args.preprocessed_dir = 'test_data/full_test/preprocessed'
args.output_dir = self.out_dir
# TODO: use zhi
args.language = 'eng'
args.comparator = 'simple'
args.gap_length = 10
args.match_length = 10
# Nosetests doesn't seem to like multiprocessing
args.parallel = False
DIRT.main(args)
one_two = match_set_factory.find_in_dir('one', 'two', self.out_dir)
one_three = match_set_factory.find_in_dir('one', 'three', self.out_dir)
three_two = match_set_factory.find_in_dir('three', 'two', self.out_dir)
common_pass = (u'This test file consists of multiple '
u'paragraphs This paragraph in particular '
u'occurs in multiple test files DIRT should '
u'be able to determine this and create the '
u'appropriate matches')
passages_32 = list(iter_match_passages(three_two))
found = contains_contains(passages_32, common_pass)
self.assertTrue(found)
passages_12 = list(iter_match_passages(one_two))
found = contains_contains(passages_12, common_pass)
self.assertTrue(found)
passages_13 = list(iter_match_passages(one_three))
found = contains_contains(passages_13, common_pass)
self.assertTrue(found)
self._no_matchset_dupes(one_two)
self._no_matchset_dupes(one_three)
self._no_matchset_dupes(three_two)
# Check matched passages
self.assertTrue(contains_contains(passages_12,
search_for=u'ONEANDTWO'))
self.assertTrue(contains_contains(passages_13,
search_for=u'ONEANDTHREE'))
self.assertTrue(contains_contains(passages_32,
search_for=u'TWOANDTHREE'))
| gnarph/DIRT | tests.py | Python | mit | 2,905 |
#!/usr/bin/python
# Copyright (c)2010-2012 the Boeing Company.
# See the LICENSE file included in this distribution.
# run iperf to measure the effective throughput between two nodes when
# n nodes are connected to a virtual hub/switch; run test for testsec
# and repeat for minnodes <= n <= maxnodes with a step size of
# nodestep
import optparse, sys, os, datetime
from core import pycore
from core.misc import ipaddr
from core.misc.utils import mutecall
try:
mutecall(["iperf", "-v"])
except OSError:
sys.stderr.write("ERROR: running iperf failed\n")
sys.exit(1)
def test(numnodes, testsec):
# node list
n = []
# IP subnet
prefix = ipaddr.IPv4Prefix("10.83.0.0/16")
session = pycore.Session()
# emulated network
net = session.addobj(cls = pycore.nodes.SwitchNode)
for i in range(1, numnodes + 1):
tmp = session.addobj(cls = pycore.nodes.LxcNode, name = "n%d" % i)
tmp.newnetif(net, ["%s/%s" % (prefix.addr(i), prefix.prefixlen)])
n.append(tmp)
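    # start an iperf server (daemon mode) on the first node, run the client from the last node, then kill the server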
n[0].cmd(["iperf", "-s", "-D"])
n[-1].icmd(["iperf", "-t", str(int(testsec)), "-c", str(prefix.addr(1))])
n[0].cmd(["killall", "-9", "iperf"])
session.shutdown()
def main():
usagestr = "usage: %prog [-h] [options] [args]"
parser = optparse.OptionParser(usage = usagestr)
parser.set_defaults(minnodes = 2)
parser.add_option("-m", "--minnodes", dest = "minnodes", type = int,
help = "min number of nodes to test; default = %s" %
parser.defaults["minnodes"])
parser.set_defaults(maxnodes = 2)
parser.add_option("-n", "--maxnodes", dest = "maxnodes", type = int,
help = "max number of nodes to test; default = %s" %
parser.defaults["maxnodes"])
parser.set_defaults(testsec = 10)
parser.add_option("-t", "--testsec", dest = "testsec", type = int,
help = "test time in seconds; default = %s" %
parser.defaults["testsec"])
parser.set_defaults(nodestep = 1)
parser.add_option("-s", "--nodestep", dest = "nodestep", type = int,
help = "number of nodes step size; default = %s" %
parser.defaults["nodestep"])
def usage(msg = None, err = 0):
sys.stdout.write("\n")
if msg:
sys.stdout.write(msg + "\n\n")
parser.print_help()
sys.exit(err)
# parse command line options
(options, args) = parser.parse_args()
if options.minnodes < 2:
usage("invalid min number of nodes: %s" % options.minnodes)
if options.maxnodes < options.minnodes:
usage("invalid max number of nodes: %s" % options.maxnodes)
if options.testsec < 1:
usage("invalid test time: %s" % options.testsec)
if options.nodestep < 1:
usage("invalid node step: %s" % options.nodestep)
for a in args:
sys.stderr.write("ignoring command line argument: '%s'\n" % a)
start = datetime.datetime.now()
for i in range(options.minnodes, options.maxnodes + 1, options.nodestep):
print(("%s node test:" % str(i)), file = sys.stderr)
test(i, options.testsec)
print("", file = sys.stderr)
print(("elapsed time: %s" % str(datetime.datetime.now() - start)), file=sys.stderr)
if __name__ == "__main__":
main()
| Benocs/core | src/daemon/examples/netns/switchtest.py | Python | bsd-3-clause | 3,358 |
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Detlev Offenbach <detlev@die-offenbachs.de>
#
"""
Module implementing a syntax highlighter for unified and context diff outputs.
"""
from __future__ import unicode_literals
from E5Gui.E5GenericDiffHighlighter import TERMINAL, E5GenericDiffHighlighter
class SvnDiffHighlighter(E5GenericDiffHighlighter):
"""
Class implementing a diff highlighter for Subversion.
"""
def __init__(self, doc):
"""
Constructor
@param doc reference to the text document (QTextDocument)
"""
super(SvnDiffHighlighter, self).__init__(doc)
def generateRules(self):
"""
Public method to generate the rule set.
"""
diffHeader = self.makeFormat(fg=self.textColor,
bg=self.headerColor)
diffHeaderBold = self.makeFormat(fg=self.textColor,
bg=self.headerColor,
bold=True)
diffContext = self.makeFormat(fg=self.textColor,
bg=self.contextColor)
diffAdded = self.makeFormat(fg=self.textColor,
bg=self.addedColor)
diffRemoved = self.makeFormat(fg=self.textColor,
bg=self.removedColor)
diffBarRegex = TERMINAL(r'^=+$')
diffHeaderRegex = TERMINAL(r'^[iI]ndex: \S+')
diffOldRegex = TERMINAL(r'^--- ')
diffNewRegex = TERMINAL(r'^\+\+\+')
diffContextRegex = TERMINAL(r'^@@ ')
diffAddedRegex = TERMINAL(r'^[+>]|^A ')
diffRemovedRegex = TERMINAL(r'^[-<]|^D ')
self.createRules((diffOldRegex, diffRemoved),
(diffNewRegex, diffAdded),
(diffContextRegex, diffContext),
(diffHeaderRegex, diffHeader),
(diffBarRegex, diffHeaderBold),
(diffAddedRegex, diffAdded),
(diffRemovedRegex, diffRemoved),
)
| testmana2/test | Plugins/VcsPlugins/vcsSubversion/SvnDiffHighlighter.py | Python | gpl-3.0 | 2,136 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# (c) Copyright 2003-2009 Hewlett-Packard Development Company, L.P.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Author: Don Welch
#
__version__ = '4.1'
__mod__ = 'hp-probe'
__title__ = 'Printer Discovery Utility'
__doc__ = "Discover HPLIP supported USB, parallel, and network attached printers."
# Std Lib
import sys
import getopt
import operator
import os
# Local
from base.g import *
from base import device, utils, tui, module
USAGE = [(__doc__, "", "name", True),
("Usage: %s [OPTIONS]" % __mod__, "", "summary", True),
utils.USAGE_OPTIONS,
("Bus to probe:", "-b<bus> or --bus=<bus>", "option", False),
("", "<bus>: cups, usb\*, net, bt, fw, par (\*default) (Note: bt and fw not supported in this release.)", "option", False),
("Set Time to Live (TTL):", "-t<ttl> or --ttl=<ttl> (Default is 4).", "option", False),
("Set timeout:", "-o<timeout in secs.> or --timeout=<timeout is secs.>", "option", False),
("Filter by functionality:", "-e<filter list> or --filter=<filter list>", "option", False),
("", "<filter list>: comma separated list of one or more of: scan, pcard, fax, copy, or none\*. (\*none is the default)", "option", False),
("Search:", "-s<search re> or --search=<search re>", "option", False),
("", "<search re> must be a valid regular expression (not case sensitive)", "option", False),
("Network discovery method:", "-m<method> or --method=<method>: <method> is 'slp'* or 'mdns'.", "option", False),
utils.USAGE_LOGGING1, utils.USAGE_LOGGING2, utils.USAGE_LOGGING3,
utils.USAGE_HELP,
utils.USAGE_SPACE,
utils.USAGE_EXAMPLES,
("Find all devices on the network:", "hp-probe -bnet", "example", False),
("Find all devices on USB that support scanning:", "hp-probe -busb -escan", "example", False),
("Find all networked devices that contain the name 'lnx' and that support photo cards or scanning:", "hp-probe -bnet -slnx -escan,pcard", "example", False),
("Find all devices that have queues installed in CUPS:", "hp-probe -bcups", "example", False),
("Find all devices on the USB bus:", "hp-probe", "example", False),
]
try:
mod = module.Module(__mod__, __title__, __version__, __doc__, USAGE,
(INTERACTIVE_MODE,))
opts, device_uri, printer_name, mode, ui_toolkit, loc = \
mod.parseStdOpts('b:t:o:e:s:m:',
['ttl=', 'filter=', 'search=', 'find=',
'method=', 'time-out=', 'timeout=', 'bus='],
handle_device_printer=False)
bus = None
timeout=10
ttl=4
filter = []
search = ''
method = 'slp'
for o, a in opts:
if o in ('-b', '--bus'):
try:
bus = [x.lower().strip() for x in a.split(',')]
except TypeError:
bus = ['usb']
if not device.validateBusList(bus):
mod.usage(error_msg=['Invalid bus name'])
elif o in ('-m', '--method'):
method = a.lower().strip()
if method not in ('slp', 'mdns', 'bonjour'):
mod.usage(error_msg=["Invalid network search protocol name. Must be 'slp' or 'mdns'."])
else:
bus = ['net']
elif o in ('-t', '--ttl'):
try:
ttl = int(a)
except ValueError:
ttl = 4
log.note("TTL value error. TTL set to default of 4 hops.")
elif o in ('-o', '--timeout', '--time-out'):
try:
timeout = int(a)
if timeout > 45:
log.note("Timeout > 45secs. Setting to 45secs.")
timeout = 45
except ValueError:
timeout = 5
log.note("Timeout value error. Timeout set to default of 5secs.")
if timeout < 0:
mod.usage(error_msg=["You must specify a positive timeout in seconds."])
elif o in ('-e', '--filter'):
filter = [x.strip().lower() for x in a.split(',')]
if not device.validateFilterList(filter):
mod.usage(error_msg=["Invalid term in filter"])
elif o in ('-s', '--search', '--find'):
search = a.lower().strip()
if bus is None:
bus = tui.connection_table()
if bus is None:
sys.exit(0)
log.info("\nUsing connection type: %s" % bus[0])
log.info("")
tui.header("DEVICE DISCOVERY")
for b in bus:
if b == 'net':
log.info(log.bold("Probing network for printers. Please wait, this will take approx. %d seconds...\n" % timeout))
FILTER_MAP = {'print' : None,
'none' : None,
'scan': 'scan-type',
'copy': 'copy-type',
'pcard': 'pcard-type',
'fax': 'fax-type',
}
filter_dict = {}
for f in filter:
if f in FILTER_MAP:
filter_dict[FILTER_MAP[f]] = (operator.gt, 0)
else:
filter_dict[f] = (operator.gt, 0)
log.debug(filter_dict)
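        # probeDevices returns a dict keyed by device URI; each value holds the model at index 0 and the display name at index 2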
devices = device.probeDevices([b], timeout, ttl, filter_dict, search, method)
cleanup_spinner()
max_c1, max_c2, max_c3, max_c4 = 0, 0, 0, 0
if devices:
for d in devices:
max_c1 = max(len(d), max_c1)
max_c3 = max(len(devices[d][0]), max_c3)
max_c4 = max(len(devices[d][2]), max_c4)
if b == 'net':
formatter = utils.TextFormatter(
(
{'width': max_c1, 'margin' : 2},
{'width': max_c3, 'margin' : 2},
{'width': max_c4, 'margin' : 2},
)
)
log.info(formatter.compose(("Device URI", "Model", "Name")))
log.info(formatter.compose(('-'*max_c1, '-'*max_c3, '-'*max_c4)))
for d in devices:
log.info(formatter.compose((d, devices[d][0], devices[d][2])))
elif b in ('usb', 'par', 'cups'):
formatter = utils.TextFormatter(
(
{'width': max_c1, 'margin' : 2},
{'width': max_c3, 'margin' : 2},
)
)
log.info(formatter.compose(("Device URI", "Model")))
log.info(formatter.compose(('-'*max_c1, '-'*max_c3)))
for d in devices:
log.info(formatter.compose((d, devices[d][0])))
else:
log.error("Invalid bus: %s" % b)
log.info("\nFound %d printer(s) on the '%s' bus.\n" % (len(devices), b))
else:
log.warn("No devices found on the '%s' bus. If this isn't the result you are expecting," % b)
if b == 'net':
log.warn("check your network connections and make sure your internet")
log.warn("firewall software is disabled.")
else:
log.warn("check to make sure your devices are properly connected and powered on.")
except KeyboardInterrupt:
log.error("User exit")
log.info("")
log.info("Done.")
| Alberto-Beralix/Beralix | i386-squashfs-root/usr/share/hplip/probe.py | Python | gpl-3.0 | 8,190 |
"""
Basic IRT support for assessment items
"""
from dlkit.json_.osid.metadata import Metadata
from dlkit.abstract_osid.assessment import record_templates as abc_assessment_records
from dlkit.abstract_osid.osid.errors import IllegalState, InvalidArgument
from ...osid.base_records import DecimalValuesRecord,\
DecimalValuesFormRecord,\
QueryInitRecord
class ItemDecimalValuesRecord(abc_assessment_records.ItemRecord, DecimalValuesRecord):
"""actual assessment item record"""
_implemented_record_type_identifiers = [
'item-decimal-values'
]
class ItemDecimalValuesFormRecord(DecimalValuesFormRecord,
abc_assessment_records.ItemFormRecord):
"""an assessment item with decimal values attached"""
_implemented_record_type_identifiers = [
'item-decimal-values'
]
def __init__(self, osid_object_form=None):
if osid_object_form is not None:
self.my_osid_object_form = osid_object_form
self._init_metadata()
if not self.my_osid_object_form.is_for_update():
self._init_map()
super(ItemDecimalValuesFormRecord, self).__init__(osid_object_form=osid_object_form)
class IRTItemRecord(DecimalValuesRecord):
"""include 3 basic IRT values"""
_implemented_record_type_identifiers = [
'irt-item',
'item-decimal-values'
]
def has_difficulty_value(self):
"""stub"""
return 'difficulty' in self.my_osid_object._my_map['decimalValues']
def get_difficulty_value(self):
"""stub"""
if self.has_difficulty_value():
return self.my_osid_object._my_map['decimalValues']['difficulty']
raise IllegalState()
def has_discrimination_value(self):
"""stub"""
return 'discrimination' in self.my_osid_object._my_map['decimalValues']
def get_discrimination_value(self):
"""stub"""
if self.has_discrimination_value():
return self.my_osid_object._my_map['decimalValues']['discrimination']
raise IllegalState()
def has_pseudo_guessing_value(self):
"""stub"""
return 'pseudoGuessing' in self.my_osid_object._my_map['decimalValues']
def get_pseudo_guessing_value(self):
"""stub"""
if self.has_pseudo_guessing_value():
return self.my_osid_object._my_map['decimalValues']['pseudoGuessing']
raise IllegalState()
difficulty = property(fget=get_difficulty_value)
discrimination = property(fget=get_discrimination_value)
guessing = property(fget=get_pseudo_guessing_value)
class IRTItemFormRecord(ItemDecimalValuesFormRecord):
"""form to create / update the 3 IRT values we support"""
_implemented_record_type_identifiers = [
'irt-item',
'item-decimal-values'
]
def __init__(self, osid_object_form=None):
if osid_object_form is not None:
self.my_osid_object_form = osid_object_form
self._init_metadata()
if not self.my_osid_object_form.is_for_update():
self._init_map()
super(IRTItemFormRecord, self).__init__(osid_object_form=osid_object_form)
def _init_map(self):
"""stub"""
super(IRTItemFormRecord, self)._init_map()
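        # seed the three IRT parameters (difficulty, discrimination, pseudo-guessing) with the default decimal value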
self.my_osid_object_form._my_map['decimalValues']['difficulty'] = \
self._decimal_value_metadata['default_decimal_values'][1]
self.my_osid_object_form._my_map['decimalValues']['discrimination'] = \
self._decimal_value_metadata['default_decimal_values'][1]
self.my_osid_object_form._my_map['decimalValues']['pseudoGuessing'] = \
self._decimal_value_metadata['default_decimal_values'][1]
def get_difficulty_value_metadata(self):
"""stub"""
return Metadata(**self._decimal_value_metadata)
def get_discrimination_value_metadata(self):
"""stub"""
return Metadata(**self._decimal_value_metadata)
def get_pseudo_guessing_value_metadata(self):
"""stub"""
return Metadata(**self._decimal_value_metadata)
def set_difficulty_value(self, difficulty):
"""stub"""
if not isinstance(difficulty, float):
raise InvalidArgument('difficulty value must be a decimal')
self.add_decimal_value(difficulty, 'difficulty')
def set_discrimination_value(self, discrimination):
"""stub"""
if not isinstance(discrimination, float):
raise InvalidArgument('discrimination value must be a decimal')
self.add_decimal_value(discrimination, 'discrimination')
def set_pseudo_guessing_value(self, pseudo_guessing):
"""stub"""
if not isinstance(pseudo_guessing, float):
raise InvalidArgument('pseudo-guessing value must be a decimal')
self.add_decimal_value(pseudo_guessing, 'pseudoGuessing')
class IRTItemQueryRecord(QueryInitRecord):
"""query items by IRT attributes"""
def match_minimum_difficulty(self, value, match):
"""stub"""
self._my_osid_query._match_minimum_decimal('decimalValues.difficulty',
value,
match)
def clear_minimum_difficulty_terms(self):
"""stub"""
self._my_osid_query._clear_minimum_terms('decimalValues.difficulty')
def match_maximum_difficulty(self, value, match):
"""stub"""
self._my_osid_query._match_maximum_decimal('decimalValues.difficulty',
value,
match)
def clear_maximum_difficulty_terms(self):
"""stub"""
self._my_osid_query._clear_maximum_terms('decimalValues.difficulty')
def match_minimum_discrimination(self, value, match):
"""stub"""
self._my_osid_query._match_minimum_decimal('decimalValues.discrimination',
value,
match)
def clear_miniumum_discrimination_terms(self):
"""stub"""
self._my_osid_query._clear_minimum_terms('decimalValues.discrimination')
def match_maximum_discrimination(self, value, match):
"""stub"""
self._my_osid_query._match_maximum_decimal('decimalValues.discrimination',
value,
match)
def clear_maximum_discrimination_terms(self):
"""stub"""
self._my_osid_query._clear_maximum_terms('decimalValues.discrimination')
def match_minimum_pseudo_guessing(self, value, match):
"""stub"""
self._my_osid_query._match_minimum_decimal('decimalValues.pseudo_guessing',
value,
match)
def clear_miniumum_pseudo_guessing_terms(self):
"""stub"""
self._my_osid_query._clear_minimum_terms('decimalValues.pseudo_guessing')
def match_maximum_pseudo_guessing(self, value, match):
"""stub"""
self._my_osid_query._match_maximum_decimal('decimalValues.pseudo_guessing',
value,
match)
def clear_maximum_pseudo_guessing_terms(self):
"""stub"""
self._my_osid_query._clear_maximum_terms('decimalValues.pseudo_guessing')
| mitsei/dlkit | dlkit/records/assessment/analytic/irt.py | Python | mit | 7,495 |
"""
unittests for xmodule
Run like this:
paver test_lib -l common/lib/xmodule
"""
import json
import os
import pprint
import unittest
import inspect
from contextlib import contextmanager
from lazy import lazy
from mock import Mock
from operator import attrgetter
from path import path
from xblock.field_data import DictFieldData
from xblock.fields import ScopeIds, Scope, Reference, ReferenceList, ReferenceValueDict
from xmodule.x_module import ModuleSystem, XModuleDescriptor, XModuleMixin
from xmodule.modulestore.inheritance import InheritanceMixin, own_metadata
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from xmodule.mako_module import MakoDescriptorSystem
from xmodule.error_module import ErrorDescriptor
from xmodule.assetstore import AssetMetadata
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.mongo.draft import DraftModuleStore
from xmodule.modulestore.xml import CourseLocationManager
from xmodule.modulestore.draft_and_published import DIRECT_ONLY_CATEGORIES, ModuleStoreDraftAndPublished
MODULE_DIR = path(__file__).dirname()
# Location of common test DATA directory
# '../../../../edx-platform/common/test/data/'
DATA_DIR = MODULE_DIR.parent.parent.parent.parent / "test" / "data"
open_ended_grading_interface = {
'url': 'blah/',
'username': 'incorrect_user',
'password': 'incorrect_pass',
'staff_grading': 'staff_grading',
'peer_grading': 'peer_grading',
'grading_controller': 'grading_controller',
}
class TestModuleSystem(ModuleSystem): # pylint: disable=abstract-method
"""
ModuleSystem for testing
"""
def __init__(self, **kwargs):
id_manager = CourseLocationManager(kwargs['course_id'])
kwargs.setdefault('id_reader', id_manager)
kwargs.setdefault('id_generator', id_manager)
kwargs.setdefault('services', {}).setdefault('field-data', DictFieldData({}))
super(TestModuleSystem, self).__init__(**kwargs)
def handler_url(self, block, handler, suffix='', query='', thirdparty=False):
return '{usage_id}/{handler}{suffix}?{query}'.format(
usage_id=unicode(block.scope_ids.usage_id),
handler=handler,
suffix=suffix,
query=query,
)
def local_resource_url(self, block, uri):
return 'resource/{usage_id}/{uri}'.format(
usage_id=unicode(block.scope_ids.usage_id),
uri=uri,
)
# Disable XBlockAsides in most tests
def get_asides(self, block):
return []
def get_test_system(course_id=SlashSeparatedCourseKey('org', 'course', 'run')):
"""
Construct a test ModuleSystem instance.
By default, the render_template() method simply returns the repr of the
context it is passed. You can override this behavior by monkey patching::
system = get_test_system()
system.render_template = my_render_func
where `my_render_func` is a function of the form my_render_func(template, context).
"""
user = Mock(name='get_test_system.user', is_staff=False)
descriptor_system = get_test_descriptor_system()
def get_module(descriptor):
"""Mocks module_system get_module function"""
# pylint: disable=protected-access
# Unlike XBlock Runtimes or DescriptorSystems,
# each XModule is provided with a new ModuleSystem.
# Construct one for the new XModule.
module_system = get_test_system()
# Descriptors can all share a single DescriptorSystem.
# So, bind to the same one as the current descriptor.
module_system.descriptor_runtime = descriptor._runtime # pylint: disable=protected-access
descriptor.bind_for_student(module_system, descriptor._field_data, user.id)
return descriptor
return TestModuleSystem(
static_url='/static',
track_function=Mock(name='get_test_system.track_function'),
get_module=get_module,
render_template=mock_render_template,
replace_urls=str,
user=user,
get_real_user=lambda(__): user,
filestore=Mock(name='get_test_system.filestore'),
debug=True,
hostname="edx.org",
xqueue={
'interface': None,
'callback_url': '/',
'default_queuename': 'testqueue',
'waittime': 10,
'construct_callback': Mock(name='get_test_system.xqueue.construct_callback', side_effect="/"),
},
node_path=os.environ.get("NODE_PATH", "/usr/local/lib/node_modules"),
anonymous_student_id='student',
open_ended_grading_interface=open_ended_grading_interface,
course_id=course_id,
error_descriptor_class=ErrorDescriptor,
get_user_role=Mock(name='get_test_system.get_user_role', is_staff=False),
user_location=Mock(name='get_test_system.user_location'),
descriptor_runtime=descriptor_system,
)
def get_test_descriptor_system():
"""
Construct a test DescriptorSystem instance.
"""
field_data = DictFieldData({})
descriptor_system = MakoDescriptorSystem(
load_item=Mock(name='get_test_descriptor_system.load_item'),
resources_fs=Mock(name='get_test_descriptor_system.resources_fs'),
error_tracker=Mock(name='get_test_descriptor_system.error_tracker'),
render_template=mock_render_template,
mixins=(InheritanceMixin, XModuleMixin),
field_data=field_data,
services={'field-data': field_data},
)
descriptor_system.get_asides = lambda block: []
return descriptor_system
def mock_render_template(*args, **kwargs):
"""
Pretty-print the args and kwargs.
Allows us to not depend on any actual template rendering mechanism,
while still returning a unicode object
"""
return pprint.pformat((args, kwargs)).decode()
class ModelsTest(unittest.TestCase):
def test_load_class(self):
vc = XModuleDescriptor.load_class('video')
vc_str = "<class 'xmodule.video_module.video_module.VideoDescriptor'>"
self.assertEqual(str(vc), vc_str)
class LogicTest(unittest.TestCase):
"""Base class for testing xmodule logic."""
descriptor_class = None
raw_field_data = {}
def setUp(self):
super(LogicTest, self).setUp()
self.system = get_test_system()
self.descriptor = Mock(name="descriptor", url_name='', category='test')
self.xmodule_class = self.descriptor_class.module_class
usage_key = self.system.course_id.make_usage_key(self.descriptor.category, 'test_loc')
# ScopeIds has 4 fields: user_id, block_type, def_id, usage_id
scope_ids = ScopeIds(1, self.descriptor.category, usage_key, usage_key)
self.xmodule = self.xmodule_class(
self.descriptor, self.system, DictFieldData(self.raw_field_data), scope_ids
)
def ajax_request(self, dispatch, data):
"""Call Xmodule.handle_ajax."""
return json.loads(self.xmodule.handle_ajax(dispatch, data))
def map_references(value, field, actual_course_key):
"""
Map the references in value to actual_course_key and return value
"""
if not value: # if falsey
return value
if isinstance(field, Reference):
return value.map_into_course(actual_course_key)
if isinstance(field, ReferenceList):
return [sub.map_into_course(actual_course_key) for sub in value]
if isinstance(field, ReferenceValueDict):
return {key: ele.map_into_course(actual_course_key) for key, ele in value.iteritems()}
return value
class BulkAssertionManager(object):
"""
This provides a facility for making a large number of assertions, and seeing all of
the failures at once, rather than only seeing single failures.
"""
def __init__(self, test_case):
self._equal_assertions = []
self._test_case = test_case
def run_assertions(self):
if len(self._equal_assertions) > 0:
raise AssertionError(self._equal_assertions)
class BulkAssertionTest(unittest.TestCase):
"""
This test case provides a `bulk_assertions` context manager to assert with,
which calls `run_assertions` at the end of the block to validate all
of the assertions.
"""
def setUp(self, *args, **kwargs):
super(BulkAssertionTest, self).setUp(*args, **kwargs)
self._manager = None
@contextmanager
def bulk_assertions(self):
if self._manager:
yield
else:
try:
self._manager = BulkAssertionManager(self)
yield
finally:
self._manager.run_assertions()
self._manager = None
def assertEqual(self, expected, actual, message=None):
if self._manager is not None:
try:
super(BulkAssertionTest, self).assertEqual(expected, actual, message)
except Exception as error: # pylint: disable=broad-except
exc_stack = inspect.stack()[1]
if message is not None:
msg = '{} -> {}:{} -> {}'.format(message, exc_stack[1], exc_stack[2], unicode(error))
else:
msg = '{}:{} -> {}'.format(exc_stack[1], exc_stack[2], unicode(error))
self._manager._equal_assertions.append(msg) # pylint: disable=protected-access
else:
super(BulkAssertionTest, self).assertEqual(expected, actual, message)
assertEquals = assertEqual
class LazyFormat(object):
"""
A string-like object that delays formatting until it's put into a string context.
"""
__slots__ = ('template', 'args', 'kwargs', '_message')
def __init__(self, template, *args, **kwargs):
self.template = template
self.args = args
self.kwargs = kwargs
self._message = None
def __unicode__(self):
if self._message is None:
self._message = self.template.format(*self.args, **self.kwargs)
return self._message
def __repr__(self):
return unicode(self)
class CourseComparisonTest(BulkAssertionTest):
"""
Mixin that has methods for comparing courses for equality.
"""
def setUp(self):
super(CourseComparisonTest, self).setUp()
self.field_exclusions = set()
self.ignored_asset_keys = set()
def exclude_field(self, usage_id, field_name):
"""
Mark field ``field_name`` of expected block usage ``usage_id`` as ignored
Args:
usage_id (:class:`opaque_keys.edx.UsageKey` or ``None``): If ``None``, skip this field in all blocks
field_name (string): The name of the field to skip
"""
self.field_exclusions.add((usage_id, field_name))
def ignore_asset_key(self, key_name):
"""
Add an asset key to the list of keys to be ignored when comparing assets.
Args:
key_name: The name of the key to ignore.
"""
self.ignored_asset_keys.add(key_name)
def assertReferenceRelativelyEqual(self, reference_field, expected_block, actual_block):
"""
Assert that the supplied reference field is identical on the expected_block and actual_block,
assuming that the references are only relative (that is, comparing only on block_type and block_id,
not course_key).
"""
def extract_key(usage_key):
if usage_key is None:
return None
else:
return (usage_key.block_type, usage_key.block_id)
expected = reference_field.read_from(expected_block)
actual = reference_field.read_from(actual_block)
if isinstance(reference_field, Reference):
expected = extract_key(expected)
actual = extract_key(actual)
elif isinstance(reference_field, ReferenceList):
expected = [extract_key(key) for key in expected]
actual = [extract_key(key) for key in actual]
elif isinstance(reference_field, ReferenceValueDict):
expected = {key: extract_key(val) for (key, val) in expected.iteritems()}
actual = {key: extract_key(val) for (key, val) in actual.iteritems()}
self.assertEqual(
expected,
actual,
LazyFormat(
"Field {} doesn't match between usages {} and {}: {!r} != {!r}",
reference_field.name,
expected_block.scope_ids.usage_id,
actual_block.scope_ids.usage_id,
expected,
actual
)
)
def assertBlocksEqualByFields(self, expected_block, actual_block):
self.assertEqual(expected_block.fields, actual_block.fields)
for field in expected_block.fields.values():
self.assertFieldEqual(field, expected_block, actual_block)
def assertFieldEqual(self, field, expected_block, actual_block):
if isinstance(field, (Reference, ReferenceList, ReferenceValueDict)):
self.assertReferenceRelativelyEqual(field, expected_block, actual_block)
else:
expected = field.read_from(expected_block)
actual = field.read_from(actual_block)
self.assertEqual(
expected,
actual,
LazyFormat(
"Field {} doesn't match between usages {} and {}: {!r} != {!r}",
field.name,
expected_block.scope_ids.usage_id,
actual_block.scope_ids.usage_id,
expected,
actual
)
)
def assertCoursesEqual(self, expected_store, expected_course_key, actual_store, actual_course_key):
"""
Assert that the courses identified by ``expected_course_key`` in ``expected_store`` and
``actual_course_key`` in ``actual_store`` are identical (ignoring differences owing to the
course_keys being different).
Any field value mentioned in ``self.field_exclusions`` by the key (usage_id, field_name)
will be ignored for the purpose of equality checking.
"""
# compare published
with expected_store.branch_setting(ModuleStoreEnum.Branch.published_only, expected_course_key):
with actual_store.branch_setting(ModuleStoreEnum.Branch.published_only, actual_course_key):
expected_items = expected_store.get_items(expected_course_key, revision=ModuleStoreEnum.RevisionOption.published_only)
actual_items = actual_store.get_items(actual_course_key, revision=ModuleStoreEnum.RevisionOption.published_only)
self.assertGreater(len(expected_items), 0)
self._assertCoursesEqual(expected_items, actual_items, actual_course_key)
# if the modulestore supports having a draft branch
if isinstance(expected_store, ModuleStoreDraftAndPublished):
with expected_store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, expected_course_key):
with actual_store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, actual_course_key):
# compare draft
if expected_store.get_modulestore_type(None) == ModuleStoreEnum.Type.split:
revision = ModuleStoreEnum.RevisionOption.draft_only
else:
revision = None
expected_items = expected_store.get_items(expected_course_key, revision=revision)
if actual_store.get_modulestore_type(None) == ModuleStoreEnum.Type.split:
revision = ModuleStoreEnum.RevisionOption.draft_only
else:
revision = None
actual_items = actual_store.get_items(actual_course_key, revision=revision)
self._assertCoursesEqual(expected_items, actual_items, actual_course_key, expect_drafts=True)
def _assertCoursesEqual(self, expected_items, actual_items, actual_course_key, expect_drafts=False):
with self.bulk_assertions():
self.assertEqual(len(expected_items), len(actual_items))
def map_key(usage_key):
return (usage_key.block_type, usage_key.block_id)
actual_item_map = {
map_key(item.location): item
for item in actual_items
}
for expected_item in expected_items:
actual_item_location = actual_course_key.make_usage_key(expected_item.category, expected_item.location.block_id)
# split and old mongo use different names for the course root, but we don't know which
# modulestore the actual items come from here; so assume old mongo and, if that fails, assume split
if expected_item.location.category == 'course':
actual_item_location = actual_item_location.replace(name=actual_item_location.run)
actual_item = actual_item_map.get(map_key(actual_item_location))
# must be split
if actual_item is None and expected_item.location.category == 'course':
actual_item_location = actual_item_location.replace(name='course')
actual_item = actual_item_map.get(map_key(actual_item_location))
# Formatting the message slows down tests of large courses significantly, so only do it if it would be used
self.assertIsNotNone(actual_item, LazyFormat(u'cannot find {} in {}', map_key(actual_item_location), actual_item_map))
# compare fields
self.assertEqual(expected_item.fields, actual_item.fields)
for field_name, field in expected_item.fields.iteritems():
if (expected_item.scope_ids.usage_id, field_name) in self.field_exclusions:
continue
if (None, field_name) in self.field_exclusions:
continue
# Children are handled specially
if field_name == 'children':
continue
self.assertFieldEqual(field, expected_item, actual_item)
# compare children
self.assertEqual(expected_item.has_children, actual_item.has_children)
if expected_item.has_children:
expected_children = [
(expected_item_child.location.block_type, expected_item_child.location.block_id)
# get_children() rather than children to strip privates from public parents
for expected_item_child in expected_item.get_children()
]
actual_children = [
(item_child.location.block_type, item_child.location.block_id)
# get_children() rather than children to strip privates from public parents
for item_child in actual_item.get_children()
]
self.assertEqual(expected_children, actual_children)
def assertAssetEqual(self, expected_course_key, expected_asset, actual_course_key, actual_asset):
"""
Assert that two assets are equal, allowing for differences related to their being from different courses.
"""
for key in self.ignored_asset_keys:
if key in expected_asset:
del expected_asset[key]
if key in actual_asset:
del actual_asset[key]
expected_key = expected_asset.pop('asset_key')
actual_key = actual_asset.pop('asset_key')
self.assertEqual(expected_key.map_into_course(actual_course_key), actual_key)
self.assertEqual(expected_key, actual_key.map_into_course(expected_course_key))
expected_filename = expected_asset.pop('filename')
actual_filename = actual_asset.pop('filename')
self.assertEqual(expected_key.to_deprecated_string(), expected_filename)
self.assertEqual(actual_key.to_deprecated_string(), actual_filename)
self.assertEqual(expected_asset, actual_asset)
def _assertAssetsEqual(self, expected_course_key, expected_assets, actual_course_key, actual_assets): # pylint: disable=invalid-name
"""
Private helper method for assertAssetsEqual
"""
self.assertEqual(len(expected_assets), len(actual_assets))
actual_assets_map = {asset['asset_key']: asset for asset in actual_assets}
for expected_item in expected_assets:
actual_item = actual_assets_map[expected_item['asset_key'].map_into_course(actual_course_key)]
self.assertAssetEqual(expected_course_key, expected_item, actual_course_key, actual_item)
def assertAssetsEqual(self, expected_store, expected_course_key, actual_store, actual_course_key):
"""
Assert that the course assets identified by ``expected_course_key`` in ``expected_store`` and
``actual_course_key`` in ``actual_store`` are identical, allowing for differences related
to their being from different course keys.
"""
expected_content, expected_count = expected_store.get_all_content_for_course(expected_course_key)
actual_content, actual_count = actual_store.get_all_content_for_course(actual_course_key)
with self.bulk_assertions():
self.assertEqual(expected_count, actual_count)
self._assertAssetsEqual(expected_course_key, expected_content, actual_course_key, actual_content)
expected_thumbs = expected_store.get_all_content_thumbnails_for_course(expected_course_key)
actual_thumbs = actual_store.get_all_content_thumbnails_for_course(actual_course_key)
self._assertAssetsEqual(expected_course_key, expected_thumbs, actual_course_key, actual_thumbs)
def assertAssetsMetadataEqual(self, expected_modulestore, expected_course_key, actual_modulestore, actual_course_key):
"""
Assert that the modulestore asset metadata for the ``expected_course_key`` and the ``actual_course_key``
are equivalent.
"""
expected_course_assets = expected_modulestore.get_all_asset_metadata(
expected_course_key, None, sort=('displayname', ModuleStoreEnum.SortOrder.descending)
)
actual_course_assets = actual_modulestore.get_all_asset_metadata(
actual_course_key, None, sort=('displayname', ModuleStoreEnum.SortOrder.descending)
)
self.assertEquals(len(expected_course_assets), len(actual_course_assets))
for idx, __ in enumerate(expected_course_assets):
for attr in AssetMetadata.ATTRS_ALLOWED_TO_UPDATE:
if attr in ('edited_on',):
# edited_on is updated upon import.
continue
self.assertEquals(getattr(expected_course_assets[idx], attr), getattr(actual_course_assets[idx], attr))
| sameetb-cuelogic/edx-platform-test | common/lib/xmodule/xmodule/tests/__init__.py | Python | agpl-3.0 | 23,045 |
from flask.testing import FlaskClient
from app import create_app, db
from app.models import Framework, FrameworkLot
class TestClient(FlaskClient):
"""This is a custom Test Client for handling the creation of a dedicated application context on a per request basis.
Flask-SQLAlchemy attaches its db operations to the top application context on the context stack.
Requests use the top application context on the context stack or create a new one if none exists.
Normally this isn't an issue. Each new request in production will use its own thread and
on finding that there is no existing application context will create a new one.
In tests, however, we require an application context to create/update the database with the data required for the
test. We can then end up using this polluted application context in the view we're testing if we don't pop it. In
the open method of this class we create a fresh application context for the request/view to use and remove it
afterwards so it doesn't leak back to the test.
"""
def open(self, *args, **kwargs):
db.session.close()
app_context = self.application.app_context()
app_context.push()
res = super(TestClient, self).open(*args, **kwargs)
db.session.expire_all()
app_context.pop()
return res
class WSGIApplicationWithEnvironment(object):
def __init__(self, app, **kwargs):
self.app = app
self.kwargs = kwargs
def __call__(self, environ, start_response):
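        # inject the fixed environment values into every request before delegating to the wrapped app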
for key, value in self.kwargs.items():
environ[key] = value
return self.app(environ, start_response)
class BaseApplicationTest(object):
config = None
def setup(self):
self.app = create_app('test')
self.wsgi_app_main = WSGIApplicationWithEnvironment(
self.app.wsgi_app,
HTTP_AUTHORIZATION='Bearer {}'.format(self.app.config['DM_API_AUTH_TOKENS']),
REMOTE_ADDR='127.0.0.1',
)
self.wsgi_app_callbacks = WSGIApplicationWithEnvironment(
self.app.wsgi_app,
HTTP_AUTHORIZATION='Bearer {}'.format(self.app.config['DM_API_CALLBACK_AUTH_TOKENS']),
REMOTE_ADDR='127.0.0.1',
)
self.app.wsgi_app = self.wsgi_app_main
self.app.test_client_class = TestClient
self.client = self.app.test_client()
self.app_context = self.app.app_context()
self.app_context.push()
def teardown(self):
db.session.remove()
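        # iterate tables in reverse dependency order so foreign key constraints don't block the deletes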
for table in reversed(db.metadata.sorted_tables):
if table.name not in ["lots", "frameworks", "framework_lots"]:
db.engine.execute(table.delete())
FrameworkLot.query.filter(FrameworkLot.framework_id >= 100).delete()
Framework.query.filter(Framework.id >= 100).delete()
# Remove any framework variation details
frameworks = db.session.query(Framework).filter(Framework.framework_agreement_details.isnot(None))
for framework in frameworks.all():
framework.framework_agreement_details = None
db.session.add(framework)
db.session.commit()
db.get_engine(self.app).dispose()
self.app_context.pop()
class JSONTestMixin(object):
"""
Tests to verify endpoints that accept JSON.
"""
endpoint = None
method = None
client = None
def open(self, **kwargs):
return self.client.open(
self.endpoint.format(self=self),
method=self.method,
**kwargs
)
def test_non_json_causes_failure(self):
response = self.open(
data='this is not JSON',
content_type='application/json')
assert response.status_code == 400
assert b'Invalid JSON' in response.get_data()
def test_invalid_content_type_causes_failure(self):
response = self.open(
data='{"services": {"foo": "bar"}}')
assert response.status_code == 400
assert b'Unexpected Content-Type' in response.get_data()
class JSONUpdateTestMixin(JSONTestMixin):
def test_missing_updated_by_should_fail_with_400(self):
response = self.open(
data='{}',
content_type='application/json')
assert response.status_code == 400
assert "'updated_by' is a required property" in response.get_data(as_text=True)
| alphagov/digitalmarketplace-api | tests/bases.py | Python | mit | 4,405 |
#!/usr/bin/env python2
# coding: utf-8
import pygame
import sys
from functions import load_png
from config import ASSET_JOUEUR
from config import PLAYER_SPEED, PLAYER_LIFE_MAX, BOMB_RANGE, SIDE_LENGTH
from map import Bombe
class Joueur(pygame.sprite.Sprite):
def __init__(self, numero, spawn_topleft):
pygame.sprite.Sprite.__init__(self)
self.numero = numero
self.image, self.rect = load_png(ASSET_JOUEUR['BAS'])
self.rect.topleft = spawn_topleft
self.direction = "bas"
self.is_at_spawn = False
self.spawn = pygame.Rect(spawn_topleft, (SIDE_LENGTH, SIDE_LENGTH))
self.life_max = PLAYER_LIFE_MAX
self.life = PLAYER_LIFE_MAX
self.bombe_detection = True
self.bombe_range = BOMB_RANGE
self.bombe_max_number = 1
self.bombe_number = 1
self.velocity = PLAYER_SPEED
self.speed = [0, 0]
self.bouclier = False
self.bouclierEnDestruction = False
def respawn(self):
self.rect.topleft = self.spawn.topleft
self.direction = "bas"
self.life -= 1
self.bombe_detection = True
self.bombe_range = BOMB_RANGE
self.bombe_max_number = 1
self.bombe_number = 1
self.velocity = PLAYER_SPEED
self.speed = [0, 0]
def die(self, serveur):
channel = serveur.channelByNumero(self.numero)
channel.Send({'action': 'game_over', 'message': 'vous avez perdu'})
serveur.del_client(channel)
serveur.check_win()
def up(self):
self.speed[1] = -self.velocity
self.direction = "haut"
def down(self):
self.speed[1] = self.velocity
self.direction = "bas"
def left(self):
self.speed[0] = -self.velocity
self.direction = "gauche"
def right(self):
self.speed[0] = self.velocity
self.direction = "droite"
def poseBombe(self, groupeBombes, channels):
if self.bombe_number <= 0:
return
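        # snap the bomb to the center of the nearest grid cell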
bomb_centerx = (SIDE_LENGTH * round(self.rect.centerx / SIDE_LENGTH)) + SIDE_LENGTH/2
bomb_centery = (SIDE_LENGTH * round(self.rect.centery / SIDE_LENGTH)) + SIDE_LENGTH/2
for b in groupeBombes:
if b.rect.center == (bomb_centerx, bomb_centery):
return  # there is already a bomb here, cancel
self.bombe_number -= 1
self.bombe_detection = False
bombe = Bombe(self, bomb_centerx, bomb_centery)
groupeBombes.add(bombe)
for c in channels:
c.Send(
{'action': 'bombe', 'bombe_center': bombe.rect.center, 'bombe_id': bombe.id, 'joueur_id': self.numero})
def update(self, serveur):
if self.life <= 0:
print "Le joueur %d vient de mourir" % self.numero
self.die(serveur)
return
collision_flammes = pygame.sprite.spritecollide(self, serveur.flammes, False,
pygame.sprite.collide_rect_ratio(0.9))
shieldState = self.checkShield(collision_flammes)
if not self.isAtSpawn() and collision_flammes:
if shieldState:
if self.life > 1:
print "Le joueur %d vient d'exploser (mais n'est pas mort, c'est un Chuck Norris)" % self.numero
self.respawn()
else:
ancienCentre = self.rect.center
self.rect = self.rect.move(self.speed)
collisions_murs = pygame.sprite.spritecollide(self, serveur.murs, False)
collisions_caisses = pygame.sprite.spritecollide(self, serveur.caisses, False)
collisions_bombes = pygame.sprite.spritecollide(self, serveur.bombes, False)
if collisions_murs or collisions_caisses or (self.bombe_detection and collisions_bombes):
self.rect.center = ancienCentre
# round the position so the player stays aligned to the grid
self.rect.x = SIDE_LENGTH * round(self.rect.midtop[0] / SIDE_LENGTH)
self.rect.y = SIDE_LENGTH * round(self.rect.midright[1] / SIDE_LENGTH)
elif not self.bombe_detection and not collisions_bombes:
self.bombe_detection = True
else:
for power_up in pygame.sprite.spritecollide(self, serveur.power_ups, False):
power_up.effet(self)
power_up.die(serveur.clients)
self.speed = [0, 0]
def isAtSpawn(self):
if self.spawn.topleft == self.rect.topleft:
self.is_at_spawn = True
elif self.is_at_spawn:
if not self.rect.colliderect(self.spawn):
self.is_at_spawn = False
return self.is_at_spawn
def checkShield(self, coll):
if not self.bouclier:
return True
if self.bouclier and coll:
self.bouclierEnDestruction = True
return False
if self.bouclierEnDestruction and not coll:
self.bouclier = False
self.bouclierEnDestruction = False
return False
| simsor/BomberLAN | serveur/joueur.py | Python | mit | 5,116 |
# -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp import fields, models, api
import logging
_logger = logging.getLogger(__name__)
class IrModuleModule(models.Model):
_inherit = 'ir.module.module'
@api.model
def search(self, args, offset=0, limit=None, order=None, count=False):
""" We change this module search because odoo in module account
loads l10n_ar chart on post_install "_auto_install_l10n"
"""
new_args = []
for arg in args:
if arg[0] == 'name' and arg[1] == '=' and arg[2] == 'l10n_ar':
# we overwrite because arg can be a tuple or a list
# tuple does not support item assignment
# arg = (arg[0], arg[1], 'l10n_ar_chart')
arg = list(arg)
arg[2] = 'l10n_ar_chart'
elif arg[0] == 'name' and arg[1] == 'in' and 'l10n_ar' in arg[2]:
arg = list(arg)
arg[2] = [
'l10n_ar_chart' if x == 'l10n_ar' else x for x in arg[2]]
new_args.append(arg)
return super(IrModuleModule, self).search(
new_args, offset, limit, order, count=count)
class argentinian_base_configuration(models.TransientModel):
_name = 'argentinian.base.config.settings'
_inherit = 'res.config.settings'
module_l10n_ar_bank_cbu = fields.Boolean(
'Add CBU on bank account information',
help="""Installs the l10n_ar_bank_cbu module.""")
module_l10n_ar_chart_generic = fields.Boolean(
'Generic Argentinian Chart of Account',
help="""Installs the l10n_ar_chart_generic module.""")
module_l10n_ar_bank = fields.Boolean(
'Banks of Argentina',
help="Installs the l10n_ar_bank module that create banks of Argetina "
" based on a webservice")
module_l10n_ar_base_vat = fields.Boolean(
'Argentinian VAT validation',
help="Installs the l10n_ar_base_vat module that extends base_vat"
" modules so that you can add argentinian VATs (usually called"
"cuit/cuil)")
module_l10n_ar_invoice = fields.Boolean(
'Argentinian invoicing and other documents Management',
help="Installs the l10n_ar_invoice module. It creates some clases"
" to manage afip functionality, for example document class, journal"
" class, document letters, vat categories, etc.")
module_l10n_ar_partner_title = fields.Boolean(
'Partner reference and titles usually used in Argentina',
help="""Installs the l10n_ar_partner_title module. """)
module_l10n_ar_states = fields.Boolean(
'Argentinian States',
help="""Installs the l10n_ar_states module. """)
module_l10n_ar_vat_reports = fields.Boolean(
'Argentinian Sale/Purchase Vat Reports',
help="""Installs the l10n_ar_vat_reports module. """)
module_l10n_ar_hide_receipts = fields.Boolean(
'Hide sale/purchase receipts menus.',
help="""Installs the l10n_ar_hide_receipts module. """)
module_account_accountant = fields.Boolean(
'Manage Financial and Analytic Accounting.',
help="""Installs the account_accountant module. """)
module_l10n_ar_afipws_fe = fields.Boolean(
'Use Electronic Invoicing.',
help="""Installs the l10n_ar_afipws_fe module. """)
module_l10n_ar_account_vat_ledger = fields.Boolean(
'Add Account VAT Ledger models and report.',
help="""Installs the l10n_ar_account_vat_ledger module. """)
module_l10n_ar_account_vat_ledger_city = fields.Boolean(
'Add Account VAT Ledger TAX entity information requirements by file.',
help="""Installs the l10n_ar_account_vat_ledger_city module. """)
module_l10n_ar_chart_generic_withholding = fields.Boolean(
'Add generic withholding management.',
help="""Installs the l10n_ar_chart_generic_withholding module. """)
# Sales
module_l10n_ar_invoice_sale = fields.Boolean(
'Add availability to use VAT included or not on sales',
help="""Installs the l10n_ar_invoice_sale module.""")
# Aeroo reports
module_l10n_ar_aeroo_voucher = fields.Boolean(
'Argentinian Like Voucher Aeroo Report',
help="""Installs the module_l10n_ar_aeroo_voucher module.""")
module_l10n_ar_aeroo_invoice = fields.Boolean(
'Argentinian Aeroo Like Invoice Report',
help="""Installs the module_l10n_ar_aeroo_invoice module.""")
module_l10n_ar_aeroo_einvoice = fields.Boolean(
'Argentinian Aeroo Like Electronic Invoice Report',
help="""Installs the module_l10n_ar_aeroo_einvoice module.""")
module_l10n_ar_aeroo_stock = fields.Boolean(
'Argentinian Aeroo Like Remit Report',
help="""Installs the l10n_ar_aeroo_stock module.""")
module_l10n_ar_aeroo_purchase = fields.Boolean(
'Argentinian Aeroo Like Purchase Reports',
help="""Installs the l10n_ar_aeroo_purchase module.""")
module_l10n_ar_aeroo_sale = fields.Boolean(
'Argentinian Aeroo Like Sale Reports',
help="""Installs the l10n_ar_aeroo_sale module.""")
# module_l10n_ar_aeroo_receipt = fields.Boolean(
# 'Argentinian Aeroo Like Receipt Report',
# help="""Installs the l10n_ar_aeroo_receipt module.""")
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| odoo-argentina/config | l10n_ar_base/res_config.py | Python | agpl-3.0 | 5,591 |
#!/usr/bin/env python2
import nose.tools as nt
from ovirtlago import testlib
@testlib.with_ovirt_prefix
def test_cpu_model_host(prefix):
cpu_family = prefix.virt_env.get_ovirt_cpu_family()
nt.assert_equals(cpu_family, 'Intel Westmere Family')
@testlib.with_ovirt_prefix
def test_cpu_model_engine(prefix):
engine = prefix.virt_env.engine_vm()
cpu_family = prefix.virt_env.get_ovirt_cpu_family(host=engine)
nt.assert_equals(cpu_family, 'AMD Opteron G1')
@testlib.with_ovirt_prefix
def test_ssh(prefix):
engine = prefix.virt_env.engine_vm()
ret = engine.ssh(['hostname'])
nt.assert_equals(ret.code, 0)
@testlib.with_ovirt_prefix
def test_service(prefix):
engine = prefix.virt_env.engine_vm()
nt.assert_true(engine.service('sshd').alive())
| lago-project/lago-ost-plugin | tests/functional/fixtures/ovirt.runtest/002_testlib.py | Python | gpl-2.0 | 784 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
from websocket import create_connection
from websocket import ABNF
from reimp import Box
ws = create_connection("ws://127.0.0.1:8000/echo")
# ws = create_connection("ws://115.28.224.64:8000/echo")
box = Box()
box.cmd = 101
box.body = '我爱你'
t1 = time.time()
# binary protocol
ws.send(box.pack(), ABNF.OPCODE_BINARY)
result = ws.recv()
print 'time past: ', time.time() - t1
print "Received '%r'" % result
recv_box = Box()
recv_box.unpack(result)
print recv_box
ws.close()
| yangdw/repo.python | src/annotation/haven/examples/websocket_demo/client.py | Python | mit | 538 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Licensed under the GNU General Public License, version 3.
# See the file http://www.gnu.org/copyleft/gpl.txt
from pisi.actionsapi import kde4
#from pisi.actionsapi import pisitools
# if pisi can't find source directory, see /var/pisi/nmapis4/work/ and:
# WorkDir="nmapis4-"+ get.srcVERSION() +"/sub_project_dir/"
def setup():
kde4.configure()
def build():
kde4.make()
def install():
kde4.install()
# Take a look at the source folder for these files as documentation.
# pisitools.dodoc("AUTHORS", "BUGS", "ChangeLog", "COPYING", "README")
# If there is no install rule for a runnable binary, you can
# install it to binary directory.
# pisitools.dobin("nmapis4")
# You can use these as variables, they will replace GUI values before build.
# Package Name : nmapis4
# Version : 0.4.1
# Summary : NmapSi4 is a complete Qt4-based Gui
# For more information, you can look at the Actions API
# from the Help menu and toolbar.
# By PiSiDo 2.0.0
| pisiganesh/my_pisi_files | nmapsi4/actions.py | Python | gpl-2.0 | 1,010 |
# -*- coding: utf-8 -*-
from django.http import HttpResponse
from django.test import TestCase
from StringIO import StringIO
import mock
import pytest
import factory
from openpyxl import Workbook
from spreadsheetresponsemixin import SpreadsheetResponseMixin
from .models import MockModel, MockAuthor
class MockModelFactory(factory.django.DjangoModelFactory):
class Meta:
model = MockModel
title = factory.Sequence(lambda n: 'title{0}'.format(n))
class MockAuthorFactory(factory.django.DjangoModelFactory):
class Meta:
model = MockAuthor
name = factory.Sequence(lambda n: 'name{0}'.format(n))
class GenerateDataTests(TestCase):
def setUp(self):
self.mixin = SpreadsheetResponseMixin()
self.author = MockAuthorFactory()
self.mock = MockModelFactory(author=self.author)
self.mock2 = MockModelFactory(author=self.author)
self.queryset = MockModel.objects.all()
def test_assertion_error_raised_if_not_a_queryset_is_sent(self):
with pytest.raises(AssertionError):
self.mixin.queryset = [1]
list(self.mixin.generate_data())
def test_if_queryset_is_none_gets_self_queryset(self):
self.mixin.queryset = MockModel.objects.all()
self.assertSequenceEqual(MockModel.objects.values_list(),
list(self.mixin.generate_data()))
def test_returns_values_list_qs_if_queryset(self):
self.mixin.queryset = self.queryset
expected_list = self.queryset.values_list()
actual_list = self.mixin.generate_data()
assert list(actual_list) == list(expected_list)
def test_returns_values_list_if_qs_is_values_queryset(self):
self.mixin.queryset = self.queryset.values()
expected_list = MockModel.objects.all().values_list()
actual_list = self.mixin.generate_data()
assert list(actual_list) == list(expected_list)
def test_returns_self_if_qs_is_values_list_queryset(self):
values_list_queryset = MockModel.objects.all().values_list()
self.mixin.queryset = values_list_queryset
actual_list = self.mixin.generate_data()
assert list(actual_list) == list(values_list_queryset)
def test_uses_specified_fields(self):
fields = ('title',)
values_list_queryset = MockModel.objects.all().values_list()
self.mixin.queryset = values_list_queryset
# We expect it to be filtered by title,
# even though full value_list is passed
expected_list = MockModel.objects.all().values_list(*fields)
actual_list = self.mixin.generate_data(fields)
assert list(actual_list) == list(expected_list)
def test_follows_foreign_key_with_model_queryset(self):
fields = ('title', 'author__name')
queryset = MockModel.objects.all()
self.mixin.queryset = queryset
expected_list = [
(self.mock.title, self.author.name),
(self.mock2.title, self.author.name),
]
actual_list = self.mixin.generate_data(fields)
assert list(actual_list) == list(expected_list)
def test_allows_calculated_field_values(self):
fields = ('title', 'author__name', 'calculated')
queryset = MockModel.objects.all()
self.mixin.queryset = queryset
expected_list = [
(self.mock.title, self.author.name, u'whee %d' % self.mock.id),
(self.mock2.title, self.author.name, u'whee %d' % self.mock2.id),
]
self.mixin.calculated = lambda values: 'whee %d' % values[0]
self.mixin.calculated.fields = ['id']
actual_list = self.mixin.generate_data(fields)
self.assertEqual(list(actual_list), expected_list)
def test_follows_foreign_key_with_values_list_queryset(self):
fields = ('title', 'author__name')
values_list_queryset = MockModel.objects.all().values_list()
self.mixin.queryset = values_list_queryset
expected_list = [
(self.mock.title, self.author.name),
(self.mock2.title, self.author.name),
]
actual_list = self.mixin.generate_data(fields)
assert list(actual_list) == list(expected_list)
def test_reverse_ordering_when_fields_specified(self):
fields = ('title', 'id')
self.mixin.queryset = self.queryset
actual_list = self.mixin.generate_data(fields)
assert list(actual_list)[0] == (self.mock.title, self.mock.id)
def test_allows_evaluation_using_models(self):
fields = ('title', 'author__name', 'calculated')
self.mixin.queryset = self.queryset
expected_list = [
(self.mock.title, self.author.name, u'whee %d' % self.mock.id),
(self.mock2.title, self.author.name, u'whee %d' % self.mock2.id),
]
self.mixin.use_models = True
self.mixin.calculated = lambda model: 'whee %d' % model.id
actual_list = self.mixin.generate_data(fields)
self.assertEqual(list(actual_list), expected_list)
class GenerateXlsxTests(TestCase):
def setUp(self):
self.data = (('row1col1', 'row1col2'), ('row2col1', 'row2col2'))
self.mixin = SpreadsheetResponseMixin()
def _get_sheet(self, wb):
return wb.active
def test_returns_workbook_if_no_file_passed(self):
assert type(self.mixin.generate_xlsx(self.data)) == Workbook
def test_if_file_is_passed_it_is_returned_with_content_added(self):
given_content = StringIO()
assert given_content.getvalue() == ''
self.mixin.generate_xlsx(self.data, file=given_content)
assert given_content.getvalue() > ''
def test_adds_row_of_data(self):
wb = self.mixin.generate_xlsx(self.data)
ws = self._get_sheet(wb)
assert ws.cell(column=1, row=1).value == 'row1col1'
assert ws.cell(column=2, row=2).value == 'row2col2'
def test_inserts_headers_if_provided(self):
headers = ('ColA', 'ColB')
wb = self.mixin.generate_xlsx(self.data, headers)
ws = self._get_sheet(wb)
assert ws.cell(column=1, row=1).value == 'ColA'
assert ws.cell(column=2, row=2).value == 'row1col2'
class GenerateCsvTests(TestCase):
def setUp(self):
self.data = (('row1col1', 'row1col2'), ('row2col1', 'row2col2'))
self.mixin = SpreadsheetResponseMixin()
def test_if_no_file_passed_in_stringio_is_returned(self):
generated_csv = self.mixin.generate_csv(self.data)
assert type(generated_csv) == type(StringIO())
def test_if_file_is_passed_it_is_returned_with_content_added(self):
given_content = mock.MagicMock()
returned_file = self.mixin.generate_csv(self.data, file=given_content)
assert returned_file == given_content
def test_adds_row_of_data(self):
expected_string = \
'row1col1,row1col2\r\nrow2col1,row2col2\r\n'
generated_csv = self.mixin.generate_csv(self.data)
assert generated_csv.getvalue() == expected_string
def test_inserts_headers_if_provided(self):
headers = ('ColA', 'ColB')
expected_string = \
'ColA,ColB\r\nrow1col1,row1col2\r\nrow2col1,row2col2\r\n'
generated_csv = self.mixin.generate_csv(self.data, headers)
assert generated_csv.getvalue() == expected_string
def test_handles_data_with_commas_correctly_by_putting_quotes(self):
data = (('Title is, boo', '2'), )
expected_string = \
'"Title is, boo",2\r\n'
generated_csv = self.mixin.generate_csv(data)
assert generated_csv.getvalue() == expected_string
def test_handles_data_with_quotes_correctly(self):
data = (('Title is, "boo"', '2'), )
expected_string = \
'"Title is, ""boo""",2\r\n'
generated_csv = self.mixin.generate_csv(data)
assert generated_csv.getvalue() == expected_string
def test_handles_data_in_unicode_correctly(self):
data = ((u'Bumblebee is čmrlj', '2'), )
expected_string = u'Bumblebee is čmrlj,2\r\n'.encode('utf-8')
generated_csv = self.mixin.generate_csv(data)
assert generated_csv.getvalue() == expected_string
class RenderSetupTests(TestCase):
def setUp(self):
self.mixin = SpreadsheetResponseMixin()
self.mixin.generate_headers = mock.MagicMock()
self.mixin.queryset = MockModel.objects.all()
self.fields = (u'title',)
def test_if_no_self_queryset_raise_improperlyconfigured(self):
delattr(self.mixin, 'queryset')
with pytest.raises(NotImplementedError):
list(self.mixin.render_setup())
def test_get_fields_is_called_with_kwargs(self):
self.mixin.get_fields = mock.MagicMock()
self.mixin.render_setup(a=1, b=2)
self.mixin.get_fields.assert_called_once_with(a=1, b=2)
def test_generate_data_is_called_once_with_fields_and_queryset(self):
self.mixin.generate_data = mock.MagicMock()
qs = MockModel.objects.all()
self.mixin.render_excel_response(queryset=qs, fields=self.fields)
self.mixin.generate_data.assert_called_once_with(fields=self.fields)
def test_if_no_headers_passed_generate_headers_called(self):
self.mixin.render_excel_response(fields=self.fields)
self.mixin.generate_headers.assert_called_once_with(MockModel,
fields=self.fields)
def test_returns_attachment_with_correct_filename(self):
expected_disposition = 'attachment; filename="export.csv"'
response = self.mixin.render_csv_response()
actual_disposition = response._headers['content-disposition'][1]
assert actual_disposition == expected_disposition
self.mixin.filename = 'data.dump'
expected_disposition = 'attachment; filename="data.dump"'
response = self.mixin.render_csv_response()
actual_disposition = response._headers['content-disposition'][1]
assert actual_disposition == expected_disposition
expected_disposition = 'attachment; filename="data.dump"'
response = self.mixin.render_excel_response()
actual_disposition = response._headers['content-disposition'][1]
assert actual_disposition == expected_disposition
delattr(self.mixin, 'filename')
self.mixin.filename_base = 'data.dump'
expected_disposition = 'attachment; filename="data.dump.csv"'
response = self.mixin.render_csv_response()
actual_disposition = response._headers['content-disposition'][1]
assert actual_disposition == expected_disposition
expected_disposition = 'attachment; filename="data.dump.xlsx"'
response = self.mixin.render_excel_response()
actual_disposition = response._headers['content-disposition'][1]
assert actual_disposition == expected_disposition
class RenderExcelResponseTests(TestCase):
def setUp(self):
self.mixin = SpreadsheetResponseMixin()
MockModelFactory()
self.queryset = MockModel.objects.all()
self.mixin.queryset = self.queryset
def test_returns_httpresponse(self):
assert type(self.mixin.render_excel_response()) == HttpResponse
def test_returns_xlsx_content_type(self):
expected_content_type = \
'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
response = self.mixin.render_excel_response()
actual_content_type = response._headers['content-type'][1]
assert actual_content_type == expected_content_type
def test_returns_attachment_content_disposition(self):
expected_disposition = 'attachment; filename="export.xlsx"'
response = self.mixin.render_excel_response()
actual_disposition = response._headers['content-disposition'][1]
assert actual_disposition == expected_disposition
def test_get_filename_called_with_csv_parameter(self):
self.mixin.get_filename = mock.MagicMock()
self.mixin.render_excel_response()
self.mixin.get_filename.assert_called_once_with(extension='xlsx')
def test_generate_xslx_is_called_with_data(self):
self.mixin.generate_xlsx = mock.MagicMock()
self.mixin.render_excel_response()
data = self.mixin.generate_data()
mut = self.mixin.generate_xlsx
assert mut.call_count == 1
assert list(mut.call_args.__getnewargs__()[0][1]['data']) == list(data)
def test_generate_xslx_is_called_with_headers(self):
self.mixin.generate_xlsx = mock.MagicMock()
headers = ('ColA', 'ColB')
self.mixin.render_excel_response(headers=headers)
mut = self.mixin.generate_xlsx
assert mut.call_count == 1
assert mut.call_args.__getnewargs__()[0][1]['headers'] == headers
def test_generate_xslx_is_called_with_response(self):
self.mixin.generate_xlsx = mock.MagicMock()
self.mixin.render_excel_response()
mut = self.mixin.generate_xlsx
assert mut.call_count == 1
assert type(mut.call_args.__getnewargs__()[0][1]['file']) \
== HttpResponse
class RenderCsvResponseTests(TestCase):
def setUp(self):
self.author = MockAuthorFactory()
MockModelFactory(author=self.author)
self.queryset = MockModel.objects.all()
self.mixin = SpreadsheetResponseMixin()
self.mixin.queryset = self.queryset
def test_returns_httpresponse(self):
assert type(self.mixin.render_csv_response()) == HttpResponse
def test_returns_csv_content_type(self):
expected_content_type = 'text/csv'
response = self.mixin.render_csv_response()
actual_content_type = response._headers['content-type'][1]
assert actual_content_type == expected_content_type
def test_returns_attachment_content_disposition(self):
expected_disposition = 'attachment; filename="export.csv"'
response = self.mixin.render_csv_response()
actual_disposition = response._headers['content-disposition'][1]
assert actual_disposition == expected_disposition
def test_get_filename_called_with_csv_parameter(self):
self.mixin.get_filename = mock.MagicMock()
self.mixin.render_csv_response()
self.mixin.get_filename.assert_called_once_with(extension='csv')
def test_generate_csv_is_called_with_data(self):
mut = self.mixin.generate_csv = mock.MagicMock()
self.mixin.render_csv_response()
data = self.mixin.generate_data()
assert mut.call_count == 1
assert list(mut.call_args.__getnewargs__()[0][1]['data']) == list(data)
def test_generate_csv_is_called_with_headers(self):
self.mixin.generate_csv = mock.MagicMock()
headers = ('ColA', 'ColB')
self.mixin.render_csv_response(headers=headers)
mut = self.mixin.generate_csv
assert mut.call_count == 1
assert mut.call_args.__getnewargs__()[0][1]['headers'] == headers
def test_generate_csv_is_called_with_response(self):
self.mixin.generate_csv = mock.MagicMock()
self.mixin.render_csv_response()
mut = self.mixin.generate_csv
assert mut.call_count == 1
assert type(mut.call_args.__getnewargs__()[0][1]['file']) \
== HttpResponse
class GenerateHeadersTests(TestCase):
def setUp(self):
MockModelFactory()
self.mixin = SpreadsheetResponseMixin()
self.mixin.queryset = MockModel.objects.all()
self.data = self.mixin.generate_data()
def test_generate_headers_gets_headers_from_model_name(self):
fields = self.mixin.get_fields(model=MockModel)
assert self.mixin.generate_headers(MockModel, fields) == (u'Id', u'Title', u'Author')
def test_generate_headers_keeps_fields_order(self):
fields = ('title', 'id')
headers = self.mixin.generate_headers(MockModel, fields=fields)
assert headers == (u'Title', u'Id')
def test_generate_headers_only_returns_fields_if_fields_is_passed(self):
fields = ('title',)
assert self.mixin.generate_headers(MockModel,
fields=fields) == (u'Title', )
def test_generate_headers_follows_foreign_keys(self):
fields = ('title', 'author__name')
headers = self.mixin.generate_headers(MockModel, fields)
assert headers == (u'Title', u'Author Name')
def test_generate_headers_with_calculated_fields(self):
fields = ('title', 'author__name', 'calculate_this')
self.mixin.calculate_this = lambda values: 'whee %d' % values[0]
headers = self.mixin.generate_headers(MockModel, fields)
assert headers == (u'Title', u'Author Name', u'Calculate This')
def test_generate_headers_with_calculated_fields_with_verbose_names(self):
fields = ('title', 'author__name', 'calculate_this')
self.mixin.calculate_this = lambda values: 'whee %d' % values[0]
self.mixin.calculate_this.verbose_name = 'Whee!'
headers = self.mixin.generate_headers(MockModel, fields)
assert headers == (u'Title', u'Author Name', u'Whee!')
class GetFieldsTests(TestCase):
def setUp(self):
self.mixin = SpreadsheetResponseMixin()
def test_if_fields_defined_on_view(self):
fields = ('title', 'summary')
self.mixin.fields = fields
assert self.mixin.get_fields() == fields
def test_get_fields_from_kwargs(self):
fields = ('title', 'summary')
assert self.mixin.get_fields(fields=fields) == fields
def test_get_fields_from_queryset(self):
self.mixin.queryset = MockModel.objects.all()
assert self.mixin.get_fields() == ['id', 'title', 'author']
def test_get_fields_from_values_list_queryset(self):
self.mixin.queryset = MockModel.objects.all().values_list()
assert self.mixin.get_fields() == ['id', 'title', 'author']
class GetRenderMethodTest(TestCase):
def setUp(self):
self.mixin = SpreadsheetResponseMixin()
def test_raise_notimplemented_for_unknown_format(self):
with pytest.raises(NotImplementedError):
self.mixin.get_render_method('doc')
def test_returns_excel_response_method_for_excel_format(self):
expected_render_method = self.mixin.render_excel_response
assert self.mixin.get_render_method('excel') == expected_render_method
def test_csv_response_method_for_csv_format(self):
expected_render_method = self.mixin.render_csv_response
assert self.mixin.get_render_method('csv') == expected_render_method
class GetFormatTest(TestCase):
def setUp(self):
self.mixin = SpreadsheetResponseMixin()
def test_get_format_from_export_format_kwarg(self):
format = 'excel'
assert self.mixin.get_format(format=format) == format
def test_get_format_from_export_format_attribute(self):
format = 'csv'
self.mixin.format = format
assert self.mixin.get_format() == format
def test_raise_notimplemented_if_export_format_not_supplied(self):
with pytest.raises(NotImplementedError):
self.mixin.get_format()
class GetFilenameTest(TestCase):
def setUp(self):
self.mixin = SpreadsheetResponseMixin()
def test_from_filename_kwarg(self):
filename = 'filename.kwarg'
assert self.mixin.get_filename(filename=filename) == filename
def test_from_filename_attribute(self):
filename = 'filename.attr'
self.mixin.filename = filename
assert self.mixin.get_filename() == filename
def test_default_return_with_no_extension_provided(self):
assert self.mixin.get_filename() == 'export.out'
def test_default_return_with_extension_provided(self):
assert self.mixin.get_filename(extension='blob') == 'export.blob'
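# A minimal usage sketch (not part of the test suite above): the mixin is
# normally combined with a Django class-based view. The view name, field
# choices and filename are hypothetical; only the attributes and render
# methods exercised by the tests (queryset, fields, filename,
# render_csv_response) are assumed to exist.
from django.views.generic import ListView
class MockModelCsvExportView(SpreadsheetResponseMixin, ListView):
    queryset = MockModel.objects.all()
    fields = ('title', 'author__name')
    filename = 'mockmodels.csv'
    def get(self, request, *args, **kwargs):
        # Render the configured fields of the queryset as a CSV attachment.
        return self.render_csv_response(fields=self.fields)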
|
aptivate/django-spreadsheetresponsemixin
|
tests/test_views.py
|
Python
|
gpl-3.0
| 19,878
|
import os
from ..java import (
Class as JavaClass,
Field as JavaField,
Method as JavaMethod,
Code as JavaCode,
opcodes as JavaOpcodes,
SourceFile,
Signature,
# LineNumberTable
)
from .blocks import Block, IgnoreBlock
from .methods import MainMethod, Method, extract_parameters
from .opcodes import ASTORE_name, ALOAD_name, IF, END_IF
class StaticBlock(Block):
def tweak(self):
self.code = [
# Set up the globals dictionary for the module
JavaOpcodes.NEW('java/util/Hashtable'),
JavaOpcodes.DUP(),
JavaOpcodes.INVOKESPECIAL('java/util/Hashtable', '<init>', '()V'),
JavaOpcodes.PUTSTATIC(self.module.descriptor, 'globals', 'Ljava/util/Hashtable;'),
# Load the Python builtins into the globals.
JavaOpcodes.GETSTATIC(self.module.descriptor, 'globals', 'Ljava/util/Hashtable;'),
JavaOpcodes.LDC('__builtins__'),
JavaOpcodes.NEW('org/python/types/Dict'),
JavaOpcodes.DUP(),
JavaOpcodes.GETSTATIC('org/Python', 'builtins', 'Ljava/util/Hashtable;'),
JavaOpcodes.INVOKESPECIAL('org/python/types/Dict', '<init>', '(Ljava/util/Map;)V'),
JavaOpcodes.INVOKEVIRTUAL('java/util/Hashtable', 'put', '(Ljava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;'),
JavaOpcodes.POP()
] + self.code
self.void_return()
def store_name(self, name, arguments, allow_locals=True):
self.add_opcodes(
ASTORE_name(self, '#TEMP#'),
JavaOpcodes.GETSTATIC(self.module.descriptor, 'globals', 'Ljava/util/Hashtable;'),
JavaOpcodes.LDC(name),
ALOAD_name(self, '#TEMP#'),
JavaOpcodes.INVOKEVIRTUAL('java/util/Hashtable', 'put', '(Ljava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;'),
JavaOpcodes.POP(),
)
def load_name(self, name, allow_locals=True):
self.add_opcodes(
# look for a global var.
JavaOpcodes.GETSTATIC(self.module.descriptor, 'globals', 'Ljava/util/Hashtable;'),
JavaOpcodes.LDC(name),
JavaOpcodes.INVOKEVIRTUAL('java/util/Hashtable', 'get', '(Ljava/lang/Object;)Ljava/lang/Object;'),
# If there's nothing in the globals, then look for a builtin.
IF(
[JavaOpcodes.DUP()],
JavaOpcodes.IFNONNULL
),
JavaOpcodes.POP(),
JavaOpcodes.GETSTATIC('org/Python', 'builtins', 'Ljava/util/Hashtable;'),
JavaOpcodes.LDC(name),
JavaOpcodes.INVOKEVIRTUAL('java/util/Hashtable', 'get', '(Ljava/lang/Object;)Ljava/lang/Object;'),
# If we still don't have something, throw a NameError.
IF(
[JavaOpcodes.DUP()],
JavaOpcodes.IFNONNULL
),
JavaOpcodes.POP(),
JavaOpcodes.NEW('org/python/exceptions/NameError'),
JavaOpcodes.DUP(),
JavaOpcodes.LDC(name),
JavaOpcodes.INVOKESPECIAL('org/python/exceptions/NameError', '<init>', '(Ljava/lang/String;)V'),
JavaOpcodes.ATHROW(),
END_IF(),
END_IF(),
# Make sure we actually have a Python object
JavaOpcodes.CHECKCAST('org/python/types/Object')
)
def delete_name(self, name, allow_locals=True):
self.add_opcodes(
# look for a global var.
JavaOpcodes.GETSTATIC(self.module.descriptor, 'globals', 'Ljava/util/Hashtable;'),
JavaOpcodes.LDC(name),
JavaOpcodes.INVOKEVIRTUAL('java/util/Hashtable', 'remove', '(Ljava/lang/Object;)Ljava/lang/Object;'),
)
@property
def descriptor(self):
return self.parent.descriptor
@property
def module(self):
return self.parent
def add_method(self, method_name, code):
method = Method(self.module, method_name, extract_parameters(code), static=True)
method.extract(code)
self.module.methods.append(method.transpile())
return method
class Module(Block):
def __init__(self, namespace, sourcefile):
super().__init__()
self.namespace = namespace
self.sourcefile = sourcefile
self.name = os.path.splitext(os.path.basename(sourcefile))[0]
self.methods = []
self.classes = []
self.anonymous_inner_class_count = 0
@property
def descriptor(self):
return '/'.join(self.namespace.split('.') + [self.name])
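    # For example, a namespace of 'python.example' and a source file named
    # 'mymodule.py' (hypothetical values) give the descriptor 'python/example/mymodule'.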
def transpile(self):
"""Convert a Python code block into a list of Java Classfile definitions.
Returns a list of triples:
(namespace, class_name, javaclassfile)
        The list contains the classfile for the module, plus any classes
defined in the module.
"""
main_commands = []
body_commands = []
main_end = None
main = None
for cmd in self.commands:
if main_end is not None:
# Marker for the end of the main block:
if cmd.is_main_end(main_end):
main_end = None
try:
# The last command in the main block is a jump.
# Not sure why it is required, but we can ignore
# it for transpilation purposes.
main = MainMethod(self, main_commands[:-1]).transpile()
except IgnoreBlock:
pass
else:
main_commands.append(cmd)
else:
# Look for a very specific pattern, flagging the "main" method:
# if __name__ == '__main__':
# ...
# which is represented as:
# LOAD_NAME: __name__
# LOAD_CONST: __main__
# COMPARE_OP: ==
# POP_JUMP_IF_FALSE: <end of block target>
# ... <main code>
# <end of block target>
if cmd.is_main_start():
if main is not None:
print("Found duplicate main block... replacing previous main")
main_end = cmd.operation.target
                # All other module-level commands go into the static block
else:
body_commands.append(cmd)
body = StaticBlock(self, body_commands).transpile()
# If there is any static content, generate a classfile
# for this module
classfile = JavaClass(self.descriptor, supername='org/python/types/Object')
classfile.attributes.append(SourceFile(os.path.basename(self.sourcefile)))
# Add a globals dictionary to the module.
classfile.fields.append(
JavaField(
'globals',
'Ljava/util/Hashtable;',
public=True,
static=True,
attributes=[
Signature('Ljava/util/Hashtable<Ljava/lang/String;Lorg/python/types/Object;>;')
]
)
)
# Add a static method to the module.
static_init = JavaMethod('<clinit>', '()V', public=False, static=True)
static_init.attributes.append(body)
classfile.methods.append(static_init)
if main is None:
print("Adding default main method...")
main = JavaMethod(
'main',
'([Ljava/lang/String;)V',
public=True,
static=True,
attributes=[
JavaCode(
max_stack=0,
max_locals=1,
code=[JavaOpcodes.RETURN()]
)
]
)
classfile.methods.append(main)
# Add any static methods defined in the module
for method in self.methods:
classfile.methods.append(method)
# The list of classfiles that will be returned will contain
# at least one entry - the class for the module itself.
classfiles = [(self.namespace, self.name, classfile)]
# Also output any classes defined in this module.
for namespace, class_name, classfile in self.classes:
classfiles.append((namespace, class_name, classfile))
return classfiles
|
shaunstanislaus/voc
|
voc/python/modules.py
|
Python
|
bsd-3-clause
| 8,535
|
from unittest import TestCase, main
from datetime import datetime
from future.utils import viewitems
from qiita_core.exceptions import IncompetentQiitaDeveloperError
from qiita_core.qiita_settings import qiita_config
from qiita_core.util import qiita_test_checker
from qiita_db.base import QiitaObject
from qiita_db.study import Study, StudyPerson
from qiita_db.investigation import Investigation
from qiita_db.user import User
from qiita_db.util import convert_to_id
from qiita_db.exceptions import (
QiitaDBColumnError, QiitaDBStatusError, QiitaDBError,
QiitaDBUnknownIDError, QiitaDBDuplicateError)
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
@qiita_test_checker()
class TestStudyPerson(TestCase):
def setUp(self):
self.studyperson = StudyPerson(1)
def test_create_studyperson(self):
new = StudyPerson.create('SomeDude', 'somedude@foo.bar', 'affil',
'111 fake street', '111-121-1313')
self.assertEqual(new.id, 4)
obs = self.conn_handler.execute_fetchall(
"SELECT * FROM qiita.study_person WHERE study_person_id = 4")
self.assertEqual(obs, [[4, 'SomeDude', 'somedude@foo.bar', 'affil',
'111 fake street', '111-121-1313']])
def test_iter(self):
"""Make sure that each and every StudyPerson is retrieved"""
expected = [
('LabDude', 'lab_dude@foo.bar', 'knight lab', '123 lab street',
'121-222-3333'),
('empDude', 'emp_dude@foo.bar', 'broad', None, '444-222-3333'),
('PIDude', 'PI_dude@foo.bar', 'Wash U', '123 PI street', None)]
for i, person in enumerate(StudyPerson.iter()):
self.assertEqual(person.id, i+1)
self.assertEqual(person.name, expected[i][0])
self.assertEqual(person.email, expected[i][1])
self.assertEqual(person.affiliation, expected[i][2])
self.assertEqual(person.address, expected[i][3])
self.assertEqual(person.phone, expected[i][4])
def test_exists(self):
self.assertTrue(StudyPerson.exists('LabDude', 'knight lab'))
self.assertFalse(StudyPerson.exists('AnotherDude', 'knight lab'))
self.assertFalse(StudyPerson.exists('LabDude', 'Another lab'))
def test_create_studyperson_already_exists(self):
obs = StudyPerson.create('LabDude', 'lab_dude@foo.bar', 'knight lab')
self.assertEqual(obs.name, 'LabDude')
self.assertEqual(obs.email, 'lab_dude@foo.bar')
def test_retrieve_name(self):
self.assertEqual(self.studyperson.name, 'LabDude')
def test_set_name_fail(self):
with self.assertRaises(AttributeError):
self.studyperson.name = 'Fail Dude'
def test_retrieve_email(self):
self.assertEqual(self.studyperson.email, 'lab_dude@foo.bar')
def test_retrieve_affiliation(self):
self.assertEqual(self.studyperson.affiliation, 'knight lab')
def test_set_email_fail(self):
with self.assertRaises(AttributeError):
self.studyperson.email = 'faildude@foo.bar'
def test_set_affiliation_fail(self):
with self.assertRaises(AttributeError):
self.studyperson.affiliation = 'squire lab'
def test_retrieve_address(self):
self.assertEqual(self.studyperson.address, '123 lab street')
def test_retrieve_address_null(self):
person = StudyPerson(2)
self.assertEqual(person.address, None)
def test_set_address(self):
self.studyperson.address = '123 nonsense road'
self.assertEqual(self.studyperson.address, '123 nonsense road')
def test_retrieve_phone(self):
self.assertEqual(self.studyperson.phone, '121-222-3333')
def test_retrieve_phone_null(self):
person = StudyPerson(3)
self.assertEqual(person.phone, None)
def test_set_phone(self):
self.studyperson.phone = '111111111111111111121'
self.assertEqual(self.studyperson.phone, '111111111111111111121')
@qiita_test_checker()
class TestStudy(TestCase):
def setUp(self):
self.study = Study(1)
self.portal = qiita_config.portal
self.info = {
"timeseries_type_id": 1,
"metadata_complete": True,
"mixs_compliant": True,
"number_samples_collected": 25,
"number_samples_promised": 28,
"study_alias": "FCM",
"study_description": "Microbiome of people who eat nothing but "
"fried chicken",
"study_abstract": "Exploring how a high fat diet changes the "
"gut microbiome",
"emp_person_id": StudyPerson(2),
"principal_investigator_id": StudyPerson(3),
"lab_person_id": StudyPerson(1)
}
self.infoexp = {
"timeseries_type_id": 1,
"metadata_complete": True,
"mixs_compliant": True,
"number_samples_collected": 25,
"number_samples_promised": 28,
"study_alias": "FCM",
"study_description": "Microbiome of people who eat nothing but "
"fried chicken",
"study_abstract": "Exploring how a high fat diet changes the "
"gut microbiome",
"emp_person_id": 2,
"principal_investigator_id": 3,
"lab_person_id": 1
}
self.existingexp = {
'mixs_compliant': True,
'metadata_complete': True,
'reprocess': False,
'number_samples_promised': 27,
'emp_person_id': StudyPerson(2),
'funding': None,
'vamps_id': None,
'first_contact': datetime(2014, 5, 19, 16, 10),
'principal_investigator_id': StudyPerson(3),
'timeseries_type_id': 1,
'study_abstract':
"This is a preliminary study to examine the "
"microbiota associated with the Cannabis plant. Soils samples "
"from the bulk soil, soil associated with the roots, and the "
"rhizosphere were extracted and the DNA sequenced. Roots "
"from three independent plants of different strains were "
"examined. These roots were obtained November 11, 2011 from "
"plants that had been harvested in the summer. Future "
"studies will attempt to analyze the soils and rhizospheres "
"from the same location at different time points in the plant "
"lifecycle.",
'spatial_series': False,
'study_description': 'Analysis of the Cannabis Plant Microbiome',
'study_alias': 'Cannabis Soils',
            'most_recent_contact': datetime(2014, 5, 19, 16, 11),
'lab_person_id': StudyPerson(1),
'number_samples_collected': 27}
def tearDown(self):
qiita_config.portal = self.portal
def _change_processed_data_status(self, new_status):
# Change the status of the studies by changing the status of their
# processed data
id_status = convert_to_id(new_status, 'processed_data_status')
self.conn_handler.execute(
"UPDATE qiita.processed_data SET processed_data_status_id = %s",
(id_status,))
def test_get_info(self):
# Test get all info for single study
qiita_config.portal = 'QIITA'
obs = Study.get_info([1])
self.assertEqual(len(obs), 1)
obs = dict(obs[0])
exp = {
'mixs_compliant': True, 'metadata_complete': True,
'reprocess': False, 'timeseries_type': 'None',
'number_samples_promised': 27, 'emp_person_id': 2,
'funding': None, 'vamps_id': None,
'first_contact': datetime(2014, 5, 19, 16, 10),
'principal_investigator_id': 3, 'timeseries_type_id': 1,
'pmid': ['123456', '7891011'], 'study_alias': 'Cannabis Soils',
'spatial_series': False,
'study_abstract': 'This is a preliminary study to examine the '
'microbiota associated with the Cannabis plant. Soils samples from'
' the bulk soil, soil associated with the roots, and the '
'rhizosphere were extracted and the DNA sequenced. Roots from '
'three independent plants of different strains were examined. '
'These roots were obtained November 11, 2011 from plants that had '
'been harvested in the summer. Future studies will attempt to '
'analyze the soils and rhizospheres from the same location at '
'different time points in the plant lifecycle.',
'study_description': 'Analysis of the Cannabis Plant Microbiome',
'intervention_type': 'None', 'email': 'test@foo.bar',
'study_id': 1,
'most_recent_contact': datetime(2014, 5, 19, 16, 11),
'lab_person_id': 1,
'study_title': 'Identification of the Microbiomes for Cannabis '
'Soils', 'number_samples_collected': 27}
self.assertItemsEqual(obs, exp)
# Test get specific keys for single study
exp_keys = ['metadata_complete', 'reprocess', 'timeseries_type',
'pmid', 'study_title']
obs = Study.get_info([1], exp_keys)
self.assertEqual(len(obs), 1)
obs = dict(obs[0])
exp = {
'metadata_complete': True, 'reprocess': False,
'timeseries_type': 'None',
'pmid': ['123456', '7891011'],
'study_title': 'Identification of the Microbiomes for Cannabis '
'Soils'}
self.assertItemsEqual(obs, exp)
# Test get specific keys for all studies
info = {
'timeseries_type_id': 1,
'lab_person_id': None,
'principal_investigator_id': 3,
'metadata_complete': False,
'mixs_compliant': True,
'study_description': 'desc',
'study_alias': 'alias',
'study_abstract': 'abstract'}
user = User('test@foo.bar')
Study.create(user, 'test_study_1', efo=[1], info=info)
obs = Study.get_info(info_cols=exp_keys)
exp = [[True, ['123456', '7891011'], False,
'Identification of the Microbiomes for Cannabis Soils',
'None'],
[False, None, False, 'test_study_1', 'None']]
self.assertEqual(obs, exp)
# test portal restriction working
qiita_config.portal = 'EMP'
with self.assertRaises(QiitaDBError):
Study.get_info([1])
def test_has_access_public(self):
self._change_processed_data_status('public')
qiita_config.portal = 'QIITA'
self.assertTrue(self.study.has_access(User("demo@microbio.me")))
qiita_config.portal = 'EMP'
with self.assertRaises(QiitaDBError):
Study(1).has_access(User("demo@microbio.me"))
def test_has_access_no_public(self):
self._change_processed_data_status('public')
self.assertFalse(self.study.has_access(User("demo@microbio.me"), True))
def test_owner(self):
self.assertEqual(self.study.owner, "test@foo.bar")
def test_share(self):
# Clear all sharing associations
self._change_processed_data_status('sandbox')
self.conn_handler.execute("delete from qiita.study_users")
self.assertEqual(self.study.shared_with, [])
# Try to share with the owner, which should not work
self.study.share(User("test@foo.bar"))
self.assertEqual(self.study.shared_with, [])
# Then share the study with shared@foo.bar
self.study.share(User("shared@foo.bar"))
self.assertEqual(self.study.shared_with, ["shared@foo.bar"])
def test_unshare(self):
self._change_processed_data_status('sandbox')
self.study.unshare(User("shared@foo.bar"))
self.assertEqual(self.study.shared_with, [])
def test_has_access_shared(self):
self._change_processed_data_status('sandbox')
self.assertTrue(self.study.has_access(User("shared@foo.bar")))
def test_has_access_private(self):
self._change_processed_data_status('sandbox')
self.assertTrue(self.study.has_access(User("test@foo.bar")))
def test_has_access_admin(self):
self._change_processed_data_status('sandbox')
self.assertTrue(self.study.has_access(User("admin@foo.bar")))
def test_has_access_no_access(self):
self._change_processed_data_status('sandbox')
self.assertFalse(self.study.has_access(User("demo@microbio.me")))
def test_get_by_status(self):
obs = Study.get_by_status('sandbox')
self.assertEqual(obs, set())
Study.create(User('test@foo.bar'), 'NOT Identification of the '
'Microbiomes for Cannabis Soils', [1], self.info)
obs = Study.get_by_status('private')
self.assertEqual(obs, {1})
obs = Study.get_by_status('sandbox')
self.assertEqual(obs, {2})
obs = Study.get_by_status('public')
self.assertEqual(obs, set())
obs = Study.get_by_status('awaiting_approval')
self.assertEqual(obs, set())
def test_exists(self):
self.assertTrue(Study.exists('Identification of the Microbiomes for '
'Cannabis Soils'))
self.assertFalse(Study.exists('Not Cannabis Soils'))
def test_create_duplicate(self):
with self.assertRaises(QiitaDBDuplicateError):
Study.create(
User('test@foo.bar'),
'Identification of the Microbiomes for Cannabis Soils',
[1], self.info)
def test_create_study_min_data(self):
"""Insert a study into the database"""
before = datetime.now()
obs = Study.create(User('test@foo.bar'), "Fried chicken microbiome",
[1], self.info)
after = datetime.now()
self.assertEqual(obs.id, 2)
exp = {'mixs_compliant': True, 'metadata_complete': True,
'reprocess': False,
'number_samples_promised': 28, 'emp_person_id': 2,
'funding': None, 'vamps_id': None,
'principal_investigator_id': 3,
'timeseries_type_id': 1,
'study_abstract': 'Exploring how a high fat diet changes the '
'gut microbiome',
'email': 'test@foo.bar', 'spatial_series': None,
'study_description': 'Microbiome of people who eat nothing but'
' fried chicken',
'study_alias': 'FCM', 'study_id': 2,
'most_recent_contact': None, 'lab_person_id': 1,
'study_title': 'Fried chicken microbiome',
'number_samples_collected': 25}
obsins = self.conn_handler.execute_fetchall(
"SELECT * FROM qiita.study WHERE study_id = 2")
self.assertEqual(len(obsins), 1)
obsins = dict(obsins[0])
# Check the timestamp separately, since it is set by the database
# to the microsecond, and we can't predict it a priori
ins_timestamp = obsins.pop('first_contact')
self.assertTrue(before < ins_timestamp < after)
self.assertEqual(obsins, exp)
# make sure EFO went in to table correctly
efo = self.conn_handler.execute_fetchall(
"SELECT efo_id FROM qiita.study_experimental_factor "
"WHERE study_id = 2")
self.assertEqual(efo, [[1]])
def test_create_nonqiita_portal(self):
qiita_config.portal = "EMP"
Study.create(User('test@foo.bar'), "NEW!",
[1], self.info, Investigation(1))
# make sure portal is associated
obs = self.conn_handler.execute_fetchall(
"SELECT * from qiita.study_portal WHERE study_id = 2")
self.assertEqual(obs, [[2, 2], [2, 1]])
def test_create_study_with_investigation(self):
"""Insert a study into the database with an investigation"""
obs = Study.create(User('test@foo.bar'), "Fried chicken microbiome",
[1], self.info, Investigation(1))
self.assertEqual(obs.id, 2)
# check the investigation was assigned
obs = self.conn_handler.execute_fetchall(
"SELECT * from qiita.investigation_study WHERE study_id = 2")
self.assertEqual(obs, [[1, 2]])
def test_create_study_all_data(self):
"""Insert a study into the database with every info field"""
self.info.update({
'vamps_id': 'MBE_1111111',
'funding': 'FundAgency',
'spatial_series': True,
'metadata_complete': False,
'reprocess': True,
'first_contact': "10/24/2014 12:47PM",
'study_id': 3827
})
obs = Study.create(User('test@foo.bar'), "Fried chicken microbiome",
[1], self.info)
self.assertEqual(obs.id, 3827)
exp = {'mixs_compliant': True, 'metadata_complete': False,
'reprocess': True,
'number_samples_promised': 28, 'emp_person_id': 2,
'funding': 'FundAgency', 'vamps_id': 'MBE_1111111',
'first_contact': datetime(2014, 10, 24, 12, 47),
'principal_investigator_id': 3, 'timeseries_type_id': 1,
'study_abstract': 'Exploring how a high fat diet changes the '
'gut microbiome',
'email': 'test@foo.bar', 'spatial_series': True,
'study_description': 'Microbiome of people who eat nothing '
'but fried chicken',
'study_alias': 'FCM', 'study_id': 3827,
'most_recent_contact': None, 'lab_person_id': 1,
'study_title': 'Fried chicken microbiome',
'number_samples_collected': 25}
obsins = self.conn_handler.execute_fetchall(
"SELECT * FROM qiita.study WHERE study_id = 3827")
self.assertEqual(len(obsins), 1)
obsins = dict(obsins[0])
self.assertEqual(obsins, exp)
# make sure EFO went in to table correctly
obsefo = self.conn_handler.execute_fetchall(
"SELECT efo_id FROM qiita.study_experimental_factor "
"WHERE study_id = 3827")
self.assertEqual(obsefo, [[1]])
def test_create_missing_required(self):
""" Insert a study that is missing a required info key"""
self.info.pop("study_alias")
with self.assertRaises(QiitaDBColumnError):
Study.create(User('test@foo.bar'), "Fried Chicken Microbiome",
[1], self.info)
def test_create_empty_efo(self):
""" Insert a study that is missing a required info key"""
with self.assertRaises(IncompetentQiitaDeveloperError):
Study.create(User('test@foo.bar'), "Fried Chicken Microbiome",
[], self.info)
def test_create_study_with_not_allowed_key(self):
"""Insert a study with key from _non_info present"""
self.info.update({"email": "wooo@sup.net"})
with self.assertRaises(QiitaDBColumnError):
Study.create(User('test@foo.bar'), "Fried Chicken Microbiome",
[1], self.info)
def test_create_unknown_db_col(self):
""" Insert a study with an info key not in the database"""
self.info["SHOULDNOTBEHERE"] = "BWAHAHAHAHAHA"
with self.assertRaises(QiitaDBColumnError):
Study.create(User('test@foo.bar'), "Fried Chicken Microbiome",
[1], self.info)
def test_delete(self):
title = "Fried chicken microbiome"
# the study is assigned to investigation 1
study = Study.create(User('test@foo.bar'), title, [1], self.info,
Investigation(1))
# sharing with other user
study.share(User("shared@foo.bar"))
study.delete(study.id)
self.assertFalse(study.exists(title))
with self.assertRaises(QiitaDBError):
Study.delete(1)
with self.assertRaises(QiitaDBUnknownIDError):
Study.delete(41)
def test_retrieve_title(self):
self.assertEqual(self.study.title, 'Identification of the Microbiomes'
' for Cannabis Soils')
def test_set_title(self):
new = Study.create(User('test@foo.bar'), 'NOT Identification of the '
'Microbiomes for Cannabis Soils', [1], self.info)
new.title = "Cannabis soils"
self.assertEqual(new.title, "Cannabis soils")
def test_get_efo(self):
self.assertEqual(self.study.efo, [1])
def test_set_efo(self):
"""Set efo with list efo_id"""
new = Study.create(User('test@foo.bar'), 'NOT Identification of the '
'Microbiomes for Cannabis Soils', [1], self.info)
new.efo = [3, 4]
self.assertEqual(new.efo, [3, 4])
def test_set_efo_empty(self):
"""Set efo with list efo_id"""
new = Study.create(User('test@foo.bar'), 'NOT Identification of the '
'Microbiomes for Cannabis Soils', [1], self.info)
with self.assertRaises(IncompetentQiitaDeveloperError):
new.efo = []
def test_set_efo_public(self):
"""Set efo on a public study"""
with self.assertRaises(QiitaDBStatusError):
self.study.efo = 6
def test_portals(self):
self.assertEqual(self.study._portals, ['QIITA'])
def test_retrieve_info(self):
for key, val in viewitems(self.existingexp):
if isinstance(val, QiitaObject):
self.existingexp[key] = val.id
self.assertEqual(self.study.info, self.existingexp)
def test_set_info(self):
"""Set info in a study"""
newinfo = {
"timeseries_type_id": 2,
"metadata_complete": False,
"number_samples_collected": 28,
"lab_person_id": StudyPerson(2),
"vamps_id": 'MBE_111222',
}
self.info['first_contact'] = "6/11/2014"
new = Study.create(User('test@foo.bar'), 'NOT Identification of the '
'Microbiomes for Cannabis Soils', [1], self.info)
self.infoexp.update(newinfo)
new.info = newinfo
# add missing table cols
self.infoexp["funding"] = None
self.infoexp["spatial_series"] = None
self.infoexp["most_recent_contact"] = None
self.infoexp["reprocess"] = False
self.infoexp["lab_person_id"] = 2
self.infoexp["first_contact"] = datetime(2014, 6, 11)
self.assertEqual(new.info, self.infoexp)
def test_set_info_public(self):
"""Tests for fail if editing info of a public study"""
self.study.info = {"vamps_id": "12321312"}
def test_set_info_public_error(self):
"""Tests for fail if trying to modify timeseries of a public study"""
with self.assertRaises(QiitaDBStatusError):
self.study.info = {"timeseries_type_id": 2}
def test_set_info_disallowed_keys(self):
"""Tests for fail if sending non-info keys in info dict"""
new = Study.create(User('test@foo.bar'), 'NOT Identification of the '
'Microbiomes for Cannabis Soils', [1], self.info)
with self.assertRaises(QiitaDBColumnError):
new.info = {"email": "fail@fail.com"}
def test_info_empty(self):
new = Study.create(User('test@foo.bar'), 'NOT Identification of the '
'Microbiomes for Cannabis Soils', [1], self.info)
with self.assertRaises(IncompetentQiitaDeveloperError):
new.info = {}
def test_retrieve_status(self):
self.assertEqual(self.study.status, "private")
def test_retrieve_shared_with(self):
self.assertEqual(self.study.shared_with, ['shared@foo.bar'])
def test_retrieve_pmids(self):
exp = ['123456', '7891011']
self.assertEqual(self.study.pmids, exp)
def test_retrieve_pmids_empty(self):
new = Study.create(User('test@foo.bar'), 'NOT Identification of the '
'Microbiomes for Cannabis Soils', [1], self.info)
self.assertEqual(new.pmids, [])
def test_pmids_setter(self):
exp = ['123456', '7891011']
self.assertEqual(self.study.pmids, exp)
new_values = ['654321', '1101987']
self.study.pmids = new_values
self.assertEqual(self.study.pmids, new_values)
def test_pmids_setter_typeerror(self):
with self.assertRaises(TypeError):
self.study.pmids = '123456'
def test_retrieve_investigation(self):
self.assertEqual(self.study.investigation, 1)
def test_retrieve_investigation_empty(self):
new = Study.create(User('test@foo.bar'), 'NOT Identification of the '
'Microbiomes for Cannabis Soils', [1], self.info)
self.assertEqual(new.investigation, None)
def test_retrieve_sample_template(self):
self.assertEqual(self.study.sample_template, 1)
def test_retrieve_data_types(self):
self.assertEqual(self.study.data_types, ['18S'])
def test_retrieve_data_types_none(self):
new = Study.create(User('test@foo.bar'), 'NOT Identification of the '
'Microbiomes for Cannabis Soils', [1], self.info)
self.assertEqual(new.data_types, [])
def test_retrieve_raw_data(self):
self.assertEqual(self.study.raw_data(), [1])
def test_retrieve_raw_data_none(self):
new = Study.create(User('test@foo.bar'), 'NOT Identification of the '
'Microbiomes for Cannabis Soils', [1], self.info)
self.assertEqual(new.raw_data(), [])
def test_retrieve_prep_templates(self):
self.assertEqual(self.study.prep_templates(), [1])
def test_retrieve_prep_templates_none(self):
new = Study.create(User('test@foo.bar'), 'NOT Identification of the '
'Microbiomes for Cannabis Soils', [1], self.info)
self.assertEqual(new.prep_templates(), [])
def test_retrieve_preprocessed_data(self):
self.assertEqual(self.study.preprocessed_data(), [1, 2])
def test_retrieve_preprocessed_data_none(self):
new = Study.create(User('test@foo.bar'), 'NOT Identification of the '
'Microbiomes for Cannabis Soils', [1], self.info)
self.assertEqual(new.preprocessed_data(), [])
def test_retrieve_processed_data(self):
self.assertEqual(self.study.processed_data(), [1])
def test_retrieve_processed_data_none(self):
new = Study.create(User('test@foo.bar'), 'NOT Identification of the '
'Microbiomes for Cannabis Soils', [1], self.info)
self.assertEqual(new.processed_data(), [])
def test_add_pmid(self):
self._change_processed_data_status('sandbox')
self.study.add_pmid('4544444')
exp = ['123456', '7891011', '4544444']
self.assertEqual(self.study.pmids, exp)
def test_environmental_packages(self):
obs = self.study.environmental_packages
exp = ['soil', 'plant-associated']
self.assertEqual(sorted(obs), sorted(exp))
def test_environmental_packages_setter(self):
new = Study.create(User('test@foo.bar'), 'NOT Identification of the '
'Microbiomes for Cannabis Soils', [1], self.info)
obs = new.environmental_packages
exp = []
self.assertEqual(obs, exp)
new_values = ['air', 'human-oral']
new.environmental_packages = new_values
obs = new.environmental_packages
self.assertEqual(sorted(obs), sorted(new_values))
def test_environmental_packages_setter_typeerror(self):
new = Study.create(User('test@foo.bar'), 'NOT Identification of the '
'Microbiomes for Cannabis Soils', [1], self.info)
with self.assertRaises(TypeError):
new.environmental_packages = 'air'
def test_environmental_packages_setter_valueerror(self):
new = Study.create(User('test@foo.bar'), 'NOT Identification of the '
'Microbiomes for Cannabis Soils', [1], self.info)
with self.assertRaises(ValueError):
new.environmental_packages = ['air', 'not a package']
def test_environmental_packages_sandboxed(self):
with self.assertRaises(QiitaDBStatusError):
self.study.environmental_packages = ['air']
if __name__ == "__main__":
main()
|
adamrp/qiita
|
qiita_db/test/test_study.py
|
Python
|
bsd-3-clause
| 29,183
|
"""PyPI and direct package downloading"""
import sys, os.path, re, urlparse, urllib, urllib2, shutil, random, socket, cStringIO
import base64
import httplib
from pkg_resources import *
from distutils import log
from distutils.errors import DistutilsError
try:
from hashlib import md5
except ImportError:
from md5 import md5
from fnmatch import translate
EGG_FRAGMENT = re.compile(r'^egg=([-A-Za-z0-9_.]+)$')
HREF = re.compile("""href\\s*=\\s*['"]?([^'"> ]+)""", re.I)
# this is here to fix emacs' cruddy broken syntax highlighting
PYPI_MD5 = re.compile(
'<a href="([^"#]+)">([^<]+)</a>\n\s+\\(<a (?:title="MD5 hash"\n\s+)'
'href="[^?]+\?:action=show_md5&digest=([0-9a-f]{32})">md5</a>\\)'
)
URL_SCHEME = re.compile('([-+.a-z0-9]{2,}):',re.I).match
EXTENSIONS = ".tar.gz .tar.bz2 .tar .zip .tgz".split()
__all__ = [
'PackageIndex', 'distros_for_url', 'parse_bdist_wininst',
'interpret_distro_name',
]
_SOCKET_TIMEOUT = 15
def parse_bdist_wininst(name):
"""Return (base,pyversion) or (None,None) for possible .exe name"""
lower = name.lower()
base, py_ver, plat = None, None, None
if lower.endswith('.exe'):
if lower.endswith('.win32.exe'):
base = name[:-10]
plat = 'win32'
elif lower.startswith('.win32-py',-16):
py_ver = name[-7:-4]
base = name[:-16]
plat = 'win32'
elif lower.endswith('.win-amd64.exe'):
base = name[:-14]
plat = 'win-amd64'
elif lower.startswith('.win-amd64-py',-20):
py_ver = name[-7:-4]
base = name[:-20]
plat = 'win-amd64'
return base,py_ver,plat
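# Worked examples of the branches above (the filenames are hypothetical):
#   parse_bdist_wininst('foo-1.0.win32.exe')       -> ('foo-1.0', None, 'win32')
#   parse_bdist_wininst('foo-1.0.win32-py2.7.exe') -> ('foo-1.0', '2.7', 'win32')
#   parse_bdist_wininst('foo-1.0.tar.gz')          -> (None, None, None)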
def egg_info_for_url(url):
scheme, server, path, parameters, query, fragment = urlparse.urlparse(url)
base = urllib2.unquote(path.split('/')[-1])
if '#' in base: base, fragment = base.split('#',1)
return base,fragment
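# Worked example (hypothetical URL):
#   egg_info_for_url('http://example.com/foo-1.0.tar.gz#egg=foo')
#   returns ('foo-1.0.tar.gz', 'egg=foo').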
def distros_for_url(url, metadata=None):
"""Yield egg or source distribution objects that might be found at a URL"""
base, fragment = egg_info_for_url(url)
for dist in distros_for_location(url, base, metadata): yield dist
if fragment:
match = EGG_FRAGMENT.match(fragment)
if match:
for dist in interpret_distro_name(
url, match.group(1), metadata, precedence = CHECKOUT_DIST
):
yield dist
def distros_for_location(location, basename, metadata=None):
"""Yield egg or source distribution objects based on basename"""
if basename.endswith('.egg.zip'):
basename = basename[:-4] # strip the .zip
if basename.endswith('.egg') and '-' in basename:
# only one, unambiguous interpretation
return [Distribution.from_location(location, basename, metadata)]
if basename.endswith('.exe'):
win_base, py_ver, platform = parse_bdist_wininst(basename)
if win_base is not None:
return interpret_distro_name(
location, win_base, metadata, py_ver, BINARY_DIST, platform
)
# Try source distro extensions (.zip, .tgz, etc.)
#
for ext in EXTENSIONS:
if basename.endswith(ext):
basename = basename[:-len(ext)]
return interpret_distro_name(location, basename, metadata)
return [] # no extension matched
def distros_for_filename(filename, metadata=None):
"""Yield possible egg or source distribution objects based on a filename"""
return distros_for_location(
normalize_path(filename), os.path.basename(filename), metadata
)
def interpret_distro_name(location, basename, metadata,
py_version=None, precedence=SOURCE_DIST, platform=None
):
"""Generate alternative interpretations of a source distro name
Note: if `location` is a filesystem filename, you should call
``pkg_resources.normalize_path()`` on it before passing it to this
routine!
"""
# Generate alternative interpretations of a source distro name
# Because some packages are ambiguous as to name/versions split
# e.g. "adns-python-1.1.0", "egenix-mx-commercial", etc.
    # So, we generate each possible interpretation (e.g. "adns, python-1.1.0"
# "adns-python, 1.1.0", and "adns-python-1.1.0, no version"). In practice,
# the spurious interpretations should be ignored, because in the event
# there's also an "adns" package, the spurious "python-1.1.0" version will
# compare lower than any numeric version number, and is therefore unlikely
# to match a request for it. It's still a potential problem, though, and
# in the long run PyPI and the distutils should go for "safe" names and
# versions in distribution archive names (sdist and bdist).
parts = basename.split('-')
if not py_version:
for i,p in enumerate(parts[2:]):
if len(p)==5 and p.startswith('py2.'):
return # It's a bdist_dumb, not an sdist -- bail out
for p in range(1,len(parts)+1):
yield Distribution(
location, metadata, '-'.join(parts[:p]), '-'.join(parts[p:]),
py_version=py_version, precedence = precedence,
platform = platform
)
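# Worked example: for the ambiguous basename 'adns-python-1.1.0' discussed above,
# interpret_distro_name(location, 'adns-python-1.1.0', None) yields three candidate
# distributions -- ('adns', 'python-1.1.0'), ('adns-python', '1.1.0') and
# ('adns-python-1.1.0', '') -- and version comparison later pushes the spurious
# interpretations below any real release, as the comment above explains.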
REL = re.compile("""<([^>]*\srel\s*=\s*['"]?([^'">]+)[^>]*)>""", re.I)
# this line is here to fix emacs' cruddy broken syntax highlighting
def find_external_links(url, page):
"""Find rel="homepage" and rel="download" links in `page`, yielding URLs"""
for match in REL.finditer(page):
tag, rel = match.groups()
rels = map(str.strip, rel.lower().split(','))
if 'homepage' in rels or 'download' in rels:
for match in HREF.finditer(tag):
yield urlparse.urljoin(url, htmldecode(match.group(1)))
for tag in ("<th>Home Page", "<th>Download URL"):
pos = page.find(tag)
if pos!=-1:
match = HREF.search(page,pos)
if match:
yield urlparse.urljoin(url, htmldecode(match.group(1)))
user_agent = "Python-urllib/%s distribute/%s" % (
sys.version[:3], require('distribute')[0].version
)
class PackageIndex(Environment):
"""A distribution index that scans web pages for download URLs"""
def __init__(self, index_url="http://pypi.python.org/simple", hosts=('*',),
*args, **kw
):
Environment.__init__(self,*args,**kw)
self.index_url = index_url + "/"[:not index_url.endswith('/')]
self.scanned_urls = {}
self.fetched_urls = {}
self.package_pages = {}
self.allows = re.compile('|'.join(map(translate,hosts))).match
self.to_scan = []
def process_url(self, url, retrieve=False):
"""Evaluate a URL as a possible download, and maybe retrieve it"""
if url in self.scanned_urls and not retrieve:
return
self.scanned_urls[url] = True
if not URL_SCHEME(url):
self.process_filename(url)
return
else:
dists = list(distros_for_url(url))
if dists:
if not self.url_ok(url):
return
self.debug("Found link: %s", url)
if dists or not retrieve or url in self.fetched_urls:
map(self.add, dists)
return # don't need the actual page
if not self.url_ok(url):
self.fetched_urls[url] = True
return
self.info("Reading %s", url)
f = self.open_url(url, "Download error on %s: %%s -- Some packages may not be found!" % url)
if f is None: return
self.fetched_urls[url] = self.fetched_urls[f.url] = True
if 'html' not in f.headers.get('content-type', '').lower():
f.close() # not html, we can't process it
return
base = f.url # handle redirects
page = f.read()
if not isinstance(page, str): # We are in Python 3 and got bytes. We want str.
if isinstance(f, urllib2.HTTPError):
# Errors have no charset, assume latin1:
charset = 'latin-1'
else:
charset = f.headers.get_param('charset') or 'latin-1'
page = page.decode(charset, "ignore")
f.close()
for match in HREF.finditer(page):
link = urlparse.urljoin(base, htmldecode(match.group(1)))
self.process_url(link)
if url.startswith(self.index_url) and getattr(f,'code',None)!=404:
page = self.process_index(url, page)
def process_filename(self, fn, nested=False):
# process filenames or directories
if not os.path.exists(fn):
self.warn("Not found: %s", fn)
return
if os.path.isdir(fn) and not nested:
path = os.path.realpath(fn)
for item in os.listdir(path):
self.process_filename(os.path.join(path,item), True)
dists = distros_for_filename(fn)
if dists:
self.debug("Found: %s", fn)
map(self.add, dists)
def url_ok(self, url, fatal=False):
s = URL_SCHEME(url)
if (s and s.group(1).lower()=='file') or self.allows(urlparse.urlparse(url)[1]):
return True
msg = "\nLink to % s ***BLOCKED*** by --allow-hosts\n"
if fatal:
raise DistutilsError(msg % url)
else:
self.warn(msg, url)
def scan_egg_links(self, search_path):
for item in search_path:
if os.path.isdir(item):
for entry in os.listdir(item):
if entry.endswith('.egg-link'):
self.scan_egg_link(item, entry)
def scan_egg_link(self, path, entry):
lines = filter(None, map(str.strip, open(os.path.join(path, entry))))
if len(lines)==2:
for dist in find_distributions(os.path.join(path, lines[0])):
dist.location = os.path.join(path, *lines)
dist.precedence = SOURCE_DIST
self.add(dist)
def process_index(self,url,page):
"""Process the contents of a PyPI page"""
def scan(link):
# Process a URL to see if it's for a package page
if link.startswith(self.index_url):
parts = map(
urllib2.unquote, link[len(self.index_url):].split('/')
)
if len(parts)==2 and '#' not in parts[1]:
# it's a package page, sanitize and index it
pkg = safe_name(parts[0])
ver = safe_version(parts[1])
self.package_pages.setdefault(pkg.lower(),{})[link] = True
return to_filename(pkg), to_filename(ver)
return None, None
# process an index page into the package-page index
for match in HREF.finditer(page):
try:
scan( urlparse.urljoin(url, htmldecode(match.group(1))) )
except ValueError:
pass
pkg, ver = scan(url) # ensure this page is in the page index
if pkg:
# process individual package page
for new_url in find_external_links(url, page):
# Process the found URL
base, frag = egg_info_for_url(new_url)
if base.endswith('.py') and not frag:
if ver:
new_url+='#egg=%s-%s' % (pkg,ver)
else:
self.need_version_info(url)
self.scan_url(new_url)
return PYPI_MD5.sub(
lambda m: '<a href="%s#md5=%s">%s</a>' % m.group(1,3,2), page
)
else:
return "" # no sense double-scanning non-package pages
def need_version_info(self, url):
self.scan_all(
"Page at %s links to .py file(s) without version info; an index "
"scan is required.", url
)
def scan_all(self, msg=None, *args):
if self.index_url not in self.fetched_urls:
if msg: self.warn(msg,*args)
self.info(
"Scanning index of all packages (this may take a while)"
)
self.scan_url(self.index_url)
def find_packages(self, requirement):
self.scan_url(self.index_url + requirement.unsafe_name+'/')
if not self.package_pages.get(requirement.key):
# Fall back to safe version of the name
self.scan_url(self.index_url + requirement.project_name+'/')
if not self.package_pages.get(requirement.key):
# We couldn't find the target package, so search the index page too
self.not_found_in_index(requirement)
for url in list(self.package_pages.get(requirement.key,())):
# scan each page that might be related to the desired package
self.scan_url(url)
def obtain(self, requirement, installer=None):
self.prescan(); self.find_packages(requirement)
for dist in self[requirement.key]:
if dist in requirement:
return dist
self.debug("%s does not match %s", requirement, dist)
return super(PackageIndex, self).obtain(requirement,installer)
def check_md5(self, cs, info, filename, tfp):
if re.match('md5=[0-9a-f]{32}$', info):
self.debug("Validating md5 checksum for %s", filename)
if cs.hexdigest()<>info[4:]:
tfp.close()
os.unlink(filename)
raise DistutilsError(
"MD5 validation failed for "+os.path.basename(filename)+
"; possible download problem?"
)
def add_find_links(self, urls):
"""Add `urls` to the list that will be prescanned for searches"""
for url in urls:
if (
self.to_scan is None # if we have already "gone online"
or not URL_SCHEME(url) # or it's a local file/directory
or url.startswith('file:')
or list(distros_for_url(url)) # or a direct package link
):
# then go ahead and process it now
self.scan_url(url)
else:
# otherwise, defer retrieval till later
self.to_scan.append(url)
def prescan(self):
"""Scan urls scheduled for prescanning (e.g. --find-links)"""
if self.to_scan:
map(self.scan_url, self.to_scan)
self.to_scan = None # from now on, go ahead and process immediately
def not_found_in_index(self, requirement):
if self[requirement.key]: # we've seen at least one distro
meth, msg = self.info, "Couldn't retrieve index page for %r"
else: # no distros seen for this name, might be misspelled
meth, msg = (self.warn,
"Couldn't find index page for %r (maybe misspelled?)")
meth(msg, requirement.unsafe_name)
self.scan_all()
def download(self, spec, tmpdir):
"""Locate and/or download `spec` to `tmpdir`, returning a local path
`spec` may be a ``Requirement`` object, or a string containing a URL,
an existing local filename, or a project/version requirement spec
(i.e. the string form of a ``Requirement`` object). If it is the URL
of a .py file with an unambiguous ``#egg=name-version`` tag (i.e., one
that escapes ``-`` as ``_`` throughout), a trivial ``setup.py`` is
automatically created alongside the downloaded file.
If `spec` is a ``Requirement`` object or a string containing a
project/version requirement spec, this method returns the location of
a matching distribution (possibly after downloading it to `tmpdir`).
If `spec` is a locally existing file or directory name, it is simply
returned unchanged. If `spec` is a URL, it is downloaded to a subpath
of `tmpdir`, and the local filename is returned. Various errors may be
raised if a problem occurs during downloading.
"""
if not isinstance(spec,Requirement):
scheme = URL_SCHEME(spec)
if scheme:
# It's a url, download it to tmpdir
found = self._download_url(scheme.group(1), spec, tmpdir)
base, fragment = egg_info_for_url(spec)
if base.endswith('.py'):
found = self.gen_setup(found,fragment,tmpdir)
return found
elif os.path.exists(spec):
# Existing file or directory, just return it
return spec
else:
try:
spec = Requirement.parse(spec)
except ValueError:
raise DistutilsError(
"Not a URL, existing file, or requirement spec: %r" %
(spec,)
)
return getattr(self.fetch_distribution(spec, tmpdir),'location',None)
def fetch_distribution(self,
requirement, tmpdir, force_scan=False, source=False, develop_ok=False,
local_index=None
):
"""Obtain a distribution suitable for fulfilling `requirement`
`requirement` must be a ``pkg_resources.Requirement`` instance.
If necessary, or if the `force_scan` flag is set, the requirement is
searched for in the (online) package index as well as the locally
installed packages. If a distribution matching `requirement` is found,
the returned distribution's ``location`` is the value you would have
gotten from calling the ``download()`` method with the matching
distribution's URL or filename. If no matching distribution is found,
``None`` is returned.
If the `source` flag is set, only source distributions and source
checkout links will be considered. Unless the `develop_ok` flag is
set, development and system eggs (i.e., those using the ``.egg-info``
format) will be ignored.
"""
# process a Requirement
self.info("Searching for %s", requirement)
skipped = {}
dist = None
def find(req, env=None):
if env is None:
env = self
# Find a matching distribution; may be called more than once
for dist in env[req.key]:
if dist.precedence==DEVELOP_DIST and not develop_ok:
if dist not in skipped:
self.warn("Skipping development or system egg: %s",dist)
skipped[dist] = 1
continue
if dist in req and (dist.precedence<=SOURCE_DIST or not source):
self.info("Best match: %s", dist)
return dist.clone(
location=self.download(dist.location, tmpdir)
)
if force_scan:
self.prescan()
self.find_packages(requirement)
dist = find(requirement)
if local_index is not None:
dist = dist or find(requirement, local_index)
if dist is None and self.to_scan is not None:
self.prescan()
dist = find(requirement)
if dist is None and not force_scan:
self.find_packages(requirement)
dist = find(requirement)
if dist is None:
self.warn(
"No local packages or download links found for %s%s",
(source and "a source distribution of " or ""),
requirement,
)
return dist
def fetch(self, requirement, tmpdir, force_scan=False, source=False):
"""Obtain a file suitable for fulfilling `requirement`
DEPRECATED; use the ``fetch_distribution()`` method now instead. For
backward compatibility, this routine is identical but returns the
``location`` of the downloaded distribution instead of a distribution
object.
"""
dist = self.fetch_distribution(requirement,tmpdir,force_scan,source)
if dist is not None:
return dist.location
return None
def gen_setup(self, filename, fragment, tmpdir):
match = EGG_FRAGMENT.match(fragment)
dists = match and [d for d in
interpret_distro_name(filename, match.group(1), None) if d.version
] or []
if len(dists)==1: # unambiguous ``#egg`` fragment
basename = os.path.basename(filename)
# Make sure the file has been downloaded to the temp dir.
if os.path.dirname(filename) != tmpdir:
dst = os.path.join(tmpdir, basename)
from setuptools.command.easy_install import samefile
if not samefile(filename, dst):
shutil.copy2(filename, dst)
filename=dst
file = open(os.path.join(tmpdir, 'setup.py'), 'w')
file.write(
"from setuptools import setup\n"
"setup(name=%r, version=%r, py_modules=[%r])\n"
% (
dists[0].project_name, dists[0].version,
os.path.splitext(basename)[0]
)
)
file.close()
return filename
elif match:
raise DistutilsError(
"Can't unambiguously interpret project/version identifier %r; "
"any dashes in the name or version should be escaped using "
"underscores. %r" % (fragment,dists)
)
else:
raise DistutilsError(
"Can't process plain .py files without an '#egg=name-version'"
" suffix to enable automatic setup script generation."
)
dl_blocksize = 8192
def _download_to(self, url, filename):
self.info("Downloading %s", url)
# Download the file
fp, tfp, info = None, None, None
try:
if '#' in url:
url, info = url.split('#', 1)
fp = self.open_url(url)
if isinstance(fp, urllib2.HTTPError):
raise DistutilsError(
"Can't download %s: %s %s" % (url, fp.code,fp.msg)
)
cs = md5()
headers = fp.info()
blocknum = 0
bs = self.dl_blocksize
size = -1
if "content-length" in headers:
# Some servers return multiple Content-Length headers :(
content_length = headers.get("Content-Length")
size = int(content_length)
self.reporthook(url, filename, blocknum, bs, size)
tfp = open(filename,'wb')
while True:
block = fp.read(bs)
if block:
cs.update(block)
tfp.write(block)
blocknum += 1
self.reporthook(url, filename, blocknum, bs, size)
else:
break
if info: self.check_md5(cs, info, filename, tfp)
return headers
finally:
if fp: fp.close()
if tfp: tfp.close()
def reporthook(self, url, filename, blocknum, blksize, size):
pass # no-op
def open_url(self, url, warning=None):
if url.startswith('file:'):
return local_open(url)
try:
return open_with_auth(url)
except (ValueError, httplib.InvalidURL), v:
msg = ' '.join([str(arg) for arg in v.args])
if warning:
self.warn(warning, msg)
else:
raise DistutilsError('%s %s' % (url, msg))
except urllib2.HTTPError, v:
return v
except urllib2.URLError, v:
if warning:
self.warn(warning, v.reason)
else:
raise DistutilsError("Download error for %s: %s"
% (url, v.reason))
except httplib.BadStatusLine, v:
if warning:
self.warn(warning, v.line)
else:
raise DistutilsError('%s returned a bad status line. '
'The server might be down, %s' % \
(url, v.line))
except httplib.HTTPException, v:
if warning:
self.warn(warning, v)
else:
raise DistutilsError("Download error for %s: %s"
% (url, v))
def _download_url(self, scheme, url, tmpdir):
# Determine download filename
#
name = filter(None,urlparse.urlparse(url)[2].split('/'))
if name:
name = name[-1]
while '..' in name:
name = name.replace('..','.').replace('\\','_')
else:
name = "__downloaded__" # default if URL has no path contents
if name.endswith('.egg.zip'):
name = name[:-4] # strip the extra .zip before download
filename = os.path.join(tmpdir,name)
# Download the file
#
if scheme=='svn' or scheme.startswith('svn+'):
return self._download_svn(url, filename)
elif scheme=='file':
return urllib.url2pathname(urlparse.urlparse(url)[2])
else:
self.url_ok(url, True) # raises error if not allowed
return self._attempt_download(url, filename)
def scan_url(self, url):
self.process_url(url, True)
def _attempt_download(self, url, filename):
headers = self._download_to(url, filename)
if 'html' in headers.get('content-type','').lower():
return self._download_html(url, headers, filename)
else:
return filename
def _download_html(self, url, headers, filename):
file = open(filename)
for line in file:
if line.strip():
# Check for a subversion index page
if re.search(r'<title>([^- ]+ - )?Revision \d+:', line):
# it's a subversion index page:
file.close()
os.unlink(filename)
return self._download_svn(url, filename)
break # not an index page
file.close()
os.unlink(filename)
raise DistutilsError("Unexpected HTML page found at "+url)
def _download_svn(self, url, filename):
url = url.split('#',1)[0] # remove any fragment for svn's sake
self.info("Doing subversion checkout from %s to %s", url, filename)
os.system("svn checkout -q %s %s" % (url, filename))
return filename
def debug(self, msg, *args):
log.debug(msg, *args)
def info(self, msg, *args):
log.info(msg, *args)
def warn(self, msg, *args):
log.warn(msg, *args)
# This pattern matches a character entity reference (a decimal numeric
# reference, a hexadecimal numeric reference, or a named reference).
entity_sub = re.compile(r'&(#(\d+|x[\da-fA-F]+)|[\w.:-]+);?').sub
def uchr(c):
if not isinstance(c, int):
return c
if c>255: return unichr(c)
return chr(c)
def decode_entity(match):
what = match.group(1)
if what.startswith('#x'):
what = int(what[2:], 16)
elif what.startswith('#'):
what = int(what[1:])
else:
from htmlentitydefs import name2codepoint
what = name2codepoint.get(what, match.group(0))
return uchr(what)
def htmldecode(text):
"""Decode HTML entities in the given text."""
return entity_sub(decode_entity, text)
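# Added illustration (not part of the original module): named, decimal and
# hexadecimal entity references are all handled by htmldecode(), e.g.
#   htmldecode('a &amp; b &#60;tag&#x3E;')  ->  'a & b <tag>'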
def socket_timeout(timeout=15):
def _socket_timeout(func):
def _socket_timeout(*args, **kwargs):
old_timeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(timeout)
try:
return func(*args, **kwargs)
finally:
socket.setdefaulttimeout(old_timeout)
return _socket_timeout
return _socket_timeout
def _encode_auth(auth):
"""
A function compatible with Python 2.3-3.3 that will encode
auth from a URL suitable for an HTTP header.
>>> _encode_auth('username%3Apassword')
u'dXNlcm5hbWU6cGFzc3dvcmQ='
"""
auth_s = urllib2.unquote(auth)
# convert to bytes
auth_bytes = auth_s.encode()
# use the legacy interface for Python 2.3 support
encoded_bytes = base64.encodestring(auth_bytes)
# convert back to a string
encoded = encoded_bytes.decode()
# strip the trailing carriage return
return encoded.rstrip()
def open_with_auth(url):
"""Open a urllib2 request, handling HTTP authentication"""
scheme, netloc, path, params, query, frag = urlparse.urlparse(url)
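    # Added note (not part of the original module): credentials embedded in the
    # URL, e.g. http://user:secret@example.com/simple/, are stripped from the
    # request URL and sent as a Basic Authorization header instead.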
# Double scheme does not raise on Mac OS X as revealed by a
# failing test. We would expect "nonnumeric port". Refs #20.
if sys.platform == 'darwin':
if netloc.endswith(':'):
raise httplib.InvalidURL("nonnumeric port: ''")
if scheme in ('http', 'https'):
auth, host = urllib2.splituser(netloc)
else:
auth = None
if auth:
auth = "Basic " + _encode_auth(auth)
new_url = urlparse.urlunparse((scheme,host,path,params,query,frag))
request = urllib2.Request(new_url)
request.add_header("Authorization", auth)
else:
request = urllib2.Request(url)
request.add_header('User-Agent', user_agent)
fp = urllib2.urlopen(request)
if auth:
# Put authentication info back into request URL if same host,
# so that links found on the page will work
s2, h2, path2, param2, query2, frag2 = urlparse.urlparse(fp.url)
if s2==scheme and h2==host:
fp.url = urlparse.urlunparse((s2,netloc,path2,param2,query2,frag2))
return fp
# adding a timeout to avoid freezing package_index
open_with_auth = socket_timeout(_SOCKET_TIMEOUT)(open_with_auth)
def fix_sf_url(url):
return url # backward compatibility
def local_open(url):
"""Read a local path, with special support for directories"""
scheme, server, path, param, query, frag = urlparse.urlparse(url)
filename = urllib.url2pathname(path)
if os.path.isfile(filename):
return urllib2.urlopen(url)
elif path.endswith('/') and os.path.isdir(filename):
files = []
for f in os.listdir(filename):
if f=='index.html':
fp = open(os.path.join(filename,f),'rb')
body = fp.read()
fp.close()
break
elif os.path.isdir(os.path.join(filename,f)):
f+='/'
files.append("<a href=%r>%s</a>" % (f,f))
else:
body = ("<html><head><title>%s</title>" % url) + \
"</head><body>%s</body></html>" % '\n'.join(files)
status, message = 200, "OK"
else:
status, message, body = 404, "Path not found", "Not found"
return urllib2.HTTPError(url, status, message,
{'content-type':'text/html'}, cStringIO.StringIO(body))
# this line is a kludge to keep the trailing blank lines for pje's editor
|
xbianonpi/xbian-package-development
|
content/usr/local/lib/python2.7/dist-packages/distribute-0.6.30-py2.7.egg/setuptools/package_index.py
|
Python
|
gpl-2.0
| 31,252
|
import collections
import json
import re
import sys
import urllib
from bs4 import BeautifulSoup
import HTMLParser
def get_properties (message):
"""
Processes a given chat message to identify particular properties: mentions,
emoticons and links. See the README.md file for more detailed documentation.
Args:
message (str): the chat message to process
Returns:
str: json data describing the properties of the message
Raises:
TypeError: if message parameter is not a string
Considerations:
Two approaches involving a tradeoff between performance and readability:
1. Scan message *for each* property's regular expression (more readable code)
2. Scan message *once* using a more complex regular expression (better performance)
I choose to implement #2 so that the solution scales better
"""
# validate message input data
if type(message) != str:
raise TypeError("message parameter must be a string")
# data structure that we're going to return (JSON)
properties = collections.defaultdict(list)
# regular expressions used to extract properties from message
REG_EXP = {
"mentions" : r"@\w+",
"emoticons" : r"\(\w+?\)",
"links" : r"https?://[^\s]+"
}
"""
For simplicity, assume that 'links' use the http(s) URL scheme.
The format of a URL can be very involved. A more correct, general approach
would be to include multiple schemes, follow the RFC for URL formats (RFC 3986, 1808), etc.
This would yield a complex regular expression outside the scope of this exercise.
"""
# regular expression pattern used for search
pattern = r"%s|%s|%s" % (REG_EXP["mentions"], REG_EXP["emoticons"], REG_EXP["links"])
for match in re.findall(pattern, message):
# setup key/value variables for properties dictionary
key, value = None, None
if re.match(REG_EXP["mentions"], match):
key = "mentions"
value = match[1:] # "@eric"
elif re.match(REG_EXP["emoticons"], match):
key = "emoticons"
value = match[1:-1] # "(smiley)"
elif re.match(REG_EXP["links"], match):
url = match # "http://twitter.com/"
# get title of URL
title = None
            f = None
            try:
                f = urllib.urlopen(url)
                html = f.read()
                title = BeautifulSoup(html).title.string
            except IOError as e:
                sys.stderr.write("I/O error({0}): {1}\n".format(e.errno, e.strerror))
                continue
            except HTMLParser.HTMLParseError as e:
                # HTMLParseError has no errno/strerror; report the parser message instead
                sys.stderr.write("HTMLParseError: {0}\n".format(e))
                continue
            finally:
                # urlopen may fail before f is bound, so guard the close
                if f is not None:
                    f.close()
key = "links"
value = {
"url" : url,
"title" : title
}
else:
sys.stderr.write("warning: the following match wasn't processed: {0}\n".format(match))
continue
# store processed key/value variables in properties *defaultdict*
properties[key].append(value)
return json.dumps(properties)
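# --- Added usage sketch (not part of the original module) ---
# The message below is hypothetical and exercises only the mention and emoticon
# branches, so get_properties() makes no network request for a link title.
if __name__ == "__main__":
    example = "@chris good morning! (allthethings)"
    print(get_properties(example))
    # e.g. {"mentions": ["chris"], "emoticons": ["allthethings"]} (key order may vary)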
|
erichaase/atlassian
|
atlassian/hip_chat.py
|
Python
|
mit
| 3,247
|
#!/usr/bin/python
# license:BSD-3-Clause
# copyright-holders:Olivier Galibert
from __future__ import print_function
USAGE = """
Usage:
%s h8.lst <mode> <type> h8.inc (mode = s/d, type = o/h/s20/s26)
"""
import sys
def name_to_type(name):
if name == "o":
return 0
if name == "h":
return 1
if name == "s20":
return 2
if name == "s26":
return 3
sys.stderr.write("Unknown chip type name %s\n" % name)
sys.exit(1)
def type_to_device(dtype, mode):
if mode == 's':
if dtype == 0:
return "h8_device"
if dtype == 1:
return "h8h_device"
if dtype == 2:
return "h8s2000_device"
return "h8s2600_device"
else:
if dtype == 0:
return "h8_disassembler"
if dtype == 1:
return "h8h_disassembler"
if dtype == 2:
return "h8s2000_disassembler"
return "h8s2600_disassembler"
def hexsplit(str):
res = []
for i in range(0, len(str), 2):
res.append(int(str[i:i+2], 16))
return res
def has_memory(ins):
for s in ["read", "write", "sp_push", "sp_pop", "sp32_push", "sp32_pop", "fetch(", "prefetch_start(", "prefetch(", "prefetch_noirq("]:
if s in ins:
return True
return False
def has_eat(ins):
if "eat-all-cycles" in ins:
return True
return False
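# Added note (not in the original generator): save_full_one() emits a C++
# handler that executes an instruction from its start, bailing out whenever a
# bus access would exceed the available cycles by recording inst_substate and
# returning; save_partial_one() emits the matching resumable variant, a switch
# on inst_substate whose fallthrough cases re-enter the handler at the exact
# access that was interrupted.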
def save_full_one(f, t, name, source):
print("void %s::%s_full()" % (t, name), file=f)
print("{", file=f)
substate = 1
for line in source:
if has_memory(line):
print("\tif(icount <= bcount) { inst_substate = %d; return; }" % substate, file=f)
print(line, file=f)
substate += 1
elif has_eat(line):
print("\tif(icount) { icount = bcount; } inst_substate = %d; return;" % substate, file=f)
substate += 1
else:
print(line, file=f)
print("}", file=f)
print("", file=f)
def save_partial_one(f, t, name, source):
print("void %s::%s_partial()" % (t, name), file=f)
print("{", file=f)
print("switch(inst_substate) {", file=f)
print("case 0:", file=f)
substate = 1
for line in source:
if has_memory(line):
print("\tif(icount <= bcount) { inst_substate = %d; return; }" % substate, file=f)
print("\t[[fallthrough]];", file=f)
print("case %d:;" % substate, file=f)
print(line, file=f)
substate += 1
elif has_eat(line):
print("\tif(icount) { icount = bcount; } inst_substate = %d; return;" % substate, file=f)
print("case %d:;" % substate, file=f)
substate += 1
else:
print(line, file=f)
print("\tbreak;", file=f)
print("}", file=f)
print("\tinst_substate = 0;", file=f)
print("}", file=f)
print("", file=f)
class Hash:
def __init__(self, premask):
self.mask = 0x00
self.enabled = False
self.premask = premask
self.d = {}
def get(self, val, premask):
if val in self.d:
h = self.d[val]
if h.premask != premask:
sys.stderr.write("Premask conflict\n")
sys.exit(1)
return h
h = Hash(premask)
self.d[val] = h
return h
def set(self, val, opc):
if val in self.d:
sys.stderr.write("Collision on %s\n" % opc.description())
sys.exit(1)
self.d[val] = opc
class Opcode:
def __init__(self, val, mask, skip, name, am1, am2, otype, dtype):
self.name = name
self.val = hexsplit(val)
self.mask = hexsplit(mask)
self.skip = int(skip)
self.am1 = am1
self.am2 = am2
self.source = []
self.otype = otype
self.enabled = otype == -1 or (otype == 0 and dtype == 0) or (otype != 0 and dtype >= otype)
self.needed = self.enabled and (otype == dtype or (otype == -1 and dtype == 0))
if dtype == 0 and (am1 == "r16l" or am2 == "r16l"):
self.mask[len(self.mask) - 1] |= 0x08
if dtype == 0 and (am1 == "r16h" or am2 == "r16h"):
self.mask[len(self.mask) - 1] |= 0x80
extra_words = 0
if (am1 == "abs16" or am2 == "abs16" or am1 == "abs16e" or am1 == "abs24e") and self.skip == 0:
extra_words += 1
if (am1 == "abs32" or am2 == "abs32") and self.skip == 0:
extra_words += 2
if am1 == "imm16" or am1 == "rel16" or am1 == "r16d16h" or am2 == "r16d16h" or am1 == "r32d16h" or am2 == "r32d16h":
extra_words += 1
if am1 == "imm32" or am1 == "r32d32hh" or am2 == "r32d32hh":
extra_words += 2
self.extra_words = extra_words
base_offset = len(self.val)/2 + self.skip
for i in range(0, extra_words):
self.source.append("\tfetch(%d);\n" % (i+base_offset))
def description(self):
return "%s %s %s" % (self.name, self.am1, self.am2)
def add_source_line(self, line):
self.source.append(line)
def is_dispatch(self):
return False
def function_name(self):
n = self.name.replace(".", "_")
if self.am1 != "-":
n = n + "_" + self.am1
if self.am2 != "-":
n = n + "_" + self.am2
return n
def save_dasm(self, f):
if len(self.mask) == 2:
mask = (self.mask[0] << 8) | self.mask[1]
val = (self.val[0] << 8) | self.val[1]
mask2 = 0
val2 = 0
slot = 0
elif len(self.mask) == 4:
mask = (self.mask[0] << 24) | (self.mask[1] << 16) | (self.mask[2] << 8) | self.mask[3]
val = (self.val[0] << 24) | (self.val[1] << 16) | (self.val[2] << 8) | self.val[3]
mask2 = 0
val2 = 0
slot = self.skip + 1
else:
mask = (self.mask[2] << 24) | (self.mask[3] << 16) | (self.mask[4] << 8) | self.mask[5]
val = (self.val[2] << 24) | (self.val[3] << 16) | (self.val[4] << 8) | self.val[5]
mask2 = (self.mask[0] << 8) | self.mask[1]
val2 = (self.val[0] << 8) | self.val[1]
slot = 4
size = len(self.val) + 2*self.skip + 2*self.extra_words
if self.name == "jsr" or self.name == "bsr":
flags = "%d | STEP_OVER" % size
elif self.name == "rts" or self.name == "rte":
flags = "%d | STEP_OUT" % size
else:
flags = "%d" % size
print("\t{ %d, 0x%08x, 0x%08x, 0x%04x, 0x%04x, \"%s\", DASM_%s, DASM_%s, %s }, // %s" % ( slot, val, mask, val2, mask2, self.name, self.am1 if self.am1 != "-" else "none", self.am2 if self.am2 != "-" else "none", flags, "needed" if self.needed else "inherited"), file=f)
class Special:
def __init__(self, val, name, otype, dtype):
self.name = name
self.val = int(val, 16)
self.enabled = otype == -1 or (otype == 0 and dtype == 0) or (otype != 0 and dtype >= otype)
self.needed = otype == dtype or (otype == -1 and dtype == 0)
self.source = []
def add_source_line(self, line):
self.source.append(line)
class Macro:
def __init__(self, tokens):
self.name = tokens[1]
self.params = []
for i in range(2, len(tokens)):
self.params.append(tokens[i])
self.source = []
def add_source_line(self, line):
self.source.append(line)
def apply(self, target, tokens):
values = []
if len(self.params) > 1:
for i in range(0, len(self.params)-1):
values.append(tokens[i+1])
lval = ""
for i in range(len(self.params)-1, len(tokens)-1):
if lval != "":
lval += " "
lval = lval + tokens[i+1]
values.append(lval)
for i in range(0, len(self.source)):
line = self.source[i]
for j in range(0, len(self.params)):
line = line.replace(self.params[j], values[j])
target.add_source_line(line)
class DispatchStep:
def __init__(self, id, pos, opc):
self.id = id
self.pos = pos
self.name = ""
self.enabled = False
self.mask = opc.mask[pos-1]
for i in range(0, pos):
self.name += "%02x" % opc.val[i]
if pos == 2:
self.skip = opc.skip
else:
self.skip = 0
def is_dispatch(self):
return True
def source(self):
start = self.pos // 2
end = start + self.skip
s = []
for i in range(start, end+1):
s.append("\tIR[%d] = fetch();" % i)
s.append("\tinst_state = 0x%x0000 | IR[%d];" % (self.id, end))
return s
class OpcodeList:
def __init__(self, fname, dtype):
self.opcode_info = []
self.dispatch_info = []
self.states_info = []
self.dispatch = {}
self.macros = {}
try:
f = open(fname, "r")
except Exception:
err = sys.exc_info()[1]
sys.stderr.write("Cannot read opcodes file %s [%s]\n" % (fname, err))
sys.exit(1)
inf = None
for line in f:
if line.startswith("#"):
continue
line = line.rstrip()
if not line:
continue
if line.startswith(" ") or line.startswith("\t"):
if inf is not None:
# append instruction to last opcode, maybe expand a macro
tokens = line.split()
if tokens[0] in self.macros:
self.macros[tokens[0]].apply(inf, tokens)
else:
inf.add_source_line(line)
else:
# New opcode
tokens = line.split()
if tokens[0] == "macro":
inf = Macro(tokens)
self.macros[inf.name] = inf
elif len(tokens) == 2 or len(tokens) == 3:
if len(tokens) >= 3:
otype = name_to_type(tokens[2])
else:
otype = -1
inf = Special(tokens[0], tokens[1], otype, dtype)
self.states_info.append(inf)
else:
if len(tokens) >= 7:
otype = name_to_type(tokens[6])
else:
otype = -1
if otype == -1 or dtype == 0 or (otype != 0 and dtype != 0):
inf = Opcode(tokens[0], tokens[1], tokens[2], tokens[3], tokens[4], tokens[5], otype, dtype)
self.opcode_info.append(inf)
else:
inf = None
def get(self, i):
if i in self.dispatch:
return self.dispatch[i]
h = Hash(0)
self.dispatch[i] = h
return h
def build_dispatch(self):
for opc in self.opcode_info:
for i in range(0, len(opc.val)):
v = opc.val[i]
if i == 0:
h = self.get(0)
if opc.enabled:
h.mask = h.mask | opc.mask[i]
h.enabled = True
if (i & 1) == 0:
h = h.get(v, opc.mask[i])
elif i == len(opc.val)-1:
if opc.enabled:
h.set(v, opc)
else:
if v in h.d:
d = h.d[v]
if not d.is_dispatch():
sys.stderr.write("Collision on %s\n" % opc.description())
sys.exit(1)
if opc.enabled:
d.enabled = True
h = self.get(d.id)
else:
d = DispatchStep(len(self.dispatch_info)+2, i+1, opc)
self.dispatch_info.append(d)
if opc.enabled:
d.enabled = True
h.set(v, d)
h = self.get(d.id)
def save_dasm(self, f, dname):
print("const %s::disasm_entry %s::disasm_entries[] = {" % (dname, dname), file=f)
for opc in self.opcode_info:
if opc.enabled:
opc.save_dasm(f)
print("\t{ 0, 0, 0, 0, 0, \"illegal\", 0, 0, 2 },", file=f)
print("};", file=f)
print("", file=f)
def save_opcodes(self, f, t):
for opc in self.opcode_info:
if opc.needed:
save_full_one(f, t, opc.function_name(), opc.source)
save_partial_one(f, t, opc.function_name(), opc.source)
for sta in self.states_info:
if sta.needed:
save_full_one(f, t, "state_" + sta.name, sta.source)
save_partial_one(f, t, "state_" + sta.name, sta.source)
def save_dispatch(self, f, t):
for dsp in self.dispatch_info:
save_full_one(f, t, "dispatch_" + dsp.name, dsp.source())
save_partial_one(f, t, "dispatch_" + dsp.name, dsp.source())
def save_exec(self, f, t, dtype, v):
print("void %s::do_exec_%s()" % (t, v), file=f)
print("{", file=f)
print("\tswitch(inst_state >> 16) {", file=f)
for i in range(0, len(self.dispatch_info)+2):
if i == 1:
print("\tcase 0x01: {", file=f)
print("\t\tswitch(inst_state & 0xffff) {", file=f)
for sta in self.states_info:
if sta.enabled:
print("\t\tcase 0x%02x: state_%s_%s(); break;" % (sta.val & 0xffff, sta.name, v), file=f)
print("\t\t}", file=f)
print("\t\tbreak;", file=f)
print("\t}", file=f)
else:
if i == 0 or self.dispatch_info[i-2].enabled:
print("\tcase 0x%02x: {" % i, file=f)
h = self.get(i)
print("\t\tswitch((inst_state >> 8) & 0x%02x) {" % h.mask, file=f)
for val, h2 in sorted(h.d.items()):
if h2.enabled:
fmask = h2.premask | (h.mask ^ 0xff)
c = ""
s = 0
while s < 0x100:
c += "case 0x%02x: " % (val | s)
s += 1
while s & fmask:
s += s & fmask
print("\t\t%s{" % c, file=f)
if h2.mask == 0x00:
n = h2.d[0]
if n.is_dispatch():
print("\t\t\tdispatch_%s_%s();" % (n.name, v), file=f)
else:
print("\t\t\t%s_%s();" % (n.function_name(), v), file=f)
print("\t\t\tbreak;", file=f)
else:
print("\t\t\tswitch(inst_state & 0x%02x) {" % h2.mask, file=f)
if i == 0:
mpos = 1
else:
mpos = self.dispatch_info[i-2].pos + 1
for val2, n in sorted(h2.d.items()):
if n.enabled:
fmask = h2.mask ^ 0xff
if n.is_dispatch():
fmask = fmask | n.mask
else:
fmask = fmask | n.mask[mpos]
c = ""
s = 0
while s < 0x100:
c += "case 0x%02x: " % (val2 | s)
s += 1
while s & fmask:
s += s & fmask
if n.is_dispatch():
print("\t\t\t%sdispatch_%s_%s(); break;" % (c, n.name, v), file=f)
else:
print("\t\t\t%s%s_%s(); break;" % (c, n.function_name(), v), file=f)
print("\t\t\tdefault: illegal(); break;", file=f)
print("\t\t\t}", file=f)
print("\t\t\tbreak;", file=f)
print("\t\t}", file=f)
print("\t\tdefault: illegal(); break;", file=f)
print("\t\t}", file=f)
print("\t\tbreak;", file=f)
print("\t}", file=f)
print("\t}", file=f)
print("}", file=f)
def main(argv):
if len(argv) != 5:
print(USAGE % argv[0])
return 1
mode = argv[2]
dtype = name_to_type(argv[3])
dname = type_to_device(dtype, mode)
opcodes = OpcodeList(argv[1], dtype)
try:
f = open(argv[4], "w")
except Exception:
err = sys.exc_info()[1]
sys.stderr.write("cannot write file %s [%s]\n" % (argv[4], err))
sys.exit(1)
if mode == 's':
opcodes.build_dispatch()
opcodes.save_opcodes(f, dname)
if dtype == 0:
opcodes.save_dispatch(f, dname)
opcodes.save_exec(f, dname, dtype, "full")
opcodes.save_exec(f, dname, dtype, "partial")
else:
opcodes.save_dasm(f, dname)
f.close()
# ======================================================================
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
johnparker007/mame
|
src/devices/cpu/h8/h8make.py
|
Python
|
gpl-2.0
| 18,115
|
import numpy as np
import numpy.linalg as npl
import json as json
import typedbytes as tb
from utils import Block_Mapper
from quantreg import quantreg_ipm
class Unif_Samp_Mapper(Block_Mapper):
"""
Random sampling uniformly
"""
def __init__(self):
import os
Block_Mapper.__init__(self, 32768)
        self.nx = int(self.params["nx"])  # must be an integer for the xrange() loop in process()
self.ss = float(self.params["s"])
self.size = float(self.params["num_row"])
def parse(self, row):
return [float(v) for v in row.split()]
def process(self):
As = np.array(self.data)
m, n = As.shape
p = np.ones(m) / self.size * self.ss
for k in xrange(self.nx):
coins = np.random.rand(m)
ii = coins < p
yield k, np.dot(np.diag(1/p[ii]), As[ii,]).tolist()
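# Added note (not part of the original job): in Unif_Samp_Mapper.process each
# row of the block is kept with probability p = s/num_row and, when kept,
# rescaled by 1/p, so roughly s rows survive across the whole matrix and the
# sampled block is an unbiased sketch of the original data:
#   E[(1/p) * a_i * 1{row i kept}] = a_i   for every row a_i.
# The loop over k produces nx independent sketches per input block.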
class Solve_Reducer:
"""
Solve the subproblem
"""
def __init__(self):
self.tau_vec = [0.5, 0.75, 0.95]
self.ntau = len(self.tau_vec)
def __call__(self, key, values):
#SAb = np.array([v for v in values])
data = []
for v in values:
data += v
SAb = np.array(data)
m, n = SAb.shape
x = np.zeros((n-1, self.ntau))
for i in range(self.ntau):
x[:,i] = quantreg_ipm(SAb[:,:n-1], SAb[:, n-1], self.tau_vec[i])
key = [key, m]
yield key, x.T.tolist()
if __name__ == '__main__':
import dumbo
dumbo.run(Unif_Samp_Mapper, Solve_Reducer)
|
chocjy/randomized-quantile-regression-solvers
|
hadoop/src/quantreg_unifsamp_solve.py
|
Python
|
apache-2.0
| 1,499
|
#!/usr/bin/python
import argparse
import subprocess
import os
import shutil
def run(cmd):
try:
p = subprocess.Popen(cmd, shell=True)
p.communicate()
status = p.returncode
        if status:
            # a bare raise with no active exception would itself fail; raise explicitly
            raise RuntimeError("command exited with status %d" % status)
    except:
        print "An error has occurred while running the following shell command:"
print cmd
exit(1)
def main():
parser = argparse.ArgumentParser(description='Convert and upload change log to google code wiki and create PDF file')
parser.add_argument('-i', '--input', default='../../ChangeLog', help='TIGL change log file')
parser.add_argument('--local', action='store_true', help='only local files (no upload)')
args = parser.parse_args()
# create valid RST file
print "Convert to RST..."
rstfile = 'ChangeLog.rst'
run('python changeLogToRST.py -i %s -o %s' % (args.input, rstfile))
# convert to wiki format ('pip install wikir' necessary)
print "Convert to wiki syntax..."
wikifile = 'LastChanges.wiki'
wikifile_abspath = os.path.abspath(wikifile)
run('wikir ChangeLog.rst > %s' % wikifile)
    # add sub-headline to wiki file
print 'Add sub-headline to wiki file...'
f = open(wikifile, 'r')
lines = f.readlines()
f.close()
f = open(wikifile, 'w')
f.write("#summary log of the last changes made in TIGL.\n")
for line in lines:
f.write(line)
f.close()
if not args.local:
# check out tigl.wiki
print "Check out tigl.wiki repo..."
if os.path.exists('tigl.wiki'):
shutil.rmtree('tigl.wiki')
run('git clone https://code.google.com/p/tigl.wiki/')
# overwrite LastChanges.wiki
print 'Overwrite LastChanges.wiki...'
os.chdir('tigl.wiki')
shutil.move(wikifile_abspath, wikifile)
# push repo
print 'Upload changed LastChanges.wiki...'
run('git add %s' % wikifile)
run("git commit -m 'Updated ChangeLog'")
run("git push")
os.chdir('../')
# Clean up
print 'Clean up...'
os.remove(rstfile)
if not args.local:
shutil.rmtree('tigl.wiki')
else:
os.remove(wikifile)
if __name__ == "__main__":
main()
|
codingpoets/tigl
|
misc/createChangeLog/changeLogToWikiAndDoc.py
|
Python
|
apache-2.0
| 2,221
|
# Test process monitoring
#
# Copyright (C) 2020 Simon Dobson
#
# This file is part of epydemic, epidemic network simulations in Python.
#
# epydemic is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# epydemic is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with epydemic. If not, see <http://www.gnu.org/licenses/gpl.html>.
import unittest
import os
from tempfile import NamedTemporaryFile
import epyc
import networkx
from epydemic import *
class MonitoredSIR(SIR, Monitor):
def __init__(self):
super().__init__()
class MonitorTest(unittest.TestCase):
def testSimple( self ):
'''Test we capture the right time series.'''
m = MonitoredSIR()
#m.setMaximumTime(100)
e = StochasticDynamics(m, networkx.erdos_renyi_graph(1000, 20.0 / 1000))
param = dict()
param[SIR.P_INFECTED] = 0.01
param[SIR.P_INFECT] = 0.3
param[SIR.P_REMOVE] = 1.0
param[Monitor.DELTA] = 1.0
rc = e.set(param).run(fatal=True)
self.assertSetEqual(set(rc[epyc.Experiment.RESULTS].keys()),
set([Monitor.timeSeriesForLocus(SIR.SI),
Monitor.timeSeriesForLocus(SIR.INFECTED),
Monitor.OBSERVATIONS,
SIR.SUSCEPTIBLE,
SIR.INFECTED,
SIR.REMOVED]))
elapsed = rc[epyc.Experiment.METADATA][epyc.Experiment.ELAPSED_TIME]
n = len(rc[epyc.Experiment.RESULTS][Monitor.OBSERVATIONS])
self.assertGreaterEqual(n, int(elapsed / param[Monitor.DELTA]))
for k in [SIR.SI, SIR.INFECTED]:
self.assertEqual(len(rc[epyc.Experiment.RESULTS][Monitor.timeSeriesForLocus(k)]), n)
def testHDF5(self):
'''Test we can save and retrieve the time series as HDF5.'''
tf = NamedTemporaryFile()
tf.close()
fn = tf.name
#fn = 'test.h5'
try:
nb = epyc.HDF5LabNotebook(fn, create=True)
lab = epyc.Lab(nb)
# run the experiment
m = MonitoredSIR()
m.setMaximumTime(100)
e = StochasticDynamics(m, networkx.erdos_renyi_graph(1000, 5.0 / 1000))
lab[SIR.P_INFECTED] = 0.01
lab[SIR.P_INFECT] = 0.002
lab[SIR.P_REMOVE] = 0.002
lab[Monitor.DELTA] = 1.0
rc = lab.runExperiment(e)
df = lab.dataframe()
# check we read back in correctly
with epyc.HDF5LabNotebook(fn).open() as nb1:
df1 = nb1.dataframe()
r = df.iloc[0]
r1 = df1.iloc[0]
for f in [Monitor.OBSERVATIONS, Monitor.timeSeriesForLocus(SIR.SI), Monitor.timeSeriesForLocus(SIR.INFECTED)]:
self.assertCountEqual(r[f], r1[f])
finally:
try:
os.remove(fn)
#pass
except OSError:
pass
if __name__ == '__main__':
unittest.main()
|
simoninireland/epydemic
|
test/test_monitor.py
|
Python
|
gpl-3.0
| 3,479
|
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
Provides base classes for XML->object I{unmarshalling}.
"""
from logging import getLogger
from suds import *
from suds.umx import *
from suds.umx.attrlist import AttrList
from suds.sax.text import Text
from suds.sudsobject import Factory, merge
log = getLogger(__name__)
reserved = { 'class':'cls', 'def':'dfn', }
class Core:
"""
The abstract XML I{node} unmarshaller. This class provides the
I{core} unmarshalling functionality.
"""
def process(self, content):
"""
Process an object graph representation of the xml I{node}.
@param content: The current content being unmarshalled.
@type content: L{Content}
@return: A suds object.
@rtype: L{Object}
"""
self.reset()
return self.append(content)
def append(self, content):
"""
Process the specified node and convert the XML document into
a I{suds} L{object}.
@param content: The current content being unmarshalled.
@type content: L{Content}
@return: A I{append-result} tuple as: (L{Object}, I{value})
@rtype: I{append-result}
@note: This is not the proper entry point.
@see: L{process()}
"""
self.start(content)
self.append_attributes(content)
self.append_children(content)
self.append_text(content)
self.end(content)
return self.postprocess(content)
def postprocess(self, content):
"""
Perform final processing of the resulting data structure as follows:
- Mixed values (children and text) will have a result of the I{content.node}.
        - Semi-simple values (attributes, no-children and text) will have a result of a
property object.
- Simple values (no-attributes, no-children with text nodes) will have a string
result equal to the value of the content.node.getText().
@param content: The current content being unmarshalled.
@type content: L{Content}
@return: The post-processed result.
@rtype: I{any}
"""
node = content.node
if len(node.children) and node.hasText():
return node
attributes = AttrList(node.attributes)
if attributes.rlen() and \
not len(node.children) and \
node.hasText():
p = Factory.property(node.name, node.getText())
return merge(content.data, p)
if len(content.data):
return content.data
lang = attributes.lang()
if content.node.isnil():
return None
if not len(node.children) and content.text is None:
if self.nillable(content):
return None
else:
return Text('', lang=lang)
if isinstance(content.text, str):
return Text(content.text, lang=lang)
else:
return content.text
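    # Added illustration (not part of the original module): unmarshalling an
    # element such as <person id="1"><name>Bob</name></person> yields a suds
    # Object whose XML attribute is stored as '_id' (see append_attribute) and
    # whose 'name' child collapses to the text 'Bob'; an element with neither
    # attributes nor children is returned as plain text by postprocess().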
def append_attributes(self, content):
"""
Append attribute nodes into L{Content.data}.
Attributes in the I{schema} or I{xml} namespaces are skipped.
@param content: The current content being unmarshalled.
@type content: L{Content}
"""
attributes = AttrList(content.node.attributes)
for attr in attributes.real():
name = attr.name
value = attr.value
self.append_attribute(name, value, content)
def append_attribute(self, name, value, content):
"""
Append an attribute name/value into L{Content.data}.
@param name: The attribute name
@type name: basestring
@param value: The attribute's value
@type value: basestring
@param content: The current content being unmarshalled.
@type content: L{Content}
"""
key = name
key = '_%s' % reserved.get(key, key)
setattr(content.data, key, value)
def append_children(self, content):
"""
Append child nodes into L{Content.data}
@param content: The current content being unmarshalled.
@type content: L{Content}
"""
for child in content.node:
cont = Content(child)
cval = self.append(cont)
key = reserved.get(child.name, child.name)
if key in content.data:
v = getattr(content.data, key)
if isinstance(v, list):
v.append(cval)
else:
setattr(content.data, key, [v, cval])
continue
if self.unbounded(cont):
if cval is None:
setattr(content.data, key, [])
else:
setattr(content.data, key, [cval,])
else:
setattr(content.data, key, cval)
def append_text(self, content):
"""
Append text nodes into L{Content.data}
@param content: The current content being unmarshalled.
@type content: L{Content}
"""
if content.node.hasText():
content.text = content.node.getText()
def reset(self):
pass
def start(self, content):
"""
Processing on I{node} has started. Build and return
the proper object.
@param content: The current content being unmarshalled.
@type content: L{Content}
@return: A subclass of Object.
@rtype: L{Object}
"""
content.data = Factory.object(content.node.name)
def end(self, content):
"""
Processing on I{node} has ended.
@param content: The current content being unmarshalled.
@type content: L{Content}
"""
pass
def bounded(self, content):
"""
Get whether the content is bounded (not a list).
@param content: The current content being unmarshalled.
@type content: L{Content}
@return: True if bounded, else False
@rtype: boolean
        """
return ( not self.unbounded(content) )
def unbounded(self, content):
"""
Get whether the object is unbounded (a list).
@param content: The current content being unmarshalled.
@type content: L{Content}
@return: True if unbounded, else False
@rtype: boolean
        """
return False
def nillable(self, content):
"""
Get whether the object is nillable.
@param content: The current content being unmarshalled.
@type content: L{Content}
@return: True if nillable, else False
@rtype: boolean
        """
return False
|
obsoleter/suds
|
suds/umx/core.py
|
Python
|
lgpl-3.0
| 7,783
|
####
#### Steps for operating on the various forms and their results.
####
from behave import *
###
### radio button click
###
@given('I click the "{id}" radio button')
def step_impl(context, id):
webelt = context.browser.find_element_by_id(id)
webelt.click()
###
### Submission.
###
## Submit analyze phenotype.
@when('I submit analyze phenotype')
def step_impl(context):
webelt = context.browser.find_element_by_id('analyze-submit')
webelt.click()
## Submit navbar search.
@given('I submit navbar search')
def step_impl(context):
#print(context.browser.title)
webelt = context.browser.find_element_by_id('search_form')
webelt.submit()
###
### Example for input for a possible text area form.
###
@given('I input "{text}" into the textarea "{eid}"')
def step_impl(context, text, eid):
webelt = context.browser.find_element_by_id(eid)
webelt.send_keys(text)
@given('I input the following text into the textarea "{eid}"')
def step_impl(context, eid):
input_box_text = context.text
webelt = context.browser.find_element_by_id(eid)
webelt.send_keys(input_box_text)
@when('I submit the form by clicking XPath "{xpath}"')
def step_impl(context, xpath):
## xpath like "/html/body/div[2]/div[4]/div/div/form/div[2]/button"
webelt = context.browser.find_element_by_xpath(xpath)
webelt.click()
|
kshefchek/monarch-app
|
tests/behave/steps/selenium-forms.py
|
Python
|
bsd-3-clause
| 1,359
|
__author__ = 'Guorong Xu<g1xu@ucsd.edu>'
import sys
import subprocess
import PBSTracker
import YamlFileReader
root_dir = "/shared/workspace/ChiPSeqPipeline"
data_dir = "/shared/workspace/data_archive/ChiPSeq"
## run all analysis steps: download, fastqc, alignment, tag directories, UCSC files, peak calling, annotation and motif finding.
def run_analysis(yaml_file):
documents = YamlFileReader.parse_yaml_file(yaml_file)
workflow = documents.get("workflow")
project_name = documents.get("project")
analysis_steps = documents.get("analysis")
s3_output_files_address = documents.get("upload")
style = documents.get("style")
genome = documents.get("genome")
sample_list = documents.get("sample")
## Download files from s3 and make a design group file.
download_files(workflow, project_name, sample_list)
if "fastqc" in analysis_steps:
run_fastqc(workflow, project_name, sample_list)
if "alignment" in analysis_steps:
run_alignment(workflow, project_name, sample_list)
if "make_tag_directory" in analysis_steps:
make_tag_directory(workflow, project_name, sample_list)
if "make_UCSC_file" in analysis_steps:
make_UCSC_file(workflow, project_name, sample_list)
if "find_peaks" in analysis_steps:
find_peaks(workflow, project_name, sample_list, style)
if "annotate_peaks" in analysis_steps:
annotate_peaks(workflow, project_name, sample_list, style, genome)
if "pos2bed" in analysis_steps:
pos2bed(workflow, project_name, sample_list, style)
if "find_motifs_genome" in analysis_steps:
find_motifs_genome(workflow, project_name, sample_list, style, genome)
## Upload resulting files to s3.
upload_files(workflow, project_name, s3_output_files_address)
print "======================================================"
print "The processing of the project \"" + project_name + "\" is done!"
print "======================================================"
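## Added illustration (not part of the original module): a minimal YAML layout
## matching the keys run_analysis() reads; every project name, bucket and file
## below is hypothetical. Paired ChIP/input samples share one filename entry
## separated by a comma and a space, and filenames must end in ".fastq".
_EXAMPLE_YAML = """
project: MyChipSeqProject
workflow: homer_workflow
analysis: [fastqc, alignment, make_tag_directory, make_UCSC_file, find_peaks, annotate_peaks]
upload: s3://my-bucket/results
style: factor
genome: hg19
sample:
  - filename: chip_sample.fastq, input_control.fastq
    download: s3://my-bucket/raw
"""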
## download file from s3
def download_files(workflow, project_name, sample_list):
workspace = root_dir + "/" + workflow + "/scripts/"
sample_dir = data_dir + "/" + project_name + "/" + workflow + "/"
print "executing download files..."
##copying data from s3 to local drive
for sample_file in sample_list:
if sample_file.get("filename").find(",") > -1:
sample_1 = sample_file.get("filename")[:sample_file.get("filename").find(",")]
sample_2 = sample_file.get("filename")[sample_file.get("filename").find(",") + 2:]
subprocess.call(["qsub", workspace + "download.sh", sample_file.get("download"),
sample_1, sample_dir])
subprocess.call(["qsub", workspace + "download.sh", sample_file.get("download"),
sample_2, sample_dir])
else:
subprocess.call(["qsub", workspace + "download.sh", sample_file.get("download"),
sample_file.get("filename"), sample_dir])
PBSTracker.trackPBSQueue(1, "download")
## running fastqc for all samples
def run_fastqc(workflow, project_name, sample_list):
workspace = root_dir + "/" + workflow + "/scripts/"
sample_dir = data_dir + "/" + project_name + "/" + workflow + "/"
print "executing fastqc..."
for sample_file in sample_list:
if sample_file.get("filename").find(",") > -1:
sample_1 = sample_file.get("filename")[:sample_file.get("filename").find(",")]
sample_2 = sample_file.get("filename")[sample_file.get("filename").find(",") + 2:]
output_file = sample_1.replace(".fastq", "_fastqc.zip")
subprocess.call(["qsub", workspace + "fastqc.sh", sample_dir + sample_1,
sample_dir + output_file])
output_file = sample_2.replace(".fastq", "_fastqc.zip")
subprocess.call(["qsub", workspace + "fastqc.sh", sample_dir + sample_2,
sample_dir + output_file])
else:
output_file = sample_file.get("filename").replace(".fastq", "_fastqc.zip")
subprocess.call(["qsub", workspace + "fastqc.sh", sample_dir + sample_file.get("filename"),
sample_dir + output_file])
PBSTracker.trackPBSQueue(1, "fastqc")
## executing ChipSeq Sequencing alignment
def run_alignment(workflow, project_name, sample_list):
workspace = root_dir + "/" + workflow + "/scripts/"
sample_dir = data_dir + "/" + project_name + "/" + workflow + "/"
print "executing alignment..."
for sample_file in sample_list:
if sample_file.get("filename").find(",") > -1:
sample_1 = sample_file.get("filename")[:sample_file.get("filename").find(",")]
sample_2 = sample_file.get("filename")[sample_file.get("filename").find(",") + 2:]
subprocess.call(["qsub", "-pe", "smp", "4", workspace + "alignment.sh", sample_dir + sample_1])
subprocess.call(["qsub", "-pe", "smp", "4", workspace + "alignment.sh", sample_dir + sample_2])
else:
subprocess.call(["qsub", "-pe", "smp", "4", workspace + "alignment.sh", sample_dir + sample_file.get("filename")])
PBSTracker.trackPBSQueue(1, "alignment")
## make tag directory
def make_tag_directory(workflow, project_name, sample_list):
workspace = root_dir + "/" + workflow + "/scripts/"
sample_dir = data_dir + "/" + project_name + "/" + workflow + "/"
print "executing make_tag_directory..."
for sample_file in sample_list:
if sample_file.get("filename").find(",") > -1:
sample_1 = sample_file.get("filename")[:sample_file.get("filename").find(",")]
sample_2 = sample_file.get("filename")[sample_file.get("filename").find(",") + 2:]
input_file = sample_1.replace(".fastq", ".fastq.sam")
output_tag_folder = sample_1[:-6]
subprocess.call(["qsub", workspace + "make_tag_directory.sh",
sample_dir + output_tag_folder, sample_dir + input_file])
input_file = sample_2.replace(".fastq", ".fastq.sam")
output_tag_folder = sample_2[:-6]
subprocess.call(["qsub", workspace + "make_tag_directory.sh",
sample_dir + output_tag_folder, sample_dir + input_file])
else:
input_file = sample_file.get("filename").replace(".fastq", ".fastq.sam")
output_tag_folder = sample_file.get("filename")[:-6]
subprocess.call(["qsub", workspace + "make_tag_directory.sh",
sample_dir + output_tag_folder, sample_dir + input_file])
PBSTracker.trackPBSQueue(1, "make_tag")
## make UCSC file
def make_UCSC_file(workflow, project_name, sample_list):
workspace = root_dir + "/" + workflow + "/scripts/"
sample_dir = data_dir + "/" + project_name + "/" + workflow + "/"
print "executing make_UCSC_directory..."
for sample_file in sample_list:
if sample_file.get("filename").find(",") > -1:
sample_1 = sample_file.get("filename")[:sample_file.get("filename").find(",")]
sample_2 = sample_file.get("filename")[sample_file.get("filename").find(",") + 2:]
input_tag_folder = sample_1[:-6]
subprocess.call(["qsub", workspace + "make_UCSC_file.sh", sample_dir + input_tag_folder])
input_tag_folder = sample_2[:-6]
subprocess.call(["qsub", workspace + "make_UCSC_file.sh", sample_dir + input_tag_folder])
else:
input_tag_folder = sample_file.get("filename")[:-6]
subprocess.call(["qsub", workspace + "make_UCSC_file.sh", sample_dir + input_tag_folder])
PBSTracker.trackPBSQueue(1, "make_UCSC")
## find peaks
def find_peaks(workflow, project_name, sample_list, style):
workspace = root_dir + "/" + workflow + "/scripts/"
sample_dir = data_dir + "/" + project_name + "/" + workflow + "/"
print "executing find_peaks..."
for sample_file in sample_list:
if sample_file.get("filename").find(",") < 0:
chip_tag_folder = sample_file.get("filename")[:-6]
if style == "factor":
output_peak_file = chip_tag_folder + "/peaks.txt"
subprocess.call(["qsub", workspace + "find_peaks.sh", sample_dir + chip_tag_folder, style,
sample_dir + output_peak_file])
if style == "histone":
output_peak_file = chip_tag_folder + "/regions.txt"
subprocess.call(["qsub", workspace + "find_peaks.sh", sample_dir + chip_tag_folder, style,
sample_dir + output_peak_file])
else:
chip_tag_folder = sample_file.get("filename")[:sample_file.get("filename").find(",")][:-6]
input_tag_folder = sample_file.get("filename")[sample_file.get("filename").find(",") + 2:][:-6]
print chip_tag_folder
print input_tag_folder
if style == "factor":
output_peak_file = chip_tag_folder + "_vs_" + input_tag_folder + "/peaks.txt"
subprocess.call(["qsub", workspace + "find_peaks.sh", sample_dir + chip_tag_folder, style,
sample_dir + output_peak_file, sample_dir + input_tag_folder])
if style == "histone":
output_peak_file = chip_tag_folder + "_vs_" + input_tag_folder + "/regions.txt"
subprocess.call(["qsub", workspace + "find_peaks.sh", sample_dir + chip_tag_folder, style,
sample_dir + output_peak_file, sample_dir + input_tag_folder])
PBSTracker.trackPBSQueue(1, "find_peaks")
## annotate peaks
def annotate_peaks(workflow, project_name, sample_list, style, genome):
workspace = root_dir + "/" + workflow + "/scripts/"
sample_dir = data_dir + "/" + project_name + "/" + workflow + "/"
print "executing annotate_peaks..."
for sample_file in sample_list:
if sample_file.get("filename").find(",") < 0:
input_tag_folder = sample_file.get("filename")[:-6]
if style == "factor":
subprocess.call(["qsub", workspace + "annotate_peaks.sh", sample_dir + input_tag_folder + "/peaks.txt",
sample_dir + input_tag_folder + "/peaks.annotate.txt", input_tag_folder, genome])
if style == "histone":
subprocess.call(["qsub", workspace + "annotate_peaks.sh", sample_dir + input_tag_folder + "/regions.txt",
sample_dir + input_tag_folder + "/regions.annotate.txt", input_tag_folder, genome])
else:
chip_tag_folder = sample_file.get("filename")[:sample_file.get("filename").find(",")][:-6]
input_tag_folder = sample_file.get("filename")[sample_file.get("filename").find(",") + 2:][:-6]
group_tag_folder = chip_tag_folder + "_vs_" + input_tag_folder
if style == "factor":
subprocess.call(["qsub", workspace + "annotate_peaks.sh", sample_dir + group_tag_folder + "/peaks.txt",
sample_dir + group_tag_folder + "/peaks.annotate.txt", sample_dir + group_tag_folder, genome])
if style == "histone":
subprocess.call(["qsub", workspace + "annotate_peaks.sh", sample_dir + group_tag_folder + "/regions.txt",
sample_dir + group_tag_folder + "/regions.annotate.txt", sample_dir + group_tag_folder, genome])
PBSTracker.trackPBSQueue(1, "annotate_p")
## pos2bed
def pos2bed(workflow, project_name, sample_list, style):
workspace = root_dir + "/" + workflow + "/scripts/"
sample_dir = data_dir + "/" + project_name + "/" + workflow + "/"
print "executing pos2bed..."
for sample_file in sample_list:
if sample_file.get("filename").find(",") < 0:
            input_tag_folder = sample_file.get("filename")[:-6]
if style == "factor":
subprocess.call(["qsub", workspace + "pos2bed.sh", sample_dir + input_tag_folder + "/peaks.txt",
sample_dir + input_tag_folder + "/output.bed"])
if style == "histone":
subprocess.call(["qsub", workspace + "pos2bed.sh", sample_dir + input_tag_folder + "/regions.txt",
sample_dir + input_tag_folder + "/output.bed"])
else:
chip_tag_folder = sample_file.get("filename")[:sample_file.get("filename").find(",")][:-6]
input_tag_folder = sample_file.get("filename")[sample_file.get("filename").find(",") + 2:][:-6]
group_tag_folder = chip_tag_folder + "_vs_" + input_tag_folder
if style == "factor":
subprocess.call(["qsub", workspace + "pos2bed.sh", sample_dir + group_tag_folder + "/peaks.txt",
sample_dir + group_tag_folder + "/output.bed"])
if style == "histone":
subprocess.call(["qsub", workspace + "pos2bed.sh", sample_dir + group_tag_folder + "/regions.txt",
sample_dir + group_tag_folder + "/output.bed"])
PBSTracker.trackPBSQueue(1, "pos2bed")
## find motifs genome
def find_motifs_genome(workflow, project_name, sample_list, style, genome):
workspace = root_dir + "/" + workflow + "/scripts/"
sample_dir = data_dir + "/" + project_name + "/" + workflow + "/"
print "executing find_motifs_genome..."
for sample_file in sample_list:
if sample_file.get("filename").find(",") < 0:
input_tag_folder = sample_file.get("filename")[:-6]
if style == "factor":
subprocess.call(["qsub", workspace + "find_motifs_genome.sh", sample_dir + input_tag_folder + "/peaks.txt",
sample_dir + input_tag_folder + "/MotifOutput/", genome])
if style == "histone":
subprocess.call(["qsub", workspace + "find_motifs_genome.sh", sample_dir + input_tag_folder + "/regions.txt",
sample_dir + input_tag_folder + "/MotifOutput/", genome])
else:
chip_tag_folder = sample_file.get("filename")[:sample_file.get("filename").find(",")][:-6]
input_tag_folder = sample_file.get("filename")[sample_file.get("filename").find(",") + 2:][:-6]
group_tag_folder = chip_tag_folder + "_vs_" + input_tag_folder
if style == "factor":
subprocess.call(["qsub", workspace + "find_motifs_genome.sh", sample_dir + group_tag_folder + "/peaks.txt",
sample_dir + group_tag_folder + "/MotifOutput/", genome])
if style == "histone":
subprocess.call(["qsub", workspace + "find_motifs_genome.sh", sample_dir + group_tag_folder + "/regions.txt",
sample_dir + group_tag_folder + "/MotifOutput/", genome])
PBSTracker.trackPBSQueue(1, "find_motif")
## uploading resulting files to s3.
def upload_files(workflow, project_name, s3_output_files_address):
print "executing upload files..."
workspace = root_dir + "/" + workflow + "/scripts/"
sample_dir = data_dir + "/" + project_name + "/" + workflow + "/"
subprocess.call(["qsub", workspace + "upload.sh", sample_dir, s3_output_files_address + "/" + project_name + "/" + workflow])
PBSTracker.trackPBSQueue(1, "upload")
if __name__ == '__main__':
#yaml_file = "/Users/guorongxu/Desktop/workspace/projects/jupyter-genomics_bitbucket/src/awsCluster/chipSeq/Sample_cDNA.yaml"
yaml_file = sys.argv[1]
run_analysis(yaml_file)
|
ucsd-ccbb/jupyter-genomics
|
src/awsCluster/server/ChipSeqPipeline/homer_workflow/ChipSeqPipeline.py
|
Python
|
mit
| 15,575
|
def extractTttranslationsWordpressCom(item):
'''
Parser for 'tttranslations.wordpress.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('Reiryuu Academy Student Council', 'Reiryuu Academy Student Council', 'translated'),
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
fake-name/ReadableWebProxy
|
WebMirror/management/rss_parser_funcs/feed_parse_extractTttranslationsWordpressCom.py
|
Python
|
bsd-3-clause
| 680
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2018, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
"""
Data Objects
###################################
Data objects are used to store typed data coming from an external source (for
example a file on disk). There are three primary data objects provided by
this module, :class:`~exa.core.numerical.Series`, :class:`~exa.core.numerical.DataFrame`,
and :class:`~exa.core.numerical.Field`. The purpose of these objects is to facilitate
conversion of data into "traits" used in visualization and enforce relationships
between data objects in a given container. Any of the objects provided by this
module may be extended.
"""
import warnings
import numpy as np
import pandas as pd
from exa.core.error import RequiredColumnError
class Numerical(object):
"""
Base class for :class:`~exa.core.numerical.Series`,
:class:`~exa.core.numerical.DataFrame`, and :class:`~exa.numerical.Field`
objects, providing default trait functionality and clean representations
when present as part of containers.
"""
def slice_naive(self, key):
"""
Slice a data object based on its index, either by value (.loc) or
position (.iloc).
Args:
key: Single index value, slice, tuple, or list of indices/positionals
Returns:
data: Slice of self
"""
cls = self.__class__
key = check_key(self, key)
return cls(self.loc[key])
def __repr__(self):
name = self.__class__.__name__
return '{0}{1}'.format(name, self.shape)
def __str__(self):
return self.__repr__()
class BaseSeries(Numerical):
"""
Base class for dense and sparse series objects (labeled arrays).
Attributes:
_sname (str): May have a required name (default None)
        _iname (str): May have a required index name
_stype (type): May have a required value type
_itype (type): May have a required index type
"""
_metadata = ['name', 'meta']
# These attributes may be set when subclassing Series
_sname = None # Series may have a required name
_iname = None # Series may have a required index name
_stype = None # Series may have a required value type
_itype = None # Series may have a required index type
def __init__(self, *args, **kwargs):
meta = kwargs.pop('meta', None)
super(BaseSeries, self).__init__(*args, **kwargs)
if self._sname is not None and self.name != self._sname:
if self.name is not None:
warnings.warn("Object's name changed")
self.name = self._sname
if self._iname is not None and self.index.name != self._iname:
if self.index.name is not None:
warnings.warn("Object's index name changed")
self.index.name = self._iname
self.meta = meta
class BaseDataFrame(Numerical):
"""
Base class for dense and sparse dataframe objects (labeled matrices).
Note:
If the _cardinal attribute is populated, it will automatically be added
to the _categories and _columns attributes.
Attributes:
_cardinal (tuple): Tuple of column name and raw type that acts as foreign key to index of another table
_index (str): Name of index (may be used as foreign key in another table)
_columns (list): Required columns
_categories (dict): Dict of column names, raw types that if present will be converted to and from categoricals automatically
"""
_metadata = ['name', 'meta']
_cardinal = None # Tuple of column name and raw type that acts as foreign key to index of another table
_index = None # Name of index (may be used as foreign key in another table)
_columns = [] # Required columns
_categories = {} # Dict of column names, raw types that if present will be converted to and from categoricals automatically
def cardinal_groupby(self):
"""
        Group this object on its cardinal dimension (_cardinal).
Returns:
grpby: Pandas groupby object (grouped on _cardinal)
"""
g, t = self._cardinal
self[g] = self[g].astype(t)
grpby = self.groupby(g)
self[g] = self[g].astype('category')
return grpby
def slice_cardinal(self, key):
"""
Get the slice of this object by the value or values of the cardinal
dimension.
"""
cls = self.__class__
key = check_key(self, key, cardinal=True)
return cls(self[self[self._cardinal[0]].isin(key)])
def __init__(self, *args, **kwargs):
meta = kwargs.pop('meta', None)
super(BaseDataFrame, self).__init__(*args, **kwargs)
self.meta = meta
class Series(BaseSeries, pd.Series):
"""
A labeled array.
.. code-block:: Python
class MySeries(exa.core.numerical.Series):
_sname = 'data' # series default name
_iname = 'data_index' # series default index name
seri = MySeries(np.random.rand(10**5))
"""
@property
def _constructor(self):
return Series
def copy(self, *args, **kwargs):
"""
Make a copy of this object.
See Also:
For arguments and description of behavior see `pandas docs`_.
.. _pandas docs: http://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.copy.html
"""
cls = self.__class__ # Note that type conversion does not perform copy
return cls(pd.Series(self).copy(*args, **kwargs))
class DataFrame(BaseDataFrame, pd.DataFrame):
"""
A data table
.. code-block:: Python
class MyDF(exa.core.numerical.DataFrame):
_cardinal = ('cardinal', int)
_index = 'mydf_index'
_columns = ['x', 'y', 'z', 'symbol']
_categories = {'symbol': str}
"""
_constructor_sliced = Series
@property
def _constructor(self):
return DataFrame
def copy(self, *args, **kwargs):
"""
Make a copy of this object.
See Also:
For arguments and description of behavior see `pandas docs`_.
.. _pandas docs: http://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.copy.html
"""
cls = self.__class__ # Note that type conversion does not perform copy
return cls(pd.DataFrame(self).copy(*args, **kwargs))
def _revert_categories(self):
"""
Inplace conversion to categories.
"""
for column, dtype in self._categories.items():
if column in self.columns:
self[column] = self[column].astype(dtype)
def _set_categories(self):
"""
Inplace conversion from categories.
"""
for column, _ in self._categories.items():
if column in self.columns:
self[column] = self[column].astype('category')
def __init__(self, *args, **kwargs):
super(DataFrame, self).__init__(*args, **kwargs)
if self._cardinal is not None:
self._categories[self._cardinal[0]] = self._cardinal[1]
self._columns.append(self._cardinal[0])
self._set_categories()
if len(self) > 0:
name = self.__class__.__name__
if self._columns:
missing = set(self._columns).difference(self.columns)
if missing:
raise RequiredColumnError(missing, name)
if self.index.name != self._index and self._index is not None:
if self.index.name is not None and self.index.name.decode('utf-8') != self._index:
warnings.warn("Object's index name changed from {} to {}".format(self.index.name, self._index))
self.index.name = self._index
class Field(DataFrame):
"""
A field is defined by field data and field values. Field data defines the
discretization of the field (i.e. its origin in a given space, number of
    steps/step spacing, and endpoint, for example). Field values can be scalar
(series) and/or vector (dataframe) data defining the magnitude and/or direction
at each given point.
Note:
The convention for generating the discrete field data and ordering of
the field values must be the same (e.g. discrete field points are
generated x, y, then z and scalar field values are a series object
ordered looping first over x then y, then z).
In addition to the :class:`~exa.core.numerical.DataFrame` attributes, this object
    has the following:
        field_values (list): Series or dataframe objects containing the field values
"""
@property
def _constructor(self):
return Field
def copy(self, *args, **kwargs):
"""
Make a copy of this object.
Note:
Copies both field data and field values.
See Also:
For arguments and description of behavior see `pandas docs`_.
.. _pandas docs: http://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.copy.html
"""
cls = self.__class__ # Note that type conversion does not perform copy
data = pd.DataFrame(self).copy(*args, **kwargs)
values = [field.copy() for field in self.field_values]
return cls(data, field_values=values)
def memory_usage(self):
"""
Get the combined memory usage of the field data and field values.
"""
data = super(Field, self).memory_usage()
values = 0
for value in self.field_values:
values += value.memory_usage()
data['field_values'] = values
return data
def slice_naive(self, key):
"""
Naively (on index) slice the field data and values.
Args:
key: Int, slice, or iterable to select data and values
Returns:
field: Sliced field object
"""
cls = self.__class__
key = check_key(self, key)
enum = pd.Series(range(len(self)))
enum.index = self.index
        values = [self.field_values[i] for i in enum[key].values]
data = self.loc[key]
return cls(data, field_values=values)
#def slice_cardinal(self, key):
# cls = self.__class__
# grpby = self.cardinal_groupby()
def __init__(self, *args, **kwargs):
# The following check allows creation of a single field (whose field data
# comes from a series object and field values from another series object).
field_values = kwargs.pop("field_values", None)
if args and isinstance(args[0], pd.Series):
args = (args[0].to_frame().T, )
super(Field, self).__init__(*args, **kwargs)
self._metadata = ['field_values']
if isinstance(field_values, (list, tuple, np.ndarray)):
self.field_values = [Series(v) for v in field_values] # Convert type for nice repr
elif field_values is None:
self.field_values = []
elif isinstance(field_values, pd.Series):
self.field_values = [Series(field_values)]
else:
raise TypeError("Wrong type for field_values with type {}".format(type(field_values)))
for i in range(len(self.field_values)):
self.field_values[i].name = i
class Field3D(Field):
"""
Dataframe for storing dimensions of a scalar or vector field of 3D space.
+-------------------+----------+-------------------------------------------+
| Column | Type | Description |
+===================+==========+===========================================+
| nx | int | number of grid points in x |
+-------------------+----------+-------------------------------------------+
| ny | int | number of grid points in y |
+-------------------+----------+-------------------------------------------+
| nz | int | number of grid points in z |
+-------------------+----------+-------------------------------------------+
| ox | float | field origin point in x |
+-------------------+----------+-------------------------------------------+
| oy | float | field origin point in y |
+-------------------+----------+-------------------------------------------+
| oz | float | field origin point in z |
+-------------------+----------+-------------------------------------------+
| xi | float | First component in x |
+-------------------+----------+-------------------------------------------+
| xj | float | Second component in x |
+-------------------+----------+-------------------------------------------+
| xk | float | Third component in x |
+-------------------+----------+-------------------------------------------+
| yi | float | First component in y |
+-------------------+----------+-------------------------------------------+
| yj | float | Second component in y |
+-------------------+----------+-------------------------------------------+
| yk | float | Third component in y |
+-------------------+----------+-------------------------------------------+
| zi | float | First component in z |
+-------------------+----------+-------------------------------------------+
| zj | float | Second component in z |
+-------------------+----------+-------------------------------------------+
| zk | float | Third component in z |
+-------------------+----------+-------------------------------------------+
Note:
Each field should be flattened into an N x 1 (scalar) or N x 3 (vector)
series or dataframe respectively. The orientation of the flattening
should have x as the outer loop and z values as the inner loop (for both
cases). This is sometimes called C-major or C-style order, and has
the last index changing the fastest and the first index changing the
slowest.
See Also:
:class:`~exa.core.numerical.Field`
"""
_columns = ['nx', 'ny', 'nz', 'ox', 'oy', 'oz', 'xi', 'xj', 'xk',
'yi', 'yj', 'yk', 'zi', 'zj', 'zk']
@property
def _constructor(self):
return Field3D
def check_key(data_object, key, cardinal=False):
"""
Update the value of an index key by matching values or getting positionals.
"""
itype = (int, np.int32, np.int64)
if not isinstance(key, itype + (slice, tuple, list, np.ndarray)):
raise KeyError("Unknown key type {} for key {}".format(type(key), key))
keys = data_object.index.values
if cardinal and data_object._cardinal is not None:
keys = data_object[data_object._cardinal[0]].unique()
    elif isinstance(key, itype) and key in keys:
        key = [key]
    elif isinstance(key, itype) and key < 0:
        key = [data_object.index.values[key]]
elif isinstance(key, itype):
key = [key]
elif isinstance(key, slice):
key = list(sorted(data_object.index.values[key]))
elif isinstance(key, (tuple, list, pd.Index)) and not np.all(k in keys for k in key):
key = list(sorted(data_object.index.values[key]))
return key
class SparseDataFrame(BaseDataFrame, pd.SparseDataFrame):
@property
def _constructor(self):
return SparseDataFrame
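# --- Hedged usage sketch (editor's addition, not part of the exa source) ---
# A minimal illustration of the subclassing hooks defined above: _columns enforces
# required columns, _categories converts them on construction, and slice_naive
# preserves the subclass type.  "Atom" and its columns are made-up example names.
if __name__ == "__main__":
    class Atom(DataFrame):
        _index = 'atom'
        _columns = ['x', 'y', 'z', 'symbol']
        _categories = {'symbol': str}

    atom = Atom([[0.0, 0.0, 0.0, 'H'], [0.0, 0.0, 0.74, 'H']],
                columns=['x', 'y', 'z', 'symbol'])
    print(atom['symbol'].dtype)     # category, set by _set_categories()
    print(atom.slice_naive([0]))    # Atom(1, 4) -- slicing keeps the subclass

    # A single scalar field: field data from one series, values from another,
    # flattened in C order (x outer loop, z inner loop) as the Field notes require.
    data = pd.Series({'nx': 2, 'ny': 2, 'nz': 2, 'ox': 0.0, 'oy': 0.0, 'oz': 0.0})
    field = Field(data, field_values=pd.Series(np.random.rand(8)))
    print(field.field_values[0].shape)   # (8,) -- one value per grid point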
|
avmarchenko/exa
|
exa/core/numerical.py
|
Python
|
apache-2.0
| 15,937
|
# Copyright 2006 James Tauber and contributors
# Copyright (C) 2009 Luke Kenneth Casson Leighton <lkcl@lkcl.net>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyjamas import DOM
from pyjamas import Factory
from pyjamas import log
from pyjamas.ui.UIObject import UIObject
from pyjamas.ui import Event
from pyjamas.ui.ClickListener import ClickHandler
from pyjamas.ui.FocusListener import FocusHandler
from pyjamas.ui.KeyboardListener import KeyboardHandler
from pyjamas.ui.MouseListener import MouseHandler, MouseWheelHandler
from pyjamas.ui.DragHandler import DragHandler
from pyjamas.ui.DropHandler import DropHandler
class Widget(UIObject):
"""
Base class for most of the UI classes. This class provides basic services
used by any Widget, including management of parents and adding/removing the
event handler association with the DOM.
"""
def __init__(self, **kwargs):
self.attached = False
self.parent = None
self.layoutData = None
self.contextMenu = None
UIObject.__init__(self, **kwargs)
def getLayoutData(self):
return self.layoutData
def getParent(self):
"""Widgets are kept in a hierarchy, and widgets that have been added to a panel
will have a parent widget that contains them. This retrieves the containing
widget for this widget."""
return self.parent
def isAttached(self):
"""Return whether or not this widget has been attached to the document."""
return self.attached
def setContextMenu(self, menu):
self.contextMenu = menu
if menu is not None:
self.sinkEvents(Event.ONCONTEXTMENU)
else:
self.unsinkEvents(Event.ONCONTEXTMENU)
def onBrowserEvent(self, event):
# farm out the event to convenience handlers.
# detect existence by checking for the listener lists of each
# type of handler. there's probably a better way to do this...
if hasattr(self, "_clickListeners"):
ClickHandler.onBrowserEvent(self, event)
if hasattr(self, "_keyboardListeners"):
KeyboardHandler.onBrowserEvent(self, event)
if hasattr(self, "_mouseListeners"):
MouseHandler.onBrowserEvent(self, event)
if hasattr(self, "_mouseWheelListeners"):
MouseWheelHandler.onBrowserEvent(self, event)
if hasattr(self, "_focusListeners"):
FocusHandler.onBrowserEvent(self, event)
if hasattr(self, "_dragListeners"):
DragHandler.onBrowserEvent(self, event)
if hasattr(self, "_dropListeners"):
DropHandler.onBrowserEvent(self, event)
if self.contextMenu is None:
return True
type = DOM.eventGetType(event)
if type == "contextmenu":
DOM.eventCancelBubble(event, True)
DOM.eventPreventDefault(event)
self.contextMenu.onContextMenu(self)
return False
return True
def onLoad(self):
pass
def doDetachChildren(self):
pass
def doAttachChildren(self):
pass
def onAttach(self):
"""Called when this widget has an element, and that element is on the document's
DOM tree, and we have a parent widget."""
if self.isAttached():
return
self.attached = True
DOM.setEventListener(self.getElement(), self)
self.doAttachChildren()
self.onLoad()
def onDetach(self):
"""Called when this widget is being removed from the DOM tree of the document."""
if not self.isAttached():
return
self.doDetachChildren()
self.attached = False
DOM.setEventListener(self.getElement(), None)
def setLayoutData(self, layoutData):
self.layoutData = layoutData
def setParent(self, parent):
"""Update the parent attribute. If the parent is currently attached to the DOM this
assumes we are being attached also and calls onAttach()."""
oldparent = self.parent
self.parent = parent
if parent is None:
if oldparent is not None and oldparent.attached:
self.onDetach()
elif parent.attached:
self.onAttach()
def removeFromParent(self):
"""Remove ourself from our parent. The parent widget will call setParent(None) on
us automatically"""
if hasattr(self.parent, "remove"):
self.parent.remove(self)
def getID(self):
"""Get the id attribute of the associated DOM element."""
return DOM.getAttribute(self.getElement(), "id")
def setID(self, id):
"""Set the id attribute of the associated DOM element."""
DOM.setAttribute(self.getElement(), "id", id)
Factory.registerClass('pyjamas.ui.Widget', 'Widget', Widget)
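# --- Hedged sketch (editor's addition, not part of the pyjamas source) ---
# A minimal Widget subclass showing the usual construction pattern: create the
# DOM element first, then let Widget/UIObject process keyword properties.
# _ExampleBox is an illustrative name; DOM.createDiv(), DOM.setInnerHTML() and
# setElement()/getElement() are existing pyjamas APIs.
class _ExampleBox(Widget):
    def __init__(self, **kwargs):
        self.setElement(DOM.createDiv())
        Widget.__init__(self, **kwargs)

    def onLoad(self):
        # Runs once onAttach() has put the element into the document's DOM tree
        # and registered this widget as the element's event listener.
        DOM.setInnerHTML(self.getElement(), "attached")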
|
anandology/pyjamas
|
library/gwt/ui/Widget.py
|
Python
|
apache-2.0
| 5,366
|
from .decorators import render_to_json
from .helper import HeadFileUploader, ImageFactory, BaseModelManager, get_first_letter, convertjson
|
xlk521/cloudguantou
|
utils/__init__.py
|
Python
|
bsd-3-clause
| 138
|
#! /usr/bin/python
import readline
commands = [
"eggs",
"cheese",
"bread",
"five"
]
def completer(text, state):
print "in completer"
options = [i for i in commands if i.startswith(text)]
if state < len(options):
return options[state]
else:
return None
readline.parse_and_bind("tab: complete")
readline.set_completer(completer)
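# Hedged usage sketch (editor's addition): the completer above only fires once
# something actually reads from stdin via readline.  Press <Tab> at the prompt
# to cycle through "eggs", "cheese", "bread", "five"; Ctrl-D exits.
if __name__ == "__main__":
    while True:
        try:
            line = raw_input("item> ")
        except EOFError:
            break
        print "you typed:", line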
|
CospanDesign/python
|
autocomplete/autocomplete.py
|
Python
|
mit
| 379
|
from __future__ import unicode_literals
import io
import os
import re
import warnings
from django.apps import apps
from django.conf import settings
from django.core.management.base import CommandError
from django.db import models, router
from django.utils.deprecation import RemovedInDjango19Warning
def check_for_migrations(app_config, connection):
# Inner import, else tests imports it too early as it needs settings
from django.db.migrations.loader import MigrationLoader
loader = MigrationLoader(connection)
if app_config.label in loader.migrated_apps:
raise CommandError(
"App '%s' has migrations. Only the sqlmigrate and sqlflush commands "
"can be used when an app has migrations." % app_config.label
)
def sql_create(app_config, style, connection):
"Returns a list of the CREATE TABLE SQL statements for the given app."
check_for_migrations(app_config, connection)
if connection.settings_dict['ENGINE'] == 'django.db.backends.dummy':
# This must be the "dummy" database backend, which means the user
# hasn't set ENGINE for the database.
raise CommandError("Django doesn't know which syntax to use for your SQL statements,\n" +
"because you haven't properly specified the ENGINE setting for the database.\n" +
"see: https://docs.djangoproject.com/en/dev/ref/settings/#databases")
# Get installed models, so we generate REFERENCES right.
# We trim models from the current app so that the sqlreset command does not
# generate invalid SQL (leaving models out of known_models is harmless, so
# we can be conservative).
app_models = list(app_config.get_models(include_auto_created=True))
final_output = []
tables = connection.introspection.table_names()
known_models = set(model for model in connection.introspection.installed_models(tables) if model not in app_models)
pending_references = {}
for model in router.get_migratable_models(app_config, connection.alias, include_auto_created=True):
output, references = connection.creation.sql_create_model(model, style, known_models)
final_output.extend(output)
for refto, refs in references.items():
pending_references.setdefault(refto, []).extend(refs)
if refto in known_models:
final_output.extend(connection.creation.sql_for_pending_references(refto, style, pending_references))
final_output.extend(connection.creation.sql_for_pending_references(model, style, pending_references))
# Keep track of the fact that we've created the table for this model.
known_models.add(model)
# Handle references to tables that are from other apps
# but don't exist physically.
not_installed_models = set(pending_references.keys())
if not_installed_models:
alter_sql = []
for model in not_installed_models:
alter_sql.extend(['-- ' + sql for sql in
connection.creation.sql_for_pending_references(model, style, pending_references)])
if alter_sql:
final_output.append('-- The following references should be added but depend on non-existent tables:')
final_output.extend(alter_sql)
return final_output
def sql_delete(app_config, style, connection, close_connection=True):
"Returns a list of the DROP TABLE SQL statements for the given app."
check_for_migrations(app_config, connection)
# This should work even if a connection isn't available
try:
cursor = connection.cursor()
except Exception:
cursor = None
try:
# Figure out which tables already exist
if cursor:
table_names = connection.introspection.table_names(cursor)
else:
table_names = []
output = []
# Output DROP TABLE statements for standard application tables.
to_delete = set()
references_to_delete = {}
app_models = router.get_migratable_models(app_config, connection.alias, include_auto_created=True)
for model in app_models:
if cursor and connection.introspection.table_name_converter(model._meta.db_table) in table_names:
# The table exists, so it needs to be dropped
opts = model._meta
for f in opts.local_fields:
if f.rel and f.rel.to not in to_delete:
references_to_delete.setdefault(f.rel.to, []).append((model, f))
to_delete.add(model)
for model in app_models:
if connection.introspection.table_name_converter(model._meta.db_table) in table_names:
output.extend(connection.creation.sql_destroy_model(model, references_to_delete, style))
finally:
# Close database connection explicitly, in case this output is being piped
# directly into a database client, to avoid locking issues.
if cursor and close_connection:
cursor.close()
connection.close()
if not output:
output.append('-- App creates no tables in the database. Nothing to do.')
return output[::-1] # Reverse it, to deal with table dependencies.
def sql_flush(style, connection, only_django=False, reset_sequences=True, allow_cascade=False):
"""
Returns a list of the SQL statements used to flush the database.
If only_django is True, then only table names that have associated Django
models and are in INSTALLED_APPS will be included.
"""
if only_django:
tables = connection.introspection.django_table_names(only_existing=True, include_views=False)
else:
tables = connection.introspection.table_names(include_views=False)
seqs = connection.introspection.sequence_list() if reset_sequences else ()
statements = connection.ops.sql_flush(style, tables, seqs, allow_cascade)
return statements
def sql_custom(app_config, style, connection):
"Returns a list of the custom table modifying SQL statements for the given app."
check_for_migrations(app_config, connection)
output = []
app_models = router.get_migratable_models(app_config, connection.alias)
for model in app_models:
output.extend(custom_sql_for_model(model, style, connection))
return output
def sql_indexes(app_config, style, connection):
"Returns a list of the CREATE INDEX SQL statements for all models in the given app."
check_for_migrations(app_config, connection)
output = []
for model in router.get_migratable_models(app_config, connection.alias, include_auto_created=True):
output.extend(connection.creation.sql_indexes_for_model(model, style))
return output
def sql_destroy_indexes(app_config, style, connection):
"Returns a list of the DROP INDEX SQL statements for all models in the given app."
check_for_migrations(app_config, connection)
output = []
for model in router.get_migratable_models(app_config, connection.alias, include_auto_created=True):
output.extend(connection.creation.sql_destroy_indexes_for_model(model, style))
return output
def sql_all(app_config, style, connection):
    "Returns a list of CREATE TABLE SQL, initial-data inserts, and CREATE INDEX SQL for the given module."
    check_for_migrations(app_config, connection)
return (
sql_create(app_config, style, connection) +
sql_custom(app_config, style, connection) +
sql_indexes(app_config, style, connection)
)
def _split_statements(content):
# Private API only called from code that emits a RemovedInDjango19Warning.
comment_re = re.compile(r"^((?:'[^']*'|[^'])*?)--.*$")
statements = []
statement = []
for line in content.split("\n"):
cleaned_line = comment_re.sub(r"\1", line).strip()
if not cleaned_line:
continue
statement.append(cleaned_line)
if cleaned_line.endswith(";"):
statements.append(" ".join(statement))
statement = []
return statements
def custom_sql_for_model(model, style, connection):
opts = model._meta
app_dirs = []
app_dir = apps.get_app_config(model._meta.app_label).path
app_dirs.append(os.path.normpath(os.path.join(app_dir, 'sql')))
# Deprecated location -- remove in Django 1.9
old_app_dir = os.path.normpath(os.path.join(app_dir, 'models/sql'))
if os.path.exists(old_app_dir):
warnings.warn("Custom SQL location '<app_label>/models/sql' is "
"deprecated, use '<app_label>/sql' instead.",
RemovedInDjango19Warning)
app_dirs.append(old_app_dir)
output = []
# Post-creation SQL should come before any initial SQL data is loaded.
# However, this should not be done for models that are unmanaged or
# for fields that are part of a parent model (via model inheritance).
if opts.managed:
post_sql_fields = [f for f in opts.local_fields if hasattr(f, 'post_create_sql')]
for f in post_sql_fields:
output.extend(f.post_create_sql(style, model._meta.db_table))
# Find custom SQL, if it's available.
backend_name = connection.settings_dict['ENGINE'].split('.')[-1]
sql_files = []
for app_dir in app_dirs:
sql_files.append(os.path.join(app_dir, "%s.%s.sql" % (opts.model_name, backend_name)))
sql_files.append(os.path.join(app_dir, "%s.sql" % opts.model_name))
for sql_file in sql_files:
if os.path.exists(sql_file):
with io.open(sql_file, encoding=settings.FILE_CHARSET) as fp:
output.extend(connection.ops.prepare_sql_script(fp.read(), _allow_fallback=True))
return output
def emit_pre_migrate_signal(create_models, verbosity, interactive, db):
# Emit the pre_migrate signal for every application.
for app_config in apps.get_app_configs():
if app_config.models_module is None:
continue
if verbosity >= 2:
print("Running pre-migrate handlers for application %s" % app_config.label)
models.signals.pre_migrate.send(
sender=app_config,
app_config=app_config,
verbosity=verbosity,
interactive=interactive,
using=db)
# For backwards-compatibility -- remove in Django 1.9.
models.signals.pre_syncdb.send(
sender=app_config.models_module,
app=app_config.models_module,
create_models=create_models,
verbosity=verbosity,
interactive=interactive,
db=db)
def emit_post_migrate_signal(created_models, verbosity, interactive, db):
# Emit the post_migrate signal for every application.
for app_config in apps.get_app_configs():
if app_config.models_module is None:
continue
if verbosity >= 2:
print("Running post-migrate handlers for application %s" % app_config.label)
models.signals.post_migrate.send(
sender=app_config,
app_config=app_config,
verbosity=verbosity,
interactive=interactive,
using=db)
# For backwards-compatibility -- remove in Django 1.9.
models.signals.post_syncdb.send(
sender=app_config.models_module,
app=app_config.models_module,
created_models=created_models,
verbosity=verbosity,
interactive=interactive,
db=db)
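# --- Hedged sketch (editor's addition, not part of the Django source) ---
# _split_statements() in isolation: it strips "--" comments and groups lines into
# ";"-terminated statements.  The SQL below is an arbitrary example input.
if __name__ == "__main__":
    demo = "CREATE TABLE t (id int); -- a comment\nINSERT INTO t VALUES (1);"
    print(_split_statements(demo))
    # expected: ['CREATE TABLE t (id int);', 'INSERT INTO t VALUES (1);']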
|
andyzsf/django
|
django/core/management/sql.py
|
Python
|
bsd-3-clause
| 11,489
|
# -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# Copyright 2014 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tree level output for R
This module defines functions that generate R code to make local
predictions
"""
import sys
from bigml.tree_utils import (
to_camel_js, sort_fields, docstring_comment, slugify,
INDENT, MAX_ARGS_LENGTH, TERM_OPTIONS, ITEM_OPTIONS,
TM_TOKENS, TM_FULL_TERM, TM_ALL)
from bigml.model import Model
from bigmler.export.out_tree.rtree import RTree
from bigmler.reports import BIGMLER_SCRIPT
# templates for static R code
TERM_TEMPLATE = "%s/static/out_model/term_analysis.R" % BIGMLER_SCRIPT
ITEMS_TEMPLATE = "%s/static/out_model/items_analysis.R" % BIGMLER_SCRIPT
def dot(name):
"""Creates a dot-separated name
"""
return slugify(name.replace(" ", "."))
class RModel(Model):
def __init__(self, model, api=None, fields=None):
"""Empty attributes to be overriden
"""
self.tree_class = RTree
Model.__init__(self, model, api, fields)
def plug_in(self, out=sys.stdout, filter_id=None, subtree=True):
"""Writes an R function that implements the model.
"""
# fill the dotted variable names with the R_KEYWORDS restrictions
objective_field = self.tree.fields[self.tree.objective_id]
camelcase = to_camel_js(objective_field['name'], False)
objective_field['CamelCase'] = camelcase
default = "NA"
args = []
for field in [(key, val) for key, val in
sort_fields(self.tree.fields)]:
field_obj = self.tree.fields[field[0]]
field_obj['dotted'] = dot(field_obj['name'])
args.append("%s=%s" % (field_obj['dotted'], default))
body, term_analysis_predicates, item_analysis_predicates = \
self.tree.plug_in_body()
terms_body = ""
items_body = ""
if term_analysis_predicates:
terms_body = self.r_term_analysis_body(term_analysis_predicates)
if item_analysis_predicates:
items_body = self.r_item_analysis_body(item_analysis_predicates)
predictor_definition = ("predict%s <- function" %
camelcase)
depth = len(predictor_definition) + 1
predictor = "%s(%s){\n" % (predictor_definition,
(",\n" + " " * depth).join(args))
join_str = "\n#"
docstring = join_str.join(self.docstring().split("\n"))
predictor_doc = ("# " + docstring +
"\n" + "#\n")
output = predictor_doc + predictor
output += terms_body + items_body + body
output += "%sreturn(NA)\n}\n" % INDENT
out.write(output)
out.flush()
def r_term_analysis_body(self, term_analysis_predicates):
""" Writes auxiliary functions to handle the term analysis fields
"""
term_analysis_options = set([x[0] for x in term_analysis_predicates])
term_analysis_predicates = set(term_analysis_predicates)
body = ""
# static content
body += """
TERM_ANALYSIS <- list("""
lines = []
for field_id in term_analysis_options:
inner_lines = []
field = self.tree.fields[field_id]
lines.append("""
\"%s\"=list(""" % field['dotted'])
options = sorted(field['term_analysis'].keys())
for option in options:
if option in TERM_OPTIONS:
value = repr(field['term_analysis'][option])
value = value if not value.startswith("u") else value[1:]
if value == 'True':
value = 'TRUE'
elif value == 'False':
value = 'FALSE'
inner_lines.append("""\
\"%s\"= %s""" % (option, value))
if inner_lines:
lines[-1] = lines[-1] + "\n" + ",\n".join(inner_lines)
lines[-1] = lines[-1] + """
)"""
body += ",\n".join(lines) + """
)"""
if term_analysis_predicates:
term_forms = {}
fields = self.tree.fields
for field_id, term in term_analysis_predicates:
alternatives = []
field = fields[field_id]
if field['dotted'] not in term_forms:
term_forms[field['dotted']] = {}
all_forms = field['summary'].get('term_forms', {})
if all_forms:
alternatives = all_forms.get(term, [])
if alternatives:
terms = [term]
terms.extend(all_forms.get(term, []))
term_forms[field['dotted']][term] = terms
body += """
TERM_FORMS <- list("""
lines = []
for field in term_forms:
inner_lines = []
lines.append("""
\"%s\"=list(""" % field)
terms = sorted(term_forms[field].keys())
for term in terms:
terms_list = "list(\"" + \
"\", \"".join(term_forms[field][term])
terms_list += "\")"
inner_lines.append("""\
\"%s\"=%s""" % (term,
terms_list))
if inner_lines:
lines[-1] = lines[-1] + "\n" + ",\n".join(inner_lines)
lines[-1] = lines[-1] + """
)"""
body += ",\n".join(lines) + """
)
"""
body += """
TM_TOKENS <- "%s"
TM_FULL_TERM <- "%s"
TM_ALL <- "%s"\
""" % (TM_TOKENS, TM_FULL_TERM, TM_ALL)
with open(TERM_TEMPLATE) as template_handler:
body += template_handler.read()
return body
def r_item_analysis_body(self, item_analysis_predicates):
""" Writes auxiliary functions to handle the item analysis fields
"""
item_analysis_options = set([x[0] for x in item_analysis_predicates])
item_analysis_predicates = set(item_analysis_predicates)
body = ""
# static content
body += """
ITEM_ANALYSIS <- list("""
lines = []
for field_id in item_analysis_options:
inner_lines = []
field = self.tree.fields[field_id]
lines.append("""
\"%s\"=list(""" % field['dotted'])
for option in field['item_analysis']:
if option in ITEM_OPTIONS and field['item_analysis'][option] \
is not None:
value = repr(field['item_analysis'][option])
value = value if not value.startswith("u") else value[1:]
inner_lines.append("""\
\"%s\"=%s""" % (option, value))
if inner_lines:
lines[-1] = lines[-1] + "\n" + ",\n".join(inner_lines)
lines[-1] = lines[-1] + """
)"""
body += ",\n".join(lines) + """
)
"""
with open(ITEMS_TEMPLATE) as template_handler:
body += template_handler.read()
return body
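# --- Hedged usage sketch (editor's addition, not part of the bigmler source) ---
# plug_in() writes the generated R predictor to any file-like object.  The model
# id below is hypothetical; RModel accepts whatever bigml.model.Model accepts.
#
#     from bigml.api import BigML
#     api = BigML()
#     rmodel = RModel(api.get_model("model/0123456789abcdef01234567"), api=api)
#     with open("predict_model.R", "w") as handler:
#         rmodel.plug_in(out=handler)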
|
jaor/bigmler
|
bigmler/export/out_model/rmodel.py
|
Python
|
apache-2.0
| 7,665
|
from __future__ import absolute_import
from collections import defaultdict
import itertools
import sys
from bs4.element import (
CharsetMetaAttributeValue,
ContentMetaAttributeValue,
whitespace_re
)
import six
__all__ = [
'HTMLTreeBuilder',
'SAXTreeBuilder',
'TreeBuilder',
'TreeBuilderRegistry',
]
# Some useful features for a TreeBuilder to have.
FAST = 'fast'
PERMISSIVE = 'permissive'
STRICT = 'strict'
XML = 'xml'
HTML = 'html'
HTML_5 = 'html5'
class TreeBuilderRegistry(object):
def __init__(self):
self.builders_for_feature = defaultdict(list)
self.builders = []
def register(self, treebuilder_class):
"""Register a treebuilder based on its advertised features."""
for feature in treebuilder_class.features:
self.builders_for_feature[feature].insert(0, treebuilder_class)
self.builders.insert(0, treebuilder_class)
def lookup(self, *features):
if len(self.builders) == 0:
# There are no builders at all.
return None
if len(features) == 0:
# They didn't ask for any features. Give them the most
# recently registered builder.
return self.builders[0]
# Go down the list of features in order, and eliminate any builders
# that don't match every feature.
features = list(features)
features.reverse()
candidates = None
candidate_set = None
while len(features) > 0:
feature = features.pop()
we_have_the_feature = self.builders_for_feature.get(feature, [])
if len(we_have_the_feature) > 0:
if candidates is None:
candidates = we_have_the_feature
candidate_set = set(candidates)
else:
# Eliminate any candidates that don't have this feature.
candidate_set = candidate_set.intersection(
set(we_have_the_feature))
# The only valid candidates are the ones in candidate_set.
# Go through the original list of candidates and pick the first one
# that's in candidate_set.
if candidate_set is None:
return None
for candidate in candidates:
if candidate in candidate_set:
return candidate
return None
# The BeautifulSoup class will take feature lists from developers and use them
# to look up builders in this registry.
builder_registry = TreeBuilderRegistry()
class TreeBuilder(object):
"""Turn a document into a Beautiful Soup object tree."""
features = []
is_xml = False
preserve_whitespace_tags = set()
empty_element_tags = None # A tag will be considered an empty-element
# tag when and only when it has no contents.
# A value for these tag/attribute combinations is a space- or
# comma-separated list of CDATA, rather than a single CDATA.
cdata_list_attributes = {}
def __init__(self):
self.soup = None
def reset(self):
pass
def can_be_empty_element(self, tag_name):
"""Might a tag with this name be an empty-element tag?
The final markup may or may not actually present this tag as
self-closing.
For instance: an HTMLBuilder does not consider a <p> tag to be
an empty-element tag (it's not in
HTMLBuilder.empty_element_tags). This means an empty <p> tag
will be presented as "<p></p>", not "<p />".
The default implementation has no opinion about which tags are
empty-element tags, so a tag will be presented as an
empty-element tag if and only if it has no contents.
"<foo></foo>" will become "<foo />", and "<foo>bar</foo>" will
be left alone.
"""
if self.empty_element_tags is None:
return True
return tag_name in self.empty_element_tags
def feed(self, markup):
raise NotImplementedError()
def prepare_markup(self, markup, user_specified_encoding=None,
document_declared_encoding=None):
return markup, None, None, False
def test_fragment_to_document(self, fragment):
"""Wrap an HTML fragment to make it look like a document.
Different parsers do this differently. For instance, lxml
introduces an empty <head> tag, and html5lib
doesn't. Abstracting this away lets us write simple tests
which run HTML fragments through the parser and compare the
results against other HTML fragments.
This method should not be used outside of tests.
"""
return fragment
def set_up_substitutions(self, tag):
return False
def _replace_cdata_list_attribute_values(self, tag_name, attrs):
"""Replaces class="foo bar" with class=["foo", "bar"]
Modifies its input in place.
"""
if not attrs:
return attrs
if self.cdata_list_attributes:
universal = self.cdata_list_attributes.get('*', [])
tag_specific = self.cdata_list_attributes.get(
tag_name.lower(), None)
for attr in attrs.keys():
if attr in universal or (tag_specific and attr in tag_specific):
# We have a "class"-type attribute whose string
# value is a whitespace-separated list of
# values. Split it into a list.
value = attrs[attr]
if isinstance(value, six.string_types):
values = whitespace_re.split(value)
else:
# html5lib sometimes calls setAttributes twice
# for the same tag when rearranging the parse
# tree. On the second call the attribute value
# here is already a list. If this happens,
# leave the value alone rather than trying to
# split it again.
values = value
attrs[attr] = values
return attrs
class SAXTreeBuilder(TreeBuilder):
"""A Beautiful Soup treebuilder that listens for SAX events."""
def feed(self, markup):
raise NotImplementedError()
def close(self):
pass
def startElement(self, name, attrs):
attrs = dict((key[1], value) for key, value in list(attrs.items()))
#print "Start %s, %r" % (name, attrs)
self.soup.handle_starttag(name, attrs)
def endElement(self, name):
#print "End %s" % name
self.soup.handle_endtag(name)
def startElementNS(self, nsTuple, nodeName, attrs):
# Throw away (ns, nodeName) for now.
self.startElement(nodeName, attrs)
def endElementNS(self, nsTuple, nodeName):
# Throw away (ns, nodeName) for now.
self.endElement(nodeName)
#handler.endElementNS((ns, node.nodeName), node.nodeName)
def startPrefixMapping(self, prefix, nodeValue):
# Ignore the prefix for now.
pass
def endPrefixMapping(self, prefix):
# Ignore the prefix for now.
# handler.endPrefixMapping(prefix)
pass
def characters(self, content):
self.soup.handle_data(content)
def startDocument(self):
pass
def endDocument(self):
pass
class HTMLTreeBuilder(TreeBuilder):
"""This TreeBuilder knows facts about HTML.
Such as which tags are empty-element tags.
"""
preserve_whitespace_tags = set(['pre', 'textarea'])
empty_element_tags = set(['br' , 'hr', 'input', 'img', 'meta',
'spacer', 'link', 'frame', 'base'])
# The HTML standard defines these attributes as containing a
# space-separated list of values, not a single value. That is,
# class="foo bar" means that the 'class' attribute has two values,
# 'foo' and 'bar', not the single value 'foo bar'. When we
# encounter one of these attributes, we will parse its value into
# a list of values if possible. Upon output, the list will be
# converted back into a string.
cdata_list_attributes = {
"*" : ['class', 'accesskey', 'dropzone'],
"a" : ['rel', 'rev'],
"link" : ['rel', 'rev'],
"td" : ["headers"],
"th" : ["headers"],
"td" : ["headers"],
"form" : ["accept-charset"],
"object" : ["archive"],
# These are HTML5 specific, as are *.accesskey and *.dropzone above.
"area" : ["rel"],
"icon" : ["sizes"],
"iframe" : ["sandbox"],
"output" : ["for"],
}
def set_up_substitutions(self, tag):
# We are only interested in <meta> tags
if tag.name != 'meta':
return False
http_equiv = tag.get('http-equiv')
content = tag.get('content')
charset = tag.get('charset')
# We are interested in <meta> tags that say what encoding the
# document was originally in. This means HTML 5-style <meta>
# tags that provide the "charset" attribute. It also means
# HTML 4-style <meta> tags that provide the "content"
# attribute and have "http-equiv" set to "content-type".
#
# In both cases we will replace the value of the appropriate
# attribute with a standin object that can take on any
# encoding.
meta_encoding = None
if charset is not None:
# HTML 5 style:
# <meta charset="utf8">
meta_encoding = charset
tag['charset'] = CharsetMetaAttributeValue(charset)
elif (content is not None and http_equiv is not None
and http_equiv.lower() == 'content-type'):
# HTML 4 style:
# <meta http-equiv="content-type" content="text/html; charset=utf8">
tag['content'] = ContentMetaAttributeValue(content)
return (meta_encoding is not None)
def register_treebuilders_from(module):
"""Copy TreeBuilders from the given module into this module."""
# I'm fairly sure this is not the best way to do this.
this_module = sys.modules['bs4.builder']
for name in module.__all__:
obj = getattr(module, name)
if issubclass(obj, TreeBuilder):
setattr(this_module, name, obj)
this_module.__all__.append(name)
# Register the builder while we're at it.
this_module.builder_registry.register(obj)
class ParserRejectedMarkup(Exception):
pass
# Builders are registered in reverse order of priority, so that custom
# builder registrations will take precedence. In general, we want lxml
# to take precedence over html5lib, because it's faster. And we only
# want to use HTMLParser as a last result.
from . import _htmlparser
register_treebuilders_from(_htmlparser)
try:
from . import _html5lib
register_treebuilders_from(_html5lib)
except ImportError:
# They don't have html5lib installed.
pass
try:
from . import _lxml
register_treebuilders_from(_lxml)
except ImportError:
# They don't have lxml installed.
pass
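# --- Hedged usage sketch (editor's addition, not part of the bs4 source) ---
# BeautifulSoup resolves its "features" argument through builder_registry.lookup().
# Which class comes back depends on which optional parsers (lxml, html5lib) are
# importable in the current environment.
if __name__ == "__main__":
    print(builder_registry.lookup('html'))             # most recently registered HTML builder
    print(builder_registry.lookup('html', 'fast'))     # narrowed by every feature that has builders
    print(builder_registry.lookup('no-such-feature'))  # None: nothing advertises it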
|
catapult-project/catapult
|
third_party/beautifulsoup4/bs4/builder/__init__.py
|
Python
|
bsd-3-clause
| 11,207
|
#!/usr/bin/env python
#
# This script shows some examples of scopes you can use for dashboards.
#
import sys
from sdcclient import SdcClient
#
# Scopes can be passed to most dashboard-related functions, e.g. create_dashboard_from_file.
#
# NOTE: convert_scope_string_to_expression should never be used in a user script
# We're going to use it here just to demonstrate some scope options and some constraints
#
def evaluate(scope, expected):
parsed_scope = SdcClient.convert_scope_string_to_expression(scope)
print('{} is valid: {}'.format(scope, parsed_scope[0] is True))
if parsed_scope[0] != expected:
print('Unexpected parsing result!')
sys.exit(1)
# simple example: tag = value
evaluate('proc.name = "cassandra"', True)
# NOTE: For now you can still leave values without quotes.
# The API will be more strict, so please make sure you adopt the new format!
evaluate('proc.name = cassandra', True)
# other operators
evaluate('proc.name != "cassandra"', True)
evaluate('proc.name starts with "cassandra"', True)
evaluate('proc.name contains "cassandra"', True)
# list operators
evaluate('proc.name in ("cassandra", "mysql")', True)
# not-ed expressions
evaluate('not proc.name starts with "cassandra"', True)
evaluate('not proc.name contains "cassandra"', True)
evaluate('not proc.name in ("cassandra", "mysql")', True)
# you can combine multiple expressions; note that only AND'd scopes are currently supported
evaluate('kubernetes.service.name = "database" and proc.name = "cassandra"', True)
# the scope can obviously be omitted in the dashboard configuration
evaluate('', True)
evaluate(None, True)
# invalid scopes will cause errors
evaluate('proc.name == "cassandra"', False) # invalid operator
# currently, one space is required around operands and operators -- improvements will come soon
evaluate('proc.name="cassandra"', False)
#
# The current grammar is unable to validate all errors -- in these cases, the API will fail!
# Improvements will come soon!
#
# Here some errors that will not be detected by the Python library, but the API will
#
evaluate('proc.name = "cassandra" or proc.name = "mysql"', True) # not AND'd expressions are supported
evaluate('proc.name in ("cassandra\', \'mysql")', True) # mismatching quotes
evaluate('proc.name in ("cassandra", "mysql"', True) # missing parenthesis
|
draios/python-sdc-client
|
examples/dashboard_scope.py
|
Python
|
mit
| 2,360
|
from mediawords.db import connect_to_db
def test_connect_to_db():
# Default database
db = connect_to_db()
database_name = db.query('SELECT current_database()').hash()
assert database_name['current_database'] == 'mediacloud'
|
berkmancenter/mediacloud
|
apps/common/tests/python/mediawords/test_db.py
|
Python
|
agpl-3.0
| 242
|
import unittest
import constants as const
from config import SkynetConfig
class SkynetConfigTestCase(unittest.TestCase):
def test_reading_text_level_information(self):
conf = SkynetConfig()
conf.read_config('testconfig.ini')
expected_data = ('text', 'question0', 'answer0', '0')
lvl_data = conf.get_level_info('level0')
self.assertTupleEqual(expected_data, lvl_data)
def test_reading_image_level_information(self):
conf = SkynetConfig()
conf.read_config('testconfig.ini')
expected_data = ('image', 'question2', 'answer2', '2', 'question2.jpg')
lvl_data = conf.get_level_info('level1')
self.assertTupleEqual(expected_data, lvl_data)
def test_getting_question_with_multiple_correct_answers(self):
conf = SkynetConfig()
conf.read_config('testconfig.ini')
lvl_data = conf.get_level_info('level1')
expected_answers = 'answer1 answer18'
self.assertEqual(lvl_data[const.qenum.answers], expected_answers)
if __name__ == '__main__':
unittest.main()
|
Hethurin/skynet-bot
|
testconfig.py
|
Python
|
mit
| 1,089
|
#!/usr/bin/env python
import os, sys, re, string
# this list is only for debugging; the real list, used in the actual OpenCV build, is specified in CMakeLists.txt
opencv_hdr_list = [
"../../core/include/opencv2/core/core.hpp",
"../../flann/include/opencv2/flann/miniflann.hpp",
"../../ml/include/opencv2/ml/ml.hpp",
"../../imgproc/include/opencv2/imgproc/imgproc.hpp",
"../../calib3d/include/opencv2/calib3d/calib3d.hpp",
"../../features2d/include/opencv2/features2d/features2d.hpp",
"../../video/include/opencv2/video/tracking.hpp",
"../../video/include/opencv2/video/background_segm.hpp",
"../../objdetect/include/opencv2/objdetect/objdetect.hpp",
"../../contrib/include/opencv2/contrib/contrib.hpp",
"../../highgui/include/opencv2/highgui/highgui.hpp"
]
"""
Each declaration is [funcname, return_value_type /* in C, not in Python */, <list_of_modifiers>, <list_of_arguments>],
where each element of <list_of_arguments> is a 4-element list itself:
[argtype, argname, default_value /* or "" if none */, <list_of_modifiers>]
where the list of modifiers is yet another nested list of strings
(currently recognized are "/O" for output argument, "/S" for static (i.e. class) methods
and "/A value" for the plain C arrays with counters)
"""
class CppHeaderParser(object):
def __init__(self):
self.BLOCK_TYPE = 0
self.BLOCK_NAME = 1
self.PROCESS_FLAG = 2
self.PUBLIC_SECTION = 3
self.CLASS_DECL = 4
def batch_replace(self, s, pairs):
for before, after in pairs:
s = s.replace(before, after)
return s
def get_macro_arg(self, arg_str, npos):
npos2 = npos3 = arg_str.find("(", npos)
if npos2 < 0:
print "Error: no arguments for the macro at %d" % (self.lineno,)
sys.exit(-1)
balance = 1
while 1:
t, npos3 = self.find_next_token(arg_str, ['(', ')'], npos3+1)
if npos3 < 0:
print "Error: no matching ')' in the macro call at %d" % (self.lineno,)
sys.exit(-1)
if t == '(':
balance += 1
if t == ')':
balance -= 1
if balance == 0:
break
return arg_str[npos2+1:npos3].strip(), npos3
def parse_arg(self, arg_str, argno):
"""
Parses <arg_type> [arg_name]
Returns arg_type, arg_name, modlist, argno, where
modlist is the list of wrapper-related modifiers (such as "output argument", "has counter", ...)
and argno is the new index of an anonymous argument.
        That is, if arg_str is just an argument type without an argument name, the argument name is set to
"arg" + str(argno), and then argno is incremented.
"""
modlist = []
# pass 0: extracts the modifiers
if "CV_OUT" in arg_str:
modlist.append("/O")
arg_str = arg_str.replace("CV_OUT", "")
if "CV_IN_OUT" in arg_str:
modlist.append("/IO")
arg_str = arg_str.replace("CV_IN_OUT", "")
isarray = False
npos = arg_str.find("CV_CARRAY")
if npos >= 0:
isarray = True
macro_arg, npos3 = self.get_macro_arg(arg_str, npos)
modlist.append("/A " + macro_arg)
arg_str = arg_str[:npos] + arg_str[npos3+1:]
npos = arg_str.find("CV_CUSTOM_CARRAY")
if npos >= 0:
isarray = True
macro_arg, npos3 = self.get_macro_arg(arg_str, npos)
modlist.append("/CA " + macro_arg)
arg_str = arg_str[:npos] + arg_str[npos3+1:]
arg_str = arg_str.strip()
word_start = 0
word_list = []
npos = -1
#print self.lineno, ":\t", arg_str
# pass 1: split argument type into tokens
while 1:
npos += 1
t, npos = self.find_next_token(arg_str, [" ", "&", "*", "<", ">", ","], npos)
w = arg_str[word_start:npos].strip()
if w == "operator":
word_list.append("operator " + arg_str[npos:].strip())
break
if w not in ["", "const"]:
word_list.append(w)
if t not in ["", " ", "&"]:
word_list.append(t)
if not t:
break
word_start = npos+1
npos = word_start - 1
arg_type = ""
arg_name = ""
angle_stack = []
#print self.lineno, ":\t", word_list
# pass 2: decrypt the list
wi = -1
prev_w = ""
for w in word_list:
wi += 1
if w == "*":
if prev_w == "char" and not isarray:
arg_type = arg_type[:-len("char")] + "c_string"
else:
arg_type += w
continue
elif w == "<":
arg_type += "_"
angle_stack.append(0)
elif w == "," or w == '>':
if not angle_stack:
print "Error at %d: argument contains ',' or '>' not within template arguments" % (self.lineno,)
sys.exit(-1)
if w == ",":
arg_type += "_and_"
elif w == ">":
if angle_stack[0] == 0:
print "Error at %s:%d: template has no arguments" % (self.hname, self.lineno)
sys.exit(-1)
if angle_stack[0] > 1:
arg_type += "_end_"
angle_stack[-1:] = []
elif angle_stack:
arg_type += w
angle_stack[-1] += 1
elif arg_type == "struct":
arg_type += " " + w
elif arg_type and arg_type != "~":
arg_name = " ".join(word_list[wi:])
break
else:
arg_type += w
prev_w = w
counter_str = ""
add_star = False
if ("[" in arg_name) and not ("operator" in arg_str):
#print arg_str
p1 = arg_name.find("[")
p2 = arg_name.find("]",p1+1)
if p2 < 0:
print "Error at %d: no closing ]" % (self.lineno,)
sys.exit(-1)
counter_str = arg_name[p1+1:p2].strip()
if counter_str == "":
counter_str = "?"
if not isarray:
modlist.append("/A " + counter_str.strip())
arg_name = arg_name[:p1]
add_star = True
if not arg_name:
if arg_type.startswith("operator"):
arg_type, arg_name = "", arg_type
else:
arg_name = "arg" + str(argno)
argno += 1
while arg_type.endswith("_end_"):
arg_type = arg_type[:-len("_end_")]
if add_star:
arg_type += "*"
arg_type = self.batch_replace(arg_type, [("std::", ""), ("cv::", ""), ("::", "_")])
return arg_type, arg_name, modlist, argno
def parse_enum(self, decl_str):
l = decl_str
ll = l.split(",")
prev_val = ""
prev_val_delta = -1
decl = []
for pair in ll:
pv = pair.split("=")
if len(pv) == 1:
prev_val_delta += 1
val = ""
if prev_val:
val = prev_val + "+"
val += str(prev_val_delta)
else:
prev_val_delta = 0
prev_val = val = pv[1].strip()
decl.append(["const " + self.get_dotted_name(pv[0].strip()), val, [], []])
return decl
def parse_class_decl(self, decl_str):
"""
Parses class/struct declaration start in the form:
{class|struct} [CV_EXPORTS] <class_name> [: public <base_class1> [, ...]]
Returns class_name1, <list of base_classes>
"""
l = decl_str
modlist = []
if "CV_EXPORTS_W_MAP" in l:
l = l.replace("CV_EXPORTS_W_MAP", "")
modlist.append("/Map")
if "CV_EXPORTS_W_SIMPLE" in l:
l = l.replace("CV_EXPORTS_W_SIMPLE", "")
modlist.append("/Simple")
npos = l.find("CV_EXPORTS_AS")
if npos >= 0:
macro_arg, npos3 = self.get_macro_arg(l, npos)
modlist.append("=" + macro_arg)
l = l[:npos] + l[npos3+1:]
l = self.batch_replace(l, [("CV_EXPORTS_W", ""), ("CV_EXPORTS", ""), ("public virtual ", " "), ("public ", " "), ("::", ".")]).strip()
ll = re.split(r'\s*[,:]?\s*', l)
ll = [le for le in ll if le]
classname = ll[1]
bases = ll[2:]
return classname, bases, modlist
def parse_func_decl_no_wrap(self, decl_str, static_method = False):
decl_str = (decl_str or "").strip()
virtual_method = False
explicit_method = False
if decl_str.startswith("explicit"):
decl_str = decl_str[len("explicit"):].lstrip()
explicit_method = True
if decl_str.startswith("virtual"):
decl_str = decl_str[len("virtual"):].lstrip()
virtual_method = True
if decl_str.startswith("static"):
decl_str = decl_str[len("static"):].lstrip()
static_method = True
fdecl = decl_str.replace("CV_OUT", "").replace("CV_IN_OUT", "")
fdecl = fdecl.strip().replace("\t", " ")
while " " in fdecl:
fdecl = fdecl.replace(" ", " ")
fname = fdecl[:fdecl.find("(")].strip()
fnpos = fname.rfind(" ")
if fnpos < 0:
fnpos = 0
fname = fname[fnpos:].strip()
rettype = fdecl[:fnpos].strip()
if rettype.endswith("operator"):
fname = ("operator " + fname).strip()
rettype = rettype[:rettype.rfind("operator")].strip()
if rettype.endswith("::"):
rpos = rettype.rfind(" ")
if rpos >= 0:
fname = rettype[rpos+1:].strip() + fname
rettype = rettype[:rpos].strip()
else:
fname = rettype + fname
rettype = ""
apos = fdecl.find("(")
if fname.endswith("operator"):
fname += " ()"
apos = fdecl.find("(", apos+1)
fname = "cv." + fname.replace("::", ".")
decl = [fname, rettype, [], []]
# inline constructor implementation
implmatch = re.match(r"(\(.*?\))\s*:\s*(\w+\(.*?\),?\s*)+", fdecl[apos:])
if bool(implmatch):
fdecl = fdecl[:apos] + implmatch.group(1)
args0str = fdecl[apos+1:fdecl.rfind(")")].strip()
if args0str != "" and args0str != "void":
args0str = re.sub(r"\([^)]*\)", lambda m: m.group(0).replace(',', "@comma@"), args0str)
args0 = args0str.split(",")
args = []
narg = ""
for arg in args0:
narg += arg.strip()
balance_paren = narg.count("(") - narg.count(")")
balance_angle = narg.count("<") - narg.count(">")
if balance_paren == 0 and balance_angle == 0:
args.append(narg.strip())
narg = ""
for arg in args:
dfpos = arg.find("=")
defval = ""
if dfpos >= 0:
defval = arg[dfpos+1:].strip()
else:
dfpos = arg.find("CV_DEFAULT")
if dfpos >= 0:
defval, pos3 = self.get_macro_arg(arg, dfpos)
else:
dfpos = arg.find("CV_WRAP_DEFAULT")
if dfpos >= 0:
defval, pos3 = self.get_macro_arg(arg, dfpos)
if dfpos >= 0:
defval = defval.replace("@comma@", ",")
arg = arg[:dfpos].strip()
pos = len(arg)-1
while pos >= 0 and (arg[pos] in "_[]" or arg[pos].isalpha() or arg[pos].isdigit()):
pos -= 1
if pos >= 0:
aname = arg[pos+1:].strip()
atype = arg[:pos+1].strip()
if aname.endswith("&") or aname.endswith("*") or (aname in ["int", "string", "Mat"]):
atype = (atype + " " + aname).strip()
aname = ""
else:
atype = arg
aname = ""
if aname.endswith("]"):
bidx = aname.find('[')
atype += aname[bidx:]
aname = aname[:bidx]
decl[3].append([atype, aname, defval, []])
if static_method:
decl[2].append("/S")
if virtual_method:
decl[2].append("/V")
if explicit_method:
decl[2].append("/E")
if bool(re.match(r".*\)\s*(const)?\s*=\s*0", decl_str)):
decl[2].append("/A")
if bool(re.match(r".*\)\s*const(\s*=\s*0)?", decl_str)):
decl[2].append("/C")
if "virtual" in decl_str:
print decl_str
return decl
def parse_func_decl(self, decl_str):
"""
Parses the function or method declaration in the form:
[([CV_EXPORTS] <rettype>) | CVAPI(rettype)]
[~]<function_name>
(<arg_type1> <arg_name1>[=<default_value1>] [, <arg_type2> <arg_name2>[=<default_value2>] ...])
[const] {; | <function_body>}
Returns the function declaration entry:
[<func name>, <return value C-type>, <list of modifiers>, <list of arguments>] (see above)
"""
if self.wrap_mode:
if not (("CV_EXPORTS_AS" in decl_str) or ("CV_EXPORTS_W" in decl_str) or \
("CV_WRAP" in decl_str) or ("CV_WRAP_AS" in decl_str)):
return []
# ignore old API in the documentation check (for now)
if "CVAPI(" in decl_str and self.wrap_mode:
return []
top = self.block_stack[-1]
func_modlist = []
npos = decl_str.find("CV_EXPORTS_AS")
if npos >= 0:
arg, npos3 = self.get_macro_arg(decl_str, npos)
func_modlist.append("="+arg)
decl_str = decl_str[:npos] + decl_str[npos3+1:]
npos = decl_str.find("CV_WRAP_AS")
if npos >= 0:
arg, npos3 = self.get_macro_arg(decl_str, npos)
func_modlist.append("="+arg)
decl_str = decl_str[:npos] + decl_str[npos3+1:]
# filter off some common prefixes, which are meaningless for Python wrappers.
# note that we do not strip "static" prefix, which does matter;
# it means class methods, not instance methods
decl_str = self.batch_replace(decl_str, [("virtual", ""), ("static inline", ""), ("inline", ""),\
("CV_EXPORTS_W", ""), ("CV_EXPORTS", ""), ("CV_CDECL", ""), ("CV_WRAP ", " "), ("static CV_INLINE", ""), ("CV_INLINE", "")]).strip()
static_method = False
context = top[0]
if decl_str.startswith("static") and (context == "class" or context == "struct"):
decl_str = decl_str[len("static"):].lstrip()
static_method = True
args_begin = decl_str.find("(")
if decl_str.startswith("CVAPI"):
rtype_end = decl_str.find(")", args_begin+1)
if rtype_end < 0:
print "Error at %d. no terminating ) in CVAPI() macro: %s" % (self.lineno, decl_str)
sys.exit(-1)
decl_str = decl_str[args_begin+1:rtype_end] + " " + decl_str[rtype_end+1:]
args_begin = decl_str.find("(")
if args_begin < 0:
print "Error at %d: no args in '%s'" % (self.lineno, decl_str)
sys.exit(-1)
decl_start = decl_str[:args_begin].strip()
# handle operator () case
if decl_start.endswith("operator"):
args_begin = decl_str.find("(", args_begin+1)
if args_begin < 0:
print "Error at %d: no args in '%s'" % (self.lineno, decl_str)
sys.exit(-1)
decl_start = decl_str[:args_begin].strip()
# TODO: normalize all type of operators
if decl_start.endswith("()"):
decl_start = decl_start[0:-2].rstrip() + " ()"
# constructor/destructor case
if bool(re.match(r'^(\w+::)*(?P<x>\w+)::~?(?P=x)$', decl_start)):
decl_start = "void " + decl_start
rettype, funcname, modlist, argno = self.parse_arg(decl_start, -1)
if argno >= 0:
classname = top[1]
if rettype == classname or rettype == "~" + classname:
rettype, funcname = "", rettype
else:
if bool(re.match('\w+\s+\(\*\w+\)\s*\(.*\)', decl_str)):
return [] # function typedef
elif bool(re.match('\w+\s+\(\w+::\*\w+\)\s*\(.*\)', decl_str)):
return [] # class method typedef
elif bool(re.match('[A-Z_]+', decl_start)):
return [] # it seems to be a macro instantiation
elif "__declspec" == decl_start:
return []
elif bool(re.match(r'\w+\s+\(\*\w+\)\[\d+\]', decl_str)):
return [] # exotic - dynamic 2d array
else:
#print rettype, funcname, modlist, argno
print "Error at %s:%d the function/method name is missing: '%s'" % (self.hname, self.lineno, decl_start)
sys.exit(-1)
if self.wrap_mode and (("::" in funcname) or funcname.startswith("~")):
# if there is :: in function name (and this is in the header file),
# it means, this is inline implementation of a class method.
# Thus the function has been already declared within the class and we skip this repeated
# declaration.
# Also, skip the destructors, as they are always wrapped
return []
funcname = self.get_dotted_name(funcname)
if not self.wrap_mode:
decl = self.parse_func_decl_no_wrap(decl_str, static_method)
decl[0] = funcname
return decl
arg_start = args_begin+1
npos = arg_start-1
balance = 1
angle_balance = 0
# scan the argument list; handle nested parentheses
args_decls = []
args = []
argno = 1
while balance > 0:
npos += 1
t, npos = self.find_next_token(decl_str, ["(", ")", ",", "<", ">"], npos)
if not t:
print "Error: no closing ')' at %d" % (self.lineno,)
print decl_str
print decl_str[arg_start:]
sys.exit(-1)
if t == "<":
angle_balance += 1
if t == ">":
angle_balance -= 1
if t == "(":
balance += 1
if t == ")":
balance -= 1
if (t == "," and balance == 1 and angle_balance == 0) or balance == 0:
# process next function argument
a = decl_str[arg_start:npos].strip()
#print "arg = ", a
arg_start = npos+1
if a:
eqpos = a.find("=")
defval = ""
modlist = []
if eqpos >= 0:
defval = a[eqpos+1:].strip()
else:
eqpos = a.find("CV_DEFAULT")
if eqpos >= 0:
defval, pos3 = self.get_macro_arg(a, eqpos)
else:
eqpos = a.find("CV_WRAP_DEFAULT")
if eqpos >= 0:
defval, pos3 = self.get_macro_arg(a, eqpos)
if defval == "NULL":
defval = "0"
if eqpos >= 0:
a = a[:eqpos].strip()
arg_type, arg_name, modlist, argno = self.parse_arg(a, argno)
if self.wrap_mode:
if arg_type == "InputArray":
arg_type = "Mat"
elif arg_type == "InputOutputArray":
arg_type = "Mat"
modlist.append("/IO")
elif arg_type == "OutputArray":
arg_type = "Mat"
modlist.append("/O")
elif arg_type == "InputArrayOfArrays":
arg_type = "vector_Mat"
elif arg_type == "InputOutputArrayOfArrays":
arg_type = "vector_Mat"
modlist.append("/IO")
elif arg_type == "OutputArrayOfArrays":
arg_type = "vector_Mat"
modlist.append("/O")
defval = self.batch_replace(defval, [("InputArrayOfArrays", "vector<Mat>"),
("InputOutputArrayOfArrays", "vector<Mat>"),
("OutputArrayOfArrays", "vector<Mat>"),
("InputArray", "Mat"),
("InputOutputArray", "Mat"),
("OutputArray", "Mat"),
("noArray", arg_type)]).strip()
args.append([arg_type, arg_name, defval, modlist])
npos = arg_start-1
npos = decl_str.replace(" ", "").find("=0", npos)
if npos >= 0:
# skip pure virtual functions
return []
if static_method:
func_modlist.append("/S")
return [funcname, rettype, func_modlist, args]
def get_dotted_name(self, name):
"""
adds the dot-separated container class/namespace names to the bare function/class name, e.g. when we have
namespace cv {
class A {
public:
f(int);
};
}
the function will convert "A" to "cv.A" and "f" to "cv.A.f".
"""
if not self.block_stack:
return name
if name.startswith("cv."):
return name
n = ""
for b in self.block_stack:
block_type, block_name = b[self.BLOCK_TYPE], b[self.BLOCK_NAME]
if block_type in ["file", "enum"]:
continue
if block_type not in ["struct", "class", "namespace"]:
print "Error at %d: there are non-valid entries in the current block stack " % (self.lineno, self.block_stack)
sys.exit(-1)
if block_name:
n += block_name + "."
return n + name.replace("::", ".")
def parse_stmt(self, stmt, end_token):
"""
parses the statement (ending with ';' or '}') or a block head (ending with '{')
The function calls parse_class_decl or parse_func_decl when necessary. It returns
<block_type>, <block_name>, <parse_flag>, <declaration>
where the first 3 values only make sense for blocks (i.e. code blocks, namespaces, classes, enums and such)
"""
stack_top = self.block_stack[-1]
context = stack_top[self.BLOCK_TYPE]
stmt_type = ""
if end_token == "{":
stmt_type = "block"
if context == "block":
print "Error at %d: should not call parse_stmt inside blocks" % (self.lineno,)
sys.exit(-1)
if context == "class" or context == "struct":
while 1:
colon_pos = stmt.find(":")
if colon_pos < 0:
break
w = stmt[:colon_pos].strip()
if w in ["public", "protected", "private"]:
if w == "public" or (not self.wrap_mode and w == "protected"):
stack_top[self.PUBLIC_SECTION] = True
else:
stack_top[self.PUBLIC_SECTION] = False
stmt = stmt[colon_pos+1:].strip()
break
# do not process hidden class members and template classes/functions
if not stack_top[self.PUBLIC_SECTION] or stmt.startswith("template"):
return stmt_type, "", False, None
if end_token == "{":
if not self.wrap_mode and stmt.startswith("typedef struct"):
stmt_type = "struct"
try:
classname, bases, modlist = self.parse_class_decl(stmt[len("typedef "):])
except:
print "Error at %s:%d" % (self.hname, self.lineno)
exit(1)
if classname.startswith("_Ipl"):
classname = classname[1:]
decl = [stmt_type + " " + self.get_dotted_name(classname), "", modlist, []]
if bases:
decl[1] = ": " + ", ".join([b if "::" in b else self.get_dotted_name(b).replace(".","::") for b in bases])
return stmt_type, classname, True, decl
if stmt.startswith("class") or stmt.startswith("struct"):
stmt_type = stmt.split()[0]
if stmt.strip() != stmt_type:
try:
classname, bases, modlist = self.parse_class_decl(stmt)
except:
print "Error at %s:%d" % (self.hname, self.lineno)
exit(1)
decl = []
if ("CV_EXPORTS_W" in stmt) or ("CV_EXPORTS_AS" in stmt) or (not self.wrap_mode):# and ("CV_EXPORTS" in stmt)):
decl = [stmt_type + " " + self.get_dotted_name(classname), "", modlist, []]
if bases:
decl[1] = ": " + ", ".join([b if "::" in b else self.get_dotted_name(b).replace(".","::") for b in bases])
return stmt_type, classname, True, decl
if stmt.startswith("enum"):
return "enum", "", True, None
if stmt.startswith("namespace"):
stmt_list = stmt.split()
if len(stmt_list) < 2:
stmt_list.append("<unnamed>")
return stmt_list[0], stmt_list[1], True, None
if stmt.startswith("extern") and "\"C\"" in stmt:
return "namespace", "", True, None
if end_token == "}" and context == "enum":
decl = self.parse_enum(stmt)
return "enum", "", False, decl
if end_token == ";" and stmt.startswith("typedef"):
# TODO: handle typedef's more intelligently
return stmt_type, "", False, None
paren_pos = stmt.find("(")
if paren_pos >= 0:
# assume it's function or method declaration,
# since we filtered off the other places where '(' can normally occur:
# - code blocks
# - function pointer typedef's
decl = self.parse_func_decl(stmt)
            # we return parse_flag == False to prevent the parser from looking inside function/method bodies
# (except for tracking the nested blocks)
return stmt_type, "", False, decl
if (context == "struct" or context == "class") and end_token == ";" and stmt:
# looks like it's member declaration; append the members to the class declaration
class_decl = stack_top[self.CLASS_DECL]
if ("CV_PROP" in stmt): # or (class_decl and ("/Map" in class_decl[2])):
var_modlist = []
if "CV_PROP_RW" in stmt:
var_modlist.append("/RW")
stmt = self.batch_replace(stmt, [("CV_PROP_RW", ""), ("CV_PROP", "")]).strip()
var_list = stmt.split(",")
var_type, var_name1, modlist, argno = self.parse_arg(var_list[0], -1)
var_list = [var_name1] + [i.strip() for i in var_list[1:]]
for v in var_list:
class_decl[3].append([var_type, v, "", var_modlist])
return stmt_type, "", False, None
# something unknown
return stmt_type, "", False, None
def find_next_token(self, s, tlist, p=0):
"""
Finds the next token from the 'tlist' in the input 's', starting from position 'p'.
        Returns the first occurring token and its position, or ("", len(s)) when no token is found
"""
token = ""
tpos = len(s)
for t in tlist:
pos = s.find(t, p)
if pos < 0:
continue
if pos < tpos:
tpos = pos
token = t
return token, tpos
def parse(self, hname, wmode=True):
"""
The main method. Parses the input file.
        Returns the list of declarations (that can be printed using print_decls)
"""
self.hname = hname
decls = []
f = open(hname, "rt")
linelist = list(f.readlines())
f.close()
# states:
SCAN = 0 # outside of a comment or preprocessor directive
COMMENT = 1 # inside a multi-line comment
DIRECTIVE = 2 # inside a multi-line preprocessor directive
state = SCAN
self.block_stack = [["file", hname, True, True, None]]
block_head = ""
self.lineno = 0
self.wrap_mode = wmode
for l0 in linelist:
self.lineno += 1
#print self.lineno
l = l0.strip()
if state == SCAN and l.startswith("#"):
state = DIRECTIVE
# fall through to the if state == DIRECTIVE check
if state == DIRECTIVE:
if not l.endswith("\\"):
state = SCAN
continue
if state == COMMENT:
pos = l.find("*/")
if pos < 0:
continue
l = l[pos+2:]
state = SCAN
if state != SCAN:
print "Error at %d: invlid state = %d" % (self.lineno, state)
sys.exit(-1)
while 1:
token, pos = self.find_next_token(l, [";", "\"", "{", "}", "//", "/*"])
if not token:
block_head += " " + l
break
if token == "//":
block_head += " " + l[:pos]
break
if token == "/*":
block_head += " " + l[:pos]
pos = l.find("*/", pos+2)
if pos < 0:
state = COMMENT
break
l = l[pos+2:]
continue
if token == "\"":
pos2 = pos + 1
while 1:
t2, pos2 = self.find_next_token(l, ["\\", "\""], pos2)
if t2 == "":
print "Error at %d: no terminating '\"'" % (self.lineno,)
sys.exit(-1)
if t2 == "\"":
break
pos2 += 2
block_head += " " + l[:pos2+1]
l = l[pos2+1:]
continue
stmt = (block_head + " " + l[:pos]).strip()
stmt = " ".join(stmt.split()) # normalize the statement
stack_top = self.block_stack[-1]
if stmt.startswith("@"):
# Objective C ?
break
decl = None
if stack_top[self.PROCESS_FLAG]:
# even if stack_top[PUBLIC_SECTION] is False, we still try to process the statement,
# since it can start with "public:"
stmt_type, name, parse_flag, decl = self.parse_stmt(stmt, token)
if decl:
if stmt_type == "enum":
for d in decl:
decls.append(d)
else:
decls.append(decl)
else:
stmt_type, name, parse_flag = "block", "", False
if token == "{":
if stmt_type == "class":
public_section = False
else:
public_section = True
self.block_stack.append([stmt_type, name, parse_flag, public_section, decl])
if token == "}":
if not self.block_stack:
print "Error at %d: the block stack is empty" % (self.lineno,)
self.block_stack[-1:] = []
if pos+1 < len(l) and l[pos+1] == ';':
pos += 1
block_head = ""
l = l[pos+1:]
return decls
def print_decls(self, decls):
"""
        Prints the list of declarations, retrieved by the parse() method
"""
for d in decls:
print d[0], d[1], ";".join(d[2])
for a in d[3]:
print " ", a[0], a[1], a[2],
if a[3]:
print "; ".join(a[3])
else:
print
if __name__ == '__main__':
parser = CppHeaderParser()
decls = []
for hname in opencv_hdr_list:
decls += parser.parse(hname)
#for hname in sys.argv[1:]:
#decls += parser.parse(hname, wmode=False)
parser.print_decls(decls)
print len(decls)
|
grace-/opencv-3.0.0-cvpr
|
opencv/modules/python/src2/hdr_parser.py
|
Python
|
bsd-3-clause
| 33,980
|
'''
Created by auto_sdk on 2015.06.23
'''
from aliyun.api.base import RestApi
class Rds20140815CheckAccountNameAvailableRequest(RestApi):
def __init__(self,domain='rds.aliyuncs.com',port=80):
RestApi.__init__(self,domain, port)
self.AccountName = None
self.DBInstanceId = None
self.resourceOwnerAccount = None
def getapiname(self):
return 'rds.aliyuncs.com.CheckAccountNameAvailable.2014-08-15'
|
francisar/rds_manager
|
aliyun/api/rest/Rds20140815CheckAccountNameAvailableRequest.py
|
Python
|
mit
| 408
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from twisted.trial import unittest
from hiitrack.lib.spatial import spatial_hash
from random import randint
from base64 import b64encode
class SpatialTestCase(unittest.TestCase):
def test_generation(self):
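        # Sketch of the test's intent (inferred from the code below): points whose
        # spatial hash falls between the hashes of the box corners should largely be
        # the points geometrically inside the box; the printed value is the observed
        # mismatch rate for randomly generated points.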
size = randint(0, 1000000)
point_count = 1000
dimensions = 2
boundaries = []
box = []
points = []
i = 0
for dimension in range(0, dimensions):
lower_limit = randint(0, size)
upper_limit = randint(lower_limit, size)
lower_box = randint(lower_limit, upper_limit)
upper_box = randint(lower_box, upper_limit)
boundaries.append((lower_limit, upper_limit))
box.append((lower_box, upper_box))
low_point = [x[0] for x in box]
high_point = [x[1] for x in box]
for i in range(0, point_count):
point = []
for dimension in range(0, dimensions):
coordinate = randint(boundaries[dimension][0], boundaries[dimension][1])
point.append(coordinate)
points.append((spatial_hash(point, boundaries), point))
low_point_hash = spatial_hash(low_point, boundaries)
high_point_hash = spatial_hash(high_point, boundaries)
box_misses = 0
for sh, coordinates in points:
in_box = []
for dimension in range(0, dimensions):
in_box.append(coordinates[dimension] > low_point[dimension] and
coordinates[dimension] < high_point[dimension])
if all(in_box) != (sh > low_point_hash and sh < high_point_hash):
box_misses += 1
print float(box_misses) / point_count
|
hiidef/hiitrack-api
|
tests/spatial.py
|
Python
|
mit
| 1,469
|
#
# Copyright 2009 Eigenlabs Ltd. http://www.eigenlabs.com
#
# This file is part of EigenD.
#
# EigenD is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# EigenD is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EigenD. If not, see <http://www.gnu.org/licenses/>.
#
import wx
from pigui import colours,fonts,utils,drawutils
class BelcantoPanel(wx.Window):
def __init__(self,parent,size,agent,style=wx.BORDER_NONE):
wx.Window.__init__(self,parent,-1,size=size,style=style)
self.SetBackgroundStyle(wx.BG_STYLE_CUSTOM)
self.model=agent.historyModel
self.model.addHistoryListener(self)
self.SetBackgroundColour(colours.borderGradient1)
self.__agent=agent
self.Bind(wx.EVT_PAINT, self.onPaint)
self.font=wx.Font(13,wx.FONTFAMILY_MODERN,wx.FONTSTYLE_NORMAL,weight=wx.FONTWEIGHT_LIGHT)
def __getClientDC(self):
if self.IsDoubleBuffered():
dc=wx.ClientDC(self)
else:
dc=wx.BufferedDC(wx.ClientDC(self))
dc.Clear()
brush=dc.GetBrush()
brush.SetColour(colours.borderGradient1)
dc.SetBackground(brush)
dc.SetFont(self.font)
return dc
def onPaint(self,evt):
dc=self.__getClientDC()
self.doPaint(dc)
evt.Skip()
def historyUpdate(self):
print 'BelcantoPanel:update'
if self.model.words:
dc=self.__getClientDC()
self.doPaint(dc)
def updateStatus(self,str):
print 'BelcantoPanel:updateStatus',str
# self.agent.updateStatus(str)
# self.status=str
# self.update()
def doPaint(self,dc):
self.__backgroundDrawing(dc)
y=2
x=2
print 'belcanto_panel:doPaint',self.model.words
pts,weight=fonts.setFont(dc,ptfactor=0.75)
displayText=[]
maxWidth=0.95*self.GetClientSize()[0]
widthTolerance=0.1*maxWidth
#utils.chopString(self.model.get_text(),dc,maxWidth,displayText,widthTolerance,True)
utils.chopString2(self.model.get_text(),dc,maxWidth,displayText,0,True)
yInc=1.1*dc.GetTextExtent('A')[1]
for choppedLine in displayText:
dc.DrawText(choppedLine,x,y)
print 'BelcantoPanel',choppedLine
y=y+yInc
self.__borderDrawing(dc)
fonts.resetFont(dc,pts,weight)
def __backgroundDrawing(self,dc):
size=self.GetClientSize()
drawutils.setPenColour(dc,colours.borderGradient1)
drawutils.setBrushColour(dc,colours.borderGradient1)
dc.DrawRectangle(0,0,size[0],size[1])
def __borderDrawing(self,dc):
size=self.GetClientSize()
pen=wx.Pen(colours.border_shadow_dark)
pen.SetWidth(2)
pen.SetCap(wx.CAP_BUTT)
dc.SetPen(pen)
dc.DrawLine(1,0,1,size[1])
dc.DrawLine(1,size[1]-1,size[0],size[1]-1)
pen=wx.Pen(colours.defaultPen)
pen.SetWidth(1)
dc.SetPen(pen)
|
barnone/EigenD
|
app_browser2/belcanto_panel.py
|
Python
|
gpl-3.0
| 3,432
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Acl(AutotoolsPackage):
"""Commands for Manipulating POSIX Access Control Lists."""
homepage = "https://savannah.nongnu.org/projects/acl"
url = "https://git.savannah.nongnu.org/cgit/acl.git/snapshot/acl-2.2.53.tar.gz"
version('2.2.53', sha256='9e905397ac10d06768c63edd0579c34b8431555f2ea8e8f2cee337b31f856805')
version('2.2.52', sha256='f3f31d2229c903184ff877aa0ee658b87ec20fec8aebb51e65eaa68d7b24e629')
version('2.2.51', sha256='31a43d96a274a39bfcb805fb903d45840515344884d224cef166b482693a9f48')
version('2.2.50', sha256='39e21d623a9f0da8c042cde346c01871b498d51400e92c2ab1490d5ffd724401')
version('2.2.49', sha256='c6e01460cac4e47673dd60a7f57b970b49f6998bb564eff141cca129aa8940d1')
version('2.2.48', sha256='877eaeccc1500baec58391935b46ac7dfc5ffd8c54fbc0385ccd8b2b18ac3fa6')
depends_on('m4', type='build')
depends_on('autoconf', type='build')
depends_on('automake', type='build')
depends_on('libtool', type='build')
depends_on('attr')
depends_on('gettext')
def setup_build_environment(self, env):
env.append_flags('LDFLAGS', '-lintl')
def autoreconf(self, spec, prefix):
bash = which('bash')
bash('./autogen.sh')
|
LLNL/spack
|
var/spack/repos/builtin/packages/acl/package.py
|
Python
|
lgpl-2.1
| 1,448
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
from asdf.yamlutil import custom_tree_to_tagged_tree
from astropy.time import TimeDelta
from astropy.time.tests.test_delta import (allclose_jd, allclose_jd2,
allclose_sec)
from ...types import AstropyType
__all__ = ['TimeDeltaType']
class TimeDeltaType(AstropyType):
name = 'time/timedelta'
types = [TimeDelta]
version = '1.0.0'
@classmethod
def to_tree(cls, obj, ctx):
return custom_tree_to_tagged_tree(obj.info._represent_as_dict(), ctx)
@classmethod
def from_tree(cls, node, ctx):
return TimeDelta.info._construct_from_dict(node)
@classmethod
def assert_equal(cls, old, new):
assert allclose_jd(old.jd, new.jd)
assert allclose_jd2(old.jd2, new.jd2)
assert allclose_sec(old.sec, new.sec)
|
bsipocz/astropy
|
astropy/io/misc/asdf/tags/time/timedelta.py
|
Python
|
bsd-3-clause
| 908
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui_takestep_explorer.ui'
#
# Created: Wed Apr 24 11:11:57 2013
# by: PyQt4 UI code generator 4.9.1
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(854, 611)
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.horizontalLayout = QtGui.QHBoxLayout(self.centralwidget)
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.verticalLayout_2 = QtGui.QVBoxLayout()
self.verticalLayout_2.setContentsMargins(5, -1, -1, -1)
self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
self.label = QtGui.QLabel(self.centralwidget)
self.label.setText(_fromUtf8(""))
self.label.setObjectName(_fromUtf8("label"))
self.verticalLayout_2.addWidget(self.label)
self.show3d = Show3DWithSlider(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.show3d.sizePolicy().hasHeightForWidth())
self.show3d.setSizePolicy(sizePolicy)
self.show3d.setObjectName(_fromUtf8("show3d"))
self.verticalLayout_2.addWidget(self.show3d)
self.horizontalLayout.addLayout(self.verticalLayout_2)
self.verticalLayout = QtGui.QVBoxLayout()
self.verticalLayout.setContentsMargins(0, -1, -1, -1)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.label_2 = QtGui.QLabel(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_2.sizePolicy().hasHeightForWidth())
self.label_2.setSizePolicy(sizePolicy)
self.label_2.setMaximumSize(QtCore.QSize(200, 16777215))
self.label_2.setObjectName(_fromUtf8("label_2"))
self.verticalLayout.addWidget(self.label_2)
self.listMinima = QtGui.QListWidget(self.centralwidget)
self.listMinima.setMaximumSize(QtCore.QSize(200, 16777215))
self.listMinima.setObjectName(_fromUtf8("listMinima"))
self.verticalLayout.addWidget(self.listMinima)
self.horizontalLayout.addLayout(self.verticalLayout)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtGui.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 854, 25))
self.menubar.setObjectName(_fromUtf8("menubar"))
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtGui.QStatusBar(MainWindow)
self.statusbar.setObjectName(_fromUtf8("statusbar"))
MainWindow.setStatusBar(self.statusbar)
self.toolBar = QtGui.QToolBar(MainWindow)
self.toolBar.setObjectName(_fromUtf8("toolBar"))
MainWindow.addToolBar(QtCore.Qt.TopToolBarArea, self.toolBar)
self.actionDisplace = QtGui.QAction(MainWindow)
self.actionDisplace.setObjectName(_fromUtf8("actionDisplace"))
self.actionQuench = QtGui.QAction(MainWindow)
self.actionQuench.setObjectName(_fromUtf8("actionQuench"))
self.actionShow_path = QtGui.QAction(MainWindow)
self.actionShow_path.setCheckable(True)
self.actionShow_path.setChecked(False)
self.actionShow_path.setObjectName(_fromUtf8("actionShow_path"))
self.toolBar.addAction(self.actionDisplace)
self.toolBar.addAction(self.actionQuench)
self.toolBar.addAction(self.actionShow_path)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(QtGui.QApplication.translate("MainWindow", "MainWindow", None, QtGui.QApplication.UnicodeUTF8))
self.label_2.setText(QtGui.QApplication.translate("MainWindow", "Energy (id)", None, QtGui.QApplication.UnicodeUTF8))
self.toolBar.setWindowTitle(QtGui.QApplication.translate("MainWindow", "toolBar", None, QtGui.QApplication.UnicodeUTF8))
self.actionDisplace.setText(QtGui.QApplication.translate("MainWindow", "displace", None, QtGui.QApplication.UnicodeUTF8))
self.actionQuench.setText(QtGui.QApplication.translate("MainWindow", "quench", None, QtGui.QApplication.UnicodeUTF8))
self.actionShow_path.setText(QtGui.QApplication.translate("MainWindow", "show path", None, QtGui.QApplication.UnicodeUTF8))
self.actionShow_path.setToolTip(QtGui.QApplication.translate("MainWindow", "display the quench trajectory", None, QtGui.QApplication.UnicodeUTF8))
from pygmin.gui.show3d_with_slider import Show3DWithSlider
|
js850/PyGMIN
|
pygmin/gui/ui/ui_takestep_explorer.py
|
Python
|
gpl-3.0
| 5,129
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from typing import Dict, Type
from .base import CustomerLabelServiceTransport
from .grpc import CustomerLabelServiceGrpcTransport
# Compile a registry of transports.
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[CustomerLabelServiceTransport]]
_transport_registry["grpc"] = CustomerLabelServiceGrpcTransport
__all__ = (
"CustomerLabelServiceTransport",
"CustomerLabelServiceGrpcTransport",
)
|
googleads/google-ads-python
|
google/ads/googleads/v10/services/services/customer_label_service/transports/__init__.py
|
Python
|
apache-2.0
| 1,061
|
import unittest
import numpy
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import gradient_check
from chainer import links
from chainer import testing
from chainer.testing import attr
def _sigmoid(x):
xp = backend.get_array_module(x)
half = x.dtype.type(0.5)
return xp.tanh(x * half) * half + half
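# _peephole below is a NumPy/CuPy reference implementation of the peephole LSTM
# forward step checked by these tests. In rough terms (as implemented below):
#   a, i, f, o = split(x . W_upward^T + h . W_lateral^T)
#   i = sigmoid(i + c . W_peep_i^T),  f = sigmoid(f + c . W_peep_f^T)
#   c_next = tanh(a) * i + f * c
#   o = sigmoid(o + c_next . W_peep_o^T),  y = o * tanh(c_next)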
def _peephole(func, c, h, x):
xp = backend.get_array_module(x)
with cuda.get_device_from_array(x):
lstm_in = x.dot(func.upward.W.data.T)
lstm_in += h.dot(func.lateral.W.data.T)
lstm_in = xp.reshape(lstm_in, (len(lstm_in),
lstm_in.shape[1] // 4,
4))
a, i, f, o = xp.split(lstm_in, 4, 2)
a = xp.reshape(a, (len(a), a.shape[1]))
i = xp.reshape(i, (len(i), i.shape[1]))
f = xp.reshape(f, (len(f), f.shape[1]))
o = xp.reshape(o, (len(o), o.shape[1]))
peep_in_i = c.dot(func.peep_i.W.data.T)
peep_in_f = c.dot(func.peep_f.W.data.T)
a = xp.tanh(a)
i = _sigmoid(i + peep_in_i)
f = _sigmoid(f + peep_in_f)
c_next = a * i + f * c
peep_in_o = c_next.dot(func.peep_o.W.data.T)
o = _sigmoid(o + peep_in_o)
y = o * xp.tanh(c_next)
return c_next, y
@testing.parameterize(
{'in_size': 10, 'out_size': 10},
{'in_size': 10, 'out_size': 40},
)
class TestPeephole(unittest.TestCase):
def setUp(self):
self.link = links.StatefulPeepholeLSTM(self.in_size, self.out_size)
upward = self.link.upward.W.data
upward[...] = numpy.random.uniform(-1, 1, upward.shape)
lateral = self.link.lateral.W.data
lateral[...] = numpy.random.uniform(-1, 1, lateral.shape)
peep_i = self.link.peep_i.W.data
peep_i[...] = numpy.random.uniform(-1, 1, peep_i.shape)
peep_f = self.link.peep_f.W.data
peep_f[...] = numpy.random.uniform(-1, 1, peep_f.shape)
peep_o = self.link.peep_o.W.data
peep_o[...] = numpy.random.uniform(-1, 1, peep_o.shape)
c_shape = (1, self.out_size)
h_shape = (1, self.out_size)
x_shape = (4, self.in_size)
gy_shape = (4, self.out_size)
self.c = numpy.zeros(c_shape).astype(numpy.float32)
self.h = numpy.zeros(h_shape).astype(numpy.float32)
self.x = numpy.random.uniform(-1, 1, x_shape).astype(numpy.float32)
self.gy = numpy.random.uniform(-1, 1, gy_shape).astype(numpy.float32)
def _forward(self, link, x):
return link(x)
def check_forward(self, c_data, h_data, x_data):
x = chainer.Variable(x_data)
h1 = self.link(x)
c1_expect, h1_expect = _peephole(self.link, c_data, h_data, x_data)
testing.assert_allclose(h1.data, h1_expect)
testing.assert_allclose(self.link.c.data, c1_expect)
testing.assert_allclose(self.link.h.data, h1_expect)
h2 = self.link(x)
c2_expect, h2_expect = _peephole(self.link,
c1_expect, h1_expect, x_data)
testing.assert_allclose(h2.data, h2_expect)
testing.assert_allclose(self.link.c.data, c2_expect)
testing.assert_allclose(self.link.h.data, h2_expect)
def test_forward_cpu(self):
self.check_forward(self.c, self.h, self.x)
@attr.gpu
def test_forward_gpu(self):
self.link.to_gpu()
self.check_forward(cuda.to_gpu(self.c),
cuda.to_gpu(self.h),
cuda.to_gpu(self.x))
@attr.multi_gpu(2)
def test_forward_gpu_multi(self):
with cuda.get_device_from_id(0):
self.link.to_gpu()
c = cuda.to_gpu(self.c)
h = cuda.to_gpu(self.h)
x = cuda.to_gpu(self.x)
with cuda.get_device_from_id(1):
self.check_forward(c, h, x)
def check_backward(self, c_data, h_data, x_data, y_grad):
x = chainer.Variable(x_data)
y = self._forward(self.link, x)
y.grad = y_grad
y.backward()
def f():
c, y = _peephole(self.link, c_data, h_data, x_data)
return y,
gx, = gradient_check.numerical_grad(f, (x.data,), (y.grad,))
testing.assert_allclose(gx, x.grad, atol=1e-3)
def test_backward_cpu(self):
self.check_backward(self.c, self.h, self.x, self.gy)
@attr.gpu
def test_backward_gpu(self):
self.link.to_gpu()
self.check_backward(cuda.to_gpu(self.c),
cuda.to_gpu(self.h),
cuda.to_gpu(self.x),
cuda.to_gpu(self.gy))
class TestPeepholeState(unittest.TestCase):
def setUp(self):
in_size, out_size = 10, 8
self.link = links.StatefulPeepholeLSTM(in_size, out_size)
def check_reset_state(self):
self.link.reset_state()
self.assertIsNone(self.link.c)
self.assertIsNone(self.link.h)
def test_reset_state_cpu(self):
self.check_reset_state()
@attr.gpu
def test_reset_state_gpu(self):
self.link.to_gpu()
self.check_reset_state()
class TestPeepholeToCPUToGPU(unittest.TestCase):
def setUp(self):
in_size, out_size = 10, 8
self.link = links.StatefulPeepholeLSTM(in_size, out_size)
self.c = chainer.Variable(
numpy.random.uniform(-1, 1, (1, out_size)).astype(numpy.float32))
self.h = chainer.Variable(
numpy.random.uniform(-1, 1, (1, out_size)).astype(numpy.float32))
def check_to_cpu(self, c, h):
self.link.c = c
self.link.h = h
self.link.to_cpu()
self.assertIs(self.link.xp, numpy)
self.assertIsInstance(self.link.c.data, self.link.xp.ndarray)
self.assertIsInstance(self.link.h.data, self.link.xp.ndarray)
self.link.to_cpu()
self.assertIs(self.link.xp, numpy)
self.assertIsInstance(self.link.c.data, self.link.xp.ndarray)
self.assertIsInstance(self.link.h.data, self.link.xp.ndarray)
def test_to_cpu_cpu(self):
self.check_to_cpu(self.c, self.h)
@attr.gpu
def test_to_cpu_gpu(self):
self.c.to_gpu()
self.h.to_gpu()
self.check_to_cpu(self.c, self.h)
def check_to_cpu_to_gpu(self, c, h):
self.link.c = c
self.link.h = h
self.link.to_gpu()
self.assertIs(self.link.xp, cuda.cupy)
self.assertIsInstance(self.link.c.data, self.link.xp.ndarray)
self.assertIsInstance(self.link.h.data, self.link.xp.ndarray)
self.link.to_gpu()
self.assertIs(self.link.xp, cuda.cupy)
self.assertIsInstance(self.link.c.data, self.link.xp.ndarray)
self.assertIsInstance(self.link.h.data, self.link.xp.ndarray)
self.link.to_cpu()
self.assertIs(self.link.xp, numpy)
self.assertIsInstance(self.link.c.data, self.link.xp.ndarray)
self.assertIsInstance(self.link.h.data, self.link.xp.ndarray)
self.link.to_gpu()
self.assertIs(self.link.xp, cuda.cupy)
self.assertIsInstance(self.link.c.data, self.link.xp.ndarray)
self.assertIsInstance(self.link.h.data, self.link.xp.ndarray)
@attr.gpu
def test_to_cpu_to_gpu_cpu(self):
self.check_to_cpu_to_gpu(self.c, self.h)
@attr.gpu
def test_to_cpu_to_gpu_gpu(self):
self.c.to_gpu()
self.h.to_gpu()
self.check_to_cpu_to_gpu(self.c, self.h)
testing.run_module(__name__, __file__)
|
jnishi/chainer
|
tests/chainer_tests/links_tests/connection_tests/test_peephole.py
|
Python
|
mit
| 7,504
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Modbus documentation build configuration file, created by
# sphinx-quickstart on Tue Oct 13 21:13:48 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'uModbus'
copyright = '2019, Auke Willem Oosterhoff <auke@orangetux.nl>'
author = 'Auke Willem Oosterhoff <auke@orangetux.nl>'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0.4'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'index': ['globaltoc.html', 'searchbox.html'],
'**': ['globaltoc.html', 'relations.html', 'searchbox.html']
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Modbusdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Modbus.tex', 'Modbus Documentation',
'Auke Willem Oosterhoff \\textless{}a.oosterhoff@climotion.com\\textgreater{}', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'modbus', 'Modbus Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Modbus', 'Modbus Documentation',
author, 'Modbus', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
autodoc_member_order = 'bysource'
|
AdvancedClimateSystems/python-modbus
|
docs/source/conf.py
|
Python
|
mpl-2.0
| 9,496
|
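# NOTE: 'x' and 'caster' are not defined in this script; they are presumably bound
# by the embedding test harness before the script is evaluated (assumption).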
print 'Hello, world! And x is %d' % x
x = x + 1
print 'Also x is %d' % x
x = x + 1
print 'Also x is %d' % x
x = x + 1
print 'Also x is %d' % x
x = x + 1
print 'Also x is %d' % x
caster.sayMoo()
caster.mutableNumber.add (1)
result = True
|
kibertoad/swampmachine
|
swampmachine-scripting/src/test/resources/net/kiberion/swampmachine/scripting/jython/test.py
|
Python
|
apache-2.0
| 238
|
import numpy as np
import simtk.openmm as mm
import simtk.openmm.app as app
import simtk.unit as u
eu = u.kilocalories_per_mole
temperature = 300 * u.kelvin
friction = 1.0 / u.picosecond
pdb = app.PDBFile('pdb/resi-1-15.pdb')
forcefield = app.ForceField('amber99sb.xml',"tip3p.xml")
m = app.modeller.Modeller(pdb.topology , pdb.positions )
m.addSolvent(forcefield, padding=0.7*u.nanometer)
# build the system from the solvated Modeller topology/positions; using the bare
# pdb here would silently discard the water added above
system = forcefield.createSystem(m.topology, nonbondedMethod=app.PME,nonbondedCutoff=0.95 * u.nanometer, constraints=app.HAngles)
integrator = mm.LangevinIntegrator(temperature, friction, 0.004*u.picoseconds)
simulation = app.Simulation(m.topology, system, integrator)
simulation.context.setPositions(m.positions)
simulation.reporters.append(app.DCDReporter("resi-1-15.dcd", 250))
print("running")
simulation.step(100000000)
|
kyleabeauchamp/AlphaSynuclein
|
python/solvate.py
|
Python
|
apache-2.0
| 830
|
import re
import csv
import ipaddress
__version__ = 1.0
# Each route will have the following values
class Route_Template(object):
def __init__(self):
self.route = {}
self.protocol = []
self.metric = []
self.next_hop = []
self.age = []
self.interface = []
def __repr__(self):
return str(self.route)
# The main code structure
class RouteParse(object):
def __init__(self):
self.route_table = {}
self.Read_File()
self.Generate_Output_To_File()
# Retrieve a route object if it exists
def Get_Route_Object(self,target_route):
for route in self.route_table:
if target_route in route:
return self.route_table[route]
return None
# If the regular expression picked up a valid route, extract the values into a temporary dictionary
def Get_Route_Values_From_Match(self,matchObj):
values = {}
for keyword, value in vars(Route_Template()).items():
if keyword in matchObj.groupdict():
val = str(matchObj.group(keyword).strip())
values[keyword] = val
else:
values[keyword] = "N/A"
return values
# Create a new route object using the values from the temporary dictionary
def Create_New_Route(self,match):
route = self.Get_Route_Values_From_Match(match)
route_prefix = route["route"]
if not self.Get_Route_Object(route_prefix):
NewRoute = Route_Template()
NewRoute.route = route["route"]
self.route_table[NewRoute.route] = NewRoute
# Check the detail for the route and append it to the object
def Add_Route_Detail(self,previous_route,line):
route = self.Get_Route_Object(previous_route)
route_patterns = [r'via (?P<next_hop>.*), (?P<interface>.*), (?P<metric>\[.*]), (?P<age>.*?), (?P<protocol>.*)', \
r'via (?P<next_hop>.*), (?P<metric>\[.*]), (?P<age>.*?), (?P<protocol>.*)']
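        # The patterns above target NX-OS "show ip route" detail lines, e.g. (illustrative):
        #   "via 192.0.2.1, Eth1/1, [110/41], 3d02h, ospf-1, intra"
        # and the shorter variant that omits the interface field.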
for pattern in route_patterns:
match = re.search(pattern,line)
if match:
route.next_hop.append(match.group('next_hop').strip())
route.metric.append(match.group('metric').strip())
route.age.append(match.group('age').strip())
route.protocol.append(match.group('protocol').strip().replace(",","_"))
try:
route.interface.append(match.group('interface').strip())
except IndexError:
route.interface.append("N/A")
break
def Get_Host_Range(self,subnet):
try:
range = ipaddress.ip_network(subnet)
return range[1],range[-2]
except ValueError:
return "error", "error"
except IndexError: # Handles /32
return range[0], ""
def Generate_Output_To_File(self):
try:
with open('routes.csv', 'w', newline='') as csv_file:
spamwriter = csv.writer(csv_file, delimiter=',',
quotechar='|',
quoting=csv.QUOTE_MINIMAL)
spamwriter.writerow(['Route', 'Protocol','Metric','Next Hop','Age','Interface','From Range','To Range'])
for entry in sorted(self.route_table):
route = self.Get_Route_Object(entry)
                    first_ip, last_ip = self.Get_Host_Range(route.route)  # pass the prefix string rather than the Route_Template object
for no in range(len(route.protocol)):
spamwriter.writerow([route.route,
route.protocol[no],
route.metric[no],
route.next_hop[no],
route.age[no],
route.interface[no],
first_ip,
last_ip])
print (" -- Output saved to 'routes.csv'")
except:
print (" -- Unable to write to routes.csv, if the file is already open close it.")
def Read_File(self):
start_processing_routes = False
invalid_phrases = ["subnetted"]
with open("routes.txt","r") as route_file:
for line in route_file:
#-----------------------
# Ignore certain input
#-----------------------
if line.count(' ') < 2:
continue
if any(x in line for x in invalid_phrases):
continue
if "<string>" in line:
start_processing_routes = True
continue
line = line.strip().replace("\n","")
if start_processing_routes:
regex = r'(?P<route>[0-9].*), ubest/mbest: (?P<value>.*)'
match = re.search(regex,line)
if match:
self.Create_New_Route(match)
last_route = match.group('route').strip()
continue
self.Add_Route_Detail(last_route, line)
print ("Cisco NXOS Route Parser version: '{}'".format(__version__))
c = RouteParse()
|
abbacode/ciscorouteparser
|
route_parse_nxos.py
|
Python
|
mit
| 5,396
|
import warnings
from django.core.exceptions import MiddlewareNotUsed
from django.db import connection, transaction
from django.utils.deprecation import RemovedInDjango18Warning
class TransactionMiddleware(object):
"""
Transaction middleware. If this is enabled, each view function will be run
with commit_on_response activated - that way a save() doesn't do a direct
commit, the commit is done when a successful response is created. If an
exception happens, the database is rolled back.
"""
def __init__(self):
warnings.warn(
"TransactionMiddleware is deprecated in favor of ATOMIC_REQUESTS.",
RemovedInDjango18Warning, stacklevel=2)
if connection.settings_dict['ATOMIC_REQUESTS']:
raise MiddlewareNotUsed
def process_request(self, request):
"""Enters transaction management"""
transaction.enter_transaction_management()
def process_exception(self, request, exception):
"""Rolls back the database and leaves transaction management"""
if transaction.is_dirty():
# This rollback might fail because of network failure for example.
# If rollback isn't possible it is impossible to clean the
# connection's state. So leave the connection in dirty state and
# let request_finished signal deal with cleaning the connection.
transaction.rollback()
transaction.leave_transaction_management()
def process_response(self, request, response):
"""Commits and leaves transaction management."""
if not transaction.get_autocommit():
if transaction.is_dirty():
# Note: it is possible that the commit fails. If the reason is
# closed connection or some similar reason, then there is
# little hope to proceed nicely. However, in some cases (
                # deferred foreign key checks for example) it is still possible
# to rollback().
try:
transaction.commit()
except Exception:
# If the rollback fails, the transaction state will be
# messed up. It doesn't matter, the connection will be set
# to clean state after the request finishes. And, we can't
# clean the state here properly even if we wanted to, the
# connection is in transaction but we can't rollback...
transaction.rollback()
transaction.leave_transaction_management()
raise
transaction.leave_transaction_management()
return response
|
912/M-new
|
virtualenvironment/experimental/lib/python2.7/site-packages/django/middleware/transaction.py
|
Python
|
gpl-2.0
| 2,703
|
"""bluesky.modules.extrafiles
"""
__author__ = "Joel Dubowy"
__all__ = [
'run'
]
__version__ = "0.1.0"
import os
from bluesky.config import Config
from bluesky.exceptions import BlueSkyConfigurationError
from bluesky.extrafilewriters import (
emissionscsv, firescsvs, smokeready
)
EXTRA_FILE_WRITERS = {
'emissionscsv': emissionscsv.EmissionsCsvWriter,
'firescsvs': firescsvs.FiresCsvsWriter,
'smokeready': smokeready.SmokeReadyWriter
}
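# Illustrative (assumed) configuration shape consumed by this module, inferred from
# the Config() lookups below:
#   {"extrafiles": {"sets": ["firescsvs", "emissionscsv"], "dest_dir": "/path/to/out"}}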
def run(fires_manager):
"""runs the extrafiles module
Args:
- fires_manager -- bluesky.models.fires.FiresManager object
"""
file_sets = [m.lower() for m in Config().get('extrafiles', 'sets')]
fires_manager.processed(__name__, __version__, sets=file_sets)
dest_dir = _get_dest_dir()
writers = get_extra_file_writers(file_sets, dest_dir)
# fires_manager.extrafiles.output.directory needed by export module
fires_manager.extrafiles = {
'output': {
'directory': dest_dir
}
}
for file_set, writer in writers:
fires_manager.extrafiles[file_set] = writer.write(fires_manager)
def get_extra_file_writers(file_sets, dest_dir):
writers = []
for file_set in file_sets:
writer_klass = EXTRA_FILE_WRITERS.get(file_set)
if not writer_klass:
raise BlueSkyConfigurationError("Invalid writer - {}".format(
writer_klass))
writers.append(
(file_set, writer_klass(dest_dir))
)
return writers
def _get_dest_dir():
dest_dir = Config().get('extrafiles', 'dest_dir')
if not dest_dir:
raise BlueSkyConfigurationError("Specify extrafiles destination dir "
"('config' > 'extrafiles' > 'dest_dir')")
dest_dir = os.path.abspath(dest_dir)
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
return dest_dir
|
pnwairfire/bluesky
|
bluesky/modules/extrafiles.py
|
Python
|
gpl-3.0
| 1,867
|
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for compute resource tracking."""
import copy
import datetime
import uuid
import mock
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils import timeutils
import six
from nova.compute.monitors import base as monitor_base
from nova.compute import resource_tracker
from nova.compute import resources
from nova.compute import task_states
from nova.compute import vm_states
from nova import context
from nova import exception
from nova import objects
from nova.objects import base as obj_base
from nova.objects import fields
from nova.objects import pci_device_pool
from nova import rpc
from nova import test
from nova.tests.unit.pci import fakes as pci_fakes
from nova.tests import uuidsentinel
from nova.virt import driver
FAKE_VIRT_MEMORY_MB = 5
FAKE_VIRT_MEMORY_OVERHEAD = 1
FAKE_VIRT_MEMORY_WITH_OVERHEAD = (
FAKE_VIRT_MEMORY_MB + FAKE_VIRT_MEMORY_OVERHEAD)
FAKE_VIRT_NUMA_TOPOLOGY = objects.NUMATopology(
cells=[objects.NUMACell(id=0, cpuset=set([1, 2]), memory=3072,
cpu_usage=0, memory_usage=0, mempages=[],
siblings=[], pinned_cpus=set([])),
objects.NUMACell(id=1, cpuset=set([3, 4]), memory=3072,
cpu_usage=0, memory_usage=0, mempages=[],
siblings=[], pinned_cpus=set([]))])
FAKE_VIRT_NUMA_TOPOLOGY_OVERHEAD = objects.NUMATopologyLimits(
cpu_allocation_ratio=2, ram_allocation_ratio=2)
ROOT_GB = 5
EPHEMERAL_GB = 1
FAKE_VIRT_LOCAL_GB = ROOT_GB + EPHEMERAL_GB
FAKE_VIRT_VCPUS = 1
FAKE_VIRT_STATS = {'virt_stat': 10}
FAKE_VIRT_STATS_COERCED = {'virt_stat': '10'}
FAKE_VIRT_STATS_JSON = jsonutils.dumps(FAKE_VIRT_STATS)
RESOURCE_NAMES = ['vcpu']
CONF = cfg.CONF
class UnsupportedVirtDriver(driver.ComputeDriver):
"""Pretend version of a lame virt driver."""
def __init__(self):
super(UnsupportedVirtDriver, self).__init__(None)
def get_host_ip_addr(self):
return '127.0.0.1'
def get_available_resource(self, nodename):
# no support for getting resource usage info
return {}
class FakeVirtDriver(driver.ComputeDriver):
def __init__(self, pci_support=False, stats=None,
numa_topology=FAKE_VIRT_NUMA_TOPOLOGY):
super(FakeVirtDriver, self).__init__(None)
self.memory_mb = FAKE_VIRT_MEMORY_MB
self.local_gb = FAKE_VIRT_LOCAL_GB
self.vcpus = FAKE_VIRT_VCPUS
self.numa_topology = numa_topology
self.memory_mb_used = 0
self.local_gb_used = 0
self.pci_support = pci_support
self.pci_devices = [
{
'label': 'label_8086_0443',
'dev_type': fields.PciDeviceType.SRIOV_VF,
'compute_node_id': 1,
'address': '0000:00:01.1',
'product_id': '0443',
'vendor_id': '8086',
'status': 'available',
'extra_k1': 'v1',
'numa_node': 1,
'parent_addr': '0000:00:01.0',
},
{
'label': 'label_8086_0443',
'dev_type': fields.PciDeviceType.SRIOV_VF,
'compute_node_id': 1,
'address': '0000:00:01.2',
'product_id': '0443',
'vendor_id': '8086',
'status': 'available',
'extra_k1': 'v1',
'numa_node': 1,
'parent_addr': '0000:00:01.0',
},
{
'label': 'label_8086_0443',
'dev_type': fields.PciDeviceType.SRIOV_PF,
'compute_node_id': 1,
'address': '0000:00:01.0',
'product_id': '0443',
'vendor_id': '8086',
'status': 'available',
'extra_k1': 'v1',
'numa_node': 1,
},
{
'label': 'label_8086_0123',
'dev_type': 'type-PCI',
'compute_node_id': 1,
'address': '0000:00:01.0',
'product_id': '0123',
'vendor_id': '8086',
'status': 'available',
'extra_k1': 'v1',
'numa_node': 1,
},
{
'label': 'label_8086_7891',
'dev_type': fields.PciDeviceType.SRIOV_VF,
'compute_node_id': 1,
'address': '0000:00:01.0',
'product_id': '7891',
'vendor_id': '8086',
'status': 'available',
'extra_k1': 'v1',
'numa_node': None,
'parent_addr': '0000:08:01.0',
},
] if self.pci_support else []
self.pci_stats = [
{
'count': 2,
'vendor_id': '8086',
'product_id': '0443',
'numa_node': 1,
'dev_type': fields.PciDeviceType.SRIOV_VF
},
{
'count': 1,
'vendor_id': '8086',
'product_id': '0443',
'numa_node': 1,
'dev_type': fields.PciDeviceType.SRIOV_PF
},
{
'count': 1,
'vendor_id': '8086',
'product_id': '7891',
'numa_node': None,
'dev_type': fields.PciDeviceType.SRIOV_VF
},
] if self.pci_support else []
if stats is not None:
self.stats = stats
def get_host_ip_addr(self):
return '127.0.0.1'
def get_available_resource(self, nodename):
d = {
'vcpus': self.vcpus,
'memory_mb': self.memory_mb,
'local_gb': self.local_gb,
'vcpus_used': 0,
'memory_mb_used': self.memory_mb_used,
'local_gb_used': self.local_gb_used,
'hypervisor_type': 'fake',
'hypervisor_version': 0,
'hypervisor_hostname': 'fakehost',
'cpu_info': '',
'numa_topology': (
self.numa_topology._to_json() if self.numa_topology else None),
}
if self.pci_support:
d['pci_passthrough_devices'] = jsonutils.dumps(self.pci_devices)
if hasattr(self, 'stats'):
d['stats'] = self.stats
return d
def estimate_instance_overhead(self, instance_info):
instance_info['memory_mb'] # make sure memory value is present
overhead = {
'memory_mb': FAKE_VIRT_MEMORY_OVERHEAD
}
return overhead # just return a constant value for testing
class BaseTestCase(test.TestCase):
@mock.patch('stevedore.enabled.EnabledExtensionManager')
def setUp(self, _mock_ext_mgr):
super(BaseTestCase, self).setUp()
self.flags(reserved_host_disk_mb=0,
reserved_host_memory_mb=0)
self.context = context.get_admin_context()
self._set_pci_passthrough_whitelist()
self.flags(use_local=True, group='conductor')
self.conductor = self.start_service('conductor',
manager=CONF.conductor.manager)
self._instances = {}
self._instance_types = {}
self.stubs.Set(objects.InstanceList, 'get_by_host_and_node',
self._fake_instance_get_by_host_and_node)
self.stubs.Set(self.conductor.db,
'flavor_get', self._fake_flavor_get)
self.host = 'fakehost'
self.compute = self._create_compute_node()
self.updated = False
self.deleted = False
self.update_call_count = 0
def _set_pci_passthrough_whitelist(self):
self.flags(pci_passthrough_whitelist=[
'{"vendor_id": "8086", "product_id": "0443"}',
'{"vendor_id": "8086", "product_id": "7891"}'])
def _create_compute_node(self, values=None):
# This creates a db representation of a compute_node.
compute = {
"id": 1,
"uuid": uuidsentinel.fake_compute_node,
"service_id": 1,
"host": "fakehost",
"vcpus": 1,
"memory_mb": 1,
"local_gb": 1,
"vcpus_used": 1,
"memory_mb_used": 1,
"local_gb_used": 1,
"free_ram_mb": 1,
"free_disk_gb": 1,
"current_workload": 1,
"running_vms": 0,
"cpu_info": None,
"numa_topology": None,
"stats": '{"num_instances": "1"}',
"hypervisor_hostname": "fakenode",
'hypervisor_version': 1,
'hypervisor_type': 'fake-hyp',
'disk_available_least': None,
'host_ip': None,
'metrics': None,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': False,
'cpu_allocation_ratio': None,
'ram_allocation_ratio': None,
'disk_allocation_ratio': None,
}
if values:
compute.update(values)
return compute
def _create_compute_node_obj(self, context):
# Use the db representation of a compute node returned
# by _create_compute_node() to create an equivalent compute
# node object.
compute = self._create_compute_node()
compute_obj = objects.ComputeNode()
compute_obj = objects.ComputeNode._from_db_object(
context, compute_obj, compute)
return compute_obj
def _create_service(self, host="fakehost", compute=None):
if compute:
compute = [compute]
service = {
"id": 1,
"host": host,
"binary": "nova-compute",
"topic": "compute",
"compute_node": compute,
"report_count": 0,
'disabled': False,
'disabled_reason': None,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': False,
'last_seen_up': None,
'forced_down': False,
'version': 0,
}
return service
def _fake_instance_obj(self, stash=True, flavor=None, **kwargs):
# Default to an instance ready to resize to or from the same
# instance_type
flavor = flavor or self._fake_flavor_create()
if not isinstance(flavor, objects.Flavor):
flavor = objects.Flavor(**flavor)
instance_uuid = str(uuid.uuid1())
instance = objects.Instance(context=self.context, uuid=instance_uuid,
flavor=flavor)
instance.update({
'vm_state': vm_states.RESIZED,
'task_state': None,
'ephemeral_key_uuid': None,
'os_type': 'Linux',
'project_id': '123456',
'host': None,
'node': None,
'instance_type_id': flavor['id'],
'memory_mb': flavor['memory_mb'],
'vcpus': flavor['vcpus'],
'root_gb': flavor['root_gb'],
'ephemeral_gb': flavor['ephemeral_gb'],
'launched_on': None,
'system_metadata': {},
'availability_zone': None,
'vm_mode': None,
'reservation_id': None,
'display_name': None,
'default_swap_device': None,
'power_state': None,
'access_ip_v6': None,
'access_ip_v4': None,
'key_name': None,
'updated_at': None,
'cell_name': None,
'locked': None,
'locked_by': None,
'launch_index': None,
'architecture': None,
'auto_disk_config': None,
'terminated_at': None,
'ramdisk_id': None,
'user_data': None,
'cleaned': None,
'deleted_at': None,
'id': 333,
'disable_terminate': None,
'hostname': None,
'display_description': None,
'key_data': None,
'deleted': None,
'default_ephemeral_device': None,
'progress': None,
'launched_at': None,
'config_drive': None,
'kernel_id': None,
'user_id': None,
'shutdown_terminate': None,
'created_at': None,
'image_ref': None,
'root_device_name': None,
})
if stash:
instance.old_flavor = flavor
instance.new_flavor = flavor
instance.numa_topology = kwargs.pop('numa_topology', None)
instance.update(kwargs)
self._instances[instance_uuid] = instance
return instance
def _fake_flavor_create(self, **kwargs):
instance_type = {
'id': 1,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': False,
'disabled': False,
'is_public': True,
'name': 'fakeitype',
'memory_mb': FAKE_VIRT_MEMORY_MB,
'vcpus': FAKE_VIRT_VCPUS,
'root_gb': ROOT_GB,
'ephemeral_gb': EPHEMERAL_GB,
'swap': 0,
'rxtx_factor': 1.0,
'vcpu_weight': 1,
'flavorid': 'fakeflavor',
'extra_specs': {},
}
instance_type.update(**kwargs)
instance_type = objects.Flavor(**instance_type)
id_ = instance_type['id']
self._instance_types[id_] = instance_type
return instance_type
def _fake_instance_get_by_host_and_node(self, context, host, nodename,
expected_attrs=None):
return objects.InstanceList(
objects=[i for i in self._instances.values() if i['host'] == host])
def _fake_flavor_get(self, ctxt, id_):
return self._instance_types[id_]
def _fake_compute_node_update(self, ctx, compute_node_id, values,
prune_stats=False):
self.update_call_count += 1
self.updated = True
self.compute.update(values)
return self.compute
def _driver(self):
return FakeVirtDriver()
def _tracker(self, host=None):
if host is None:
host = self.host
node = "fakenode"
driver = self._driver()
tracker = resource_tracker.ResourceTracker(host, driver, node)
tracker.compute_node = self._create_compute_node_obj(self.context)
tracker.ext_resources_handler = \
resources.ResourceHandler(RESOURCE_NAMES, True)
return tracker
class UnsupportedDriverTestCase(BaseTestCase):
"""Resource tracking should be disabled when the virt driver doesn't
support it.
"""
def setUp(self):
super(UnsupportedDriverTestCase, self).setUp()
self.tracker = self._tracker()
# seed tracker with data:
self.tracker.update_available_resource(self.context)
def _driver(self):
return UnsupportedVirtDriver()
def test_disabled(self):
# disabled = no compute node stats
self.assertTrue(self.tracker.disabled)
self.assertIsNone(self.tracker.compute_node)
def test_disabled_claim(self):
# basic claim:
instance = self._fake_instance_obj()
with mock.patch.object(instance, 'save'):
claim = self.tracker.instance_claim(self.context, instance)
self.assertEqual(0, claim.memory_mb)
def test_disabled_instance_claim(self):
# instance variation:
instance = self._fake_instance_obj()
with mock.patch.object(instance, 'save'):
claim = self.tracker.instance_claim(self.context, instance)
self.assertEqual(0, claim.memory_mb)
@mock.patch('nova.objects.Instance.save')
def test_disabled_instance_context_claim(self, mock_save):
# instance context manager variation:
instance = self._fake_instance_obj()
self.tracker.instance_claim(self.context, instance)
with self.tracker.instance_claim(self.context, instance) as claim:
self.assertEqual(0, claim.memory_mb)
def test_disabled_updated_usage(self):
instance = self._fake_instance_obj(host='fakehost', memory_mb=5,
root_gb=10)
self.tracker.update_usage(self.context, instance)
def test_disabled_resize_claim(self):
instance = self._fake_instance_obj()
instance_type = self._fake_flavor_create()
claim = self.tracker.resize_claim(self.context, instance,
instance_type)
self.assertEqual(0, claim.memory_mb)
self.assertEqual(instance['uuid'], claim.migration['instance_uuid'])
self.assertEqual(instance_type['id'],
claim.migration['new_instance_type_id'])
def test_disabled_resize_context_claim(self):
instance = self._fake_instance_obj()
instance_type = self._fake_flavor_create()
with self.tracker.resize_claim(self.context, instance, instance_type) \
as claim:
self.assertEqual(0, claim.memory_mb)
class MissingComputeNodeTestCase(BaseTestCase):
def setUp(self):
super(MissingComputeNodeTestCase, self).setUp()
self.tracker = self._tracker()
self.stub_out('nova.db.service_get_by_compute_host',
self._fake_service_get_by_compute_host)
self.stub_out('nova.db.compute_node_get_by_host_and_nodename',
self._fake_compute_node_get_by_host_and_nodename)
self.stub_out('nova.db.compute_node_create',
self._fake_create_compute_node)
self.tracker.scheduler_client.update_resource_stats = mock.Mock()
def _fake_create_compute_node(self, context, values):
self.created = True
return self._create_compute_node(values)
def _fake_service_get_by_compute_host(self, ctx, host):
# return a service with no joined compute
service = self._create_service()
return service
def _fake_compute_node_get_by_host_and_nodename(self, ctx, host, nodename):
# return no compute node
raise exception.ComputeHostNotFound(host=host)
def test_create_compute_node(self):
self.tracker.compute_node = None
self.tracker.update_available_resource(self.context)
self.assertTrue(self.created)
def test_enabled(self):
self.tracker.update_available_resource(self.context)
self.assertFalse(self.tracker.disabled)
class BaseTrackerTestCase(BaseTestCase):
def setUp(self):
# setup plumbing for a working resource tracker with required
# database models and a compatible compute driver:
super(BaseTrackerTestCase, self).setUp()
self.tracker = self._tracker()
self._migrations = {}
self.stub_out('nova.db.service_get_by_compute_host',
self._fake_service_get_by_compute_host)
self.stub_out('nova.db.compute_node_get_by_host_and_nodename',
self._fake_compute_node_get_by_host_and_nodename)
self.stub_out('nova.db.compute_node_update',
self._fake_compute_node_update)
self.stub_out('nova.db.compute_node_delete',
self._fake_compute_node_delete)
self.stub_out('nova.db.migration_update',
self._fake_migration_update)
self.stub_out('nova.db.migration_get_in_progress_by_host_and_node',
self._fake_migration_get_in_progress_by_host_and_node)
# Note that this must be called before the call to _init_tracker()
patcher = pci_fakes.fake_pci_whitelist()
self.addCleanup(patcher.stop)
self._init_tracker()
self.limits = self._limits()
def _fake_service_get_by_compute_host(self, ctx, host):
self.service = self._create_service(host, compute=self.compute)
return self.service
def _fake_compute_node_get_by_host_and_nodename(self, ctx, host, nodename):
self.compute = self._create_compute_node()
return self.compute
def _fake_compute_node_update(self, ctx, compute_node_id, values,
prune_stats=False):
self.update_call_count += 1
self.updated = True
self.compute.update(values)
return self.compute
def _fake_compute_node_delete(self, ctx, compute_node_id):
self.deleted = True
self.compute.update({'deleted': 1})
return self.compute
def _fake_migration_get_in_progress_by_host_and_node(self, ctxt, host,
node):
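        # Only migrations still in progress are returned; anything with a
        # terminal status (confirmed, reverted, error) is skipped below.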
status = ['confirmed', 'reverted', 'error']
migrations = []
for migration in self._migrations.values():
migration = obj_base.obj_to_primitive(migration)
if migration['status'] in status:
continue
uuid = migration['instance_uuid']
migration['instance'] = self._instances[uuid]
migrations.append(migration)
return migrations
def _fake_migration_update(self, ctxt, migration_id, values):
# cheat and assume there's only 1 migration present
migration = list(self._migrations.values())[0]
migration.update(values)
return migration
def _init_tracker(self):
self.tracker.update_available_resource(self.context)
def _limits(self, memory_mb=FAKE_VIRT_MEMORY_WITH_OVERHEAD,
disk_gb=FAKE_VIRT_LOCAL_GB,
vcpus=FAKE_VIRT_VCPUS,
numa_topology=FAKE_VIRT_NUMA_TOPOLOGY_OVERHEAD):
"""Create limits dictionary used for oversubscribing resources."""
return {
'memory_mb': memory_mb,
'disk_gb': disk_gb,
'vcpu': vcpus,
'numa_topology': numa_topology,
}
def assertEqualNUMAHostTopology(self, expected, got):
attrs = ('cpuset', 'memory', 'id', 'cpu_usage', 'memory_usage')
if None in (expected, got):
if expected != got:
raise AssertionError("Topologies don't match. Expected: "
"%(expected)s, but got: %(got)s" %
{'expected': expected, 'got': got})
else:
return
if len(expected) != len(got):
raise AssertionError("Topologies don't match due to different "
"number of cells. Expected: "
"%(expected)s, but got: %(got)s" %
{'expected': expected, 'got': got})
for exp_cell, got_cell in zip(expected.cells, got.cells):
for attr in attrs:
if getattr(exp_cell, attr) != getattr(got_cell, attr):
raise AssertionError("Topologies don't match. Expected: "
"%(expected)s, but got: %(got)s" %
{'expected': expected, 'got': got})
def assertEqualPciDevicePool(self, expected, observed):
self.assertEqual(expected.product_id, observed.product_id)
self.assertEqual(expected.vendor_id, observed.vendor_id)
self.assertEqual(expected.tags, observed.tags)
self.assertEqual(expected.count, observed.count)
def assertEqualPciDevicePoolList(self, expected, observed):
ex_objs = expected.objects
ob_objs = observed.objects
self.assertEqual(len(ex_objs), len(ob_objs))
for i in range(len(ex_objs)):
self.assertEqualPciDevicePool(ex_objs[i], ob_objs[i])
def _assert(self, value, field, tracker=None):
if tracker is None:
tracker = self.tracker
if field not in tracker.compute_node:
raise test.TestingException(
"'%(field)s' not in compute node." % {'field': field})
x = tracker.compute_node[field]
if field == 'numa_topology':
self.assertEqualNUMAHostTopology(
value, objects.NUMATopology.obj_from_db_obj(x))
else:
self.assertEqual(value, x)
class TrackerTestCase(BaseTrackerTestCase):
def test_free_ram_resource_value(self):
driver = FakeVirtDriver()
mem_free = driver.memory_mb - driver.memory_mb_used
self.assertEqual(mem_free, self.tracker.compute_node.free_ram_mb)
def test_free_disk_resource_value(self):
driver = FakeVirtDriver()
mem_free = driver.local_gb - driver.local_gb_used
self.assertEqual(mem_free, self.tracker.compute_node.free_disk_gb)
def test_update_compute_node(self):
self.assertFalse(self.tracker.disabled)
self.assertTrue(self.updated)
def test_init(self):
driver = self._driver()
self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb')
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb')
self._assert(FAKE_VIRT_VCPUS, 'vcpus')
self._assert(FAKE_VIRT_NUMA_TOPOLOGY, 'numa_topology')
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'vcpus_used')
self._assert(0, 'running_vms')
self._assert(FAKE_VIRT_MEMORY_MB, 'free_ram_mb')
self._assert(FAKE_VIRT_LOCAL_GB, 'free_disk_gb')
self.assertFalse(self.tracker.disabled)
self.assertEqual(0, self.tracker.compute_node.current_workload)
expected = pci_device_pool.from_pci_stats(driver.pci_stats)
self.assertEqual(len(expected),
len(self.tracker.compute_node.pci_device_pools))
for expected_pool, actual_pool in zip(
expected, self.tracker.compute_node.pci_device_pools):
self.assertEqual(expected_pool, actual_pool)
def test_set_instance_host_and_node(self):
inst = objects.Instance()
with mock.patch.object(inst, 'save') as mock_save:
self.tracker._set_instance_host_and_node(self.context, inst)
mock_save.assert_called_once_with()
self.assertEqual(self.tracker.host, inst.host)
self.assertEqual(self.tracker.nodename, inst.node)
self.assertEqual(self.tracker.host, inst.launched_on)
class SchedulerClientTrackerTestCase(BaseTrackerTestCase):
def setUp(self):
super(SchedulerClientTrackerTestCase, self).setUp()
self.tracker.scheduler_client.update_resource_stats = mock.Mock()
def test_update_resource(self):
        # NOTE(pmurray): we are not doing a full pass through the resource
        # tracker's update path, so it is safest to do two updates and look
        # for differences rather than relying on the initial state being the
        # same as after an update.
urs_mock = self.tracker.scheduler_client.update_resource_stats
self.tracker._update(self.context)
urs_mock.reset_mock()
# change a compute node value to simulate a change
self.tracker.compute_node.local_gb_used += 1
self.tracker._update(self.context)
urs_mock.assert_called_once_with(self.tracker.compute_node)
def test_no_update_resource(self):
        # NOTE(pmurray): we are not doing a full pass through the resource
        # tracker's update path, so it is safest to do two updates and look
        # for differences rather than relying on the initial state being the
        # same as after an update.
self.tracker._update(self.context)
update = self.tracker.scheduler_client.update_resource_stats
update.reset_mock()
self.tracker._update(self.context)
self.assertFalse(update.called, "update_resource_stats should not be "
"called when there is no change")
class TrackerPciStatsTestCase(BaseTrackerTestCase):
def test_update_compute_node(self):
self.assertFalse(self.tracker.disabled)
self.assertTrue(self.updated)
def test_init(self):
driver = self._driver()
self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb')
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb')
self._assert(FAKE_VIRT_VCPUS, 'vcpus')
self._assert(FAKE_VIRT_NUMA_TOPOLOGY, 'numa_topology')
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'vcpus_used')
self._assert(0, 'running_vms')
self._assert(FAKE_VIRT_MEMORY_MB, 'free_ram_mb')
self._assert(FAKE_VIRT_LOCAL_GB, 'free_disk_gb')
self.assertFalse(self.tracker.disabled)
self.assertEqual(0, self.tracker.compute_node.current_workload)
expected_pools = pci_device_pool.from_pci_stats(driver.pci_stats)
observed_pools = self.tracker.compute_node.pci_device_pools
self.assertEqualPciDevicePoolList(expected_pools, observed_pools)
def _driver(self):
return FakeVirtDriver(pci_support=True)
class TrackerExtraResourcesTestCase(BaseTrackerTestCase):
def test_set_empty_ext_resources(self):
resources = self._create_compute_node_obj(self.context)
del resources.stats
self.tracker._write_ext_resources(resources)
self.assertEqual({}, resources.stats)
def test_set_extra_resources(self):
def fake_write_resources(resources):
resources['stats']['resA'] = '123'
resources['stats']['resB'] = 12
self.stubs.Set(self.tracker.ext_resources_handler,
'write_resources',
fake_write_resources)
resources = self._create_compute_node_obj(self.context)
del resources.stats
self.tracker._write_ext_resources(resources)
expected = {"resA": "123", "resB": "12"}
self.assertEqual(sorted(expected),
sorted(resources.stats))
class InstanceClaimTestCase(BaseTrackerTestCase):
def _instance_topology(self, mem):
mem = mem * 1024
return objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(
id=0, cpuset=set([1]), memory=mem),
objects.InstanceNUMACell(
id=1, cpuset=set([3]), memory=mem)])
def _claim_topology(self, mem, cpus=1):
if self.tracker.driver.numa_topology is None:
return None
mem = mem * 1024
return objects.NUMATopology(
cells=[objects.NUMACell(
id=0, cpuset=set([1, 2]), memory=3072, cpu_usage=cpus,
memory_usage=mem, mempages=[], siblings=[],
pinned_cpus=set([])),
objects.NUMACell(
id=1, cpuset=set([3, 4]), memory=3072, cpu_usage=cpus,
memory_usage=mem, mempages=[], siblings=[],
pinned_cpus=set([]))])
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_instance_claim_with_oversubscription(self, mock_get):
memory_mb = FAKE_VIRT_MEMORY_MB * 2
root_gb = ephemeral_gb = FAKE_VIRT_LOCAL_GB
vcpus = FAKE_VIRT_VCPUS * 2
claim_topology = self._claim_topology(3)
instance_topology = self._instance_topology(3)
limits = {'memory_mb': memory_mb + FAKE_VIRT_MEMORY_OVERHEAD,
'disk_gb': root_gb * 2,
'vcpu': vcpus,
'numa_topology': FAKE_VIRT_NUMA_TOPOLOGY_OVERHEAD}
instance = self._fake_instance_obj(memory_mb=memory_mb,
root_gb=root_gb, ephemeral_gb=ephemeral_gb,
numa_topology=instance_topology)
with mock.patch.object(instance, 'save'):
self.tracker.instance_claim(self.context, instance, limits)
self.assertEqual(memory_mb + FAKE_VIRT_MEMORY_OVERHEAD,
self.tracker.compute_node.memory_mb_used)
self.assertEqualNUMAHostTopology(
claim_topology,
objects.NUMATopology.obj_from_db_obj(
self.compute['numa_topology']))
self.assertEqual(root_gb * 2,
self.tracker.compute_node.local_gb_used)
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.Instance.save')
def test_additive_claims(self, mock_save, mock_get):
self.limits['vcpu'] = 2
claim_topology = self._claim_topology(2, cpus=2)
flavor = self._fake_flavor_create(
memory_mb=1, root_gb=1, ephemeral_gb=0)
instance_topology = self._instance_topology(1)
instance = self._fake_instance_obj(
flavor=flavor, numa_topology=instance_topology)
with self.tracker.instance_claim(self.context, instance, self.limits):
pass
instance = self._fake_instance_obj(
flavor=flavor, numa_topology=instance_topology)
with self.tracker.instance_claim(self.context, instance, self.limits):
pass
self.assertEqual(2 * (flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD),
self.tracker.compute_node.memory_mb_used)
self.assertEqual(2 * (flavor['root_gb'] + flavor['ephemeral_gb']),
self.tracker.compute_node.local_gb_used)
self.assertEqual(2 * flavor['vcpus'],
self.tracker.compute_node.vcpus_used)
self.assertEqualNUMAHostTopology(
claim_topology,
objects.NUMATopology.obj_from_db_obj(
self.compute['numa_topology']))
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.Instance.save')
def test_context_claim_with_exception(self, mock_save, mock_get):
instance = self._fake_instance_obj(memory_mb=1, root_gb=1,
ephemeral_gb=1)
try:
with self.tracker.instance_claim(self.context, instance):
# <insert exciting things that utilize resources>
raise test.TestingException()
except test.TestingException:
pass
self.assertEqual(0, self.tracker.compute_node.memory_mb_used)
self.assertEqual(0, self.tracker.compute_node.local_gb_used)
self.assertEqual(0, self.compute['memory_mb_used'])
self.assertEqual(0, self.compute['local_gb_used'])
self.assertEqualNUMAHostTopology(
FAKE_VIRT_NUMA_TOPOLOGY,
objects.NUMATopology.obj_from_db_obj(
self.compute['numa_topology']))
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_update_load_stats_for_instance(self, mock_get):
instance = self._fake_instance_obj(task_state=task_states.SCHEDULING)
with mock.patch.object(instance, 'save'):
with self.tracker.instance_claim(self.context, instance):
pass
self.assertEqual(1, self.tracker.compute_node.current_workload)
instance['vm_state'] = vm_states.ACTIVE
instance['task_state'] = None
instance['host'] = 'fakehost'
self.tracker.update_usage(self.context, instance)
self.assertEqual(0, self.tracker.compute_node.current_workload)
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.Instance.save')
def test_cpu_stats(self, mock_save, mock_get):
limits = {'disk_gb': 100, 'memory_mb': 100}
self.assertEqual(0, self.tracker.compute_node.vcpus_used)
vcpus = 1
instance = self._fake_instance_obj(vcpus=vcpus)
# should not do anything until a claim is made:
self.tracker.update_usage(self.context, instance)
self.assertEqual(0, self.tracker.compute_node.vcpus_used)
with self.tracker.instance_claim(self.context, instance, limits):
pass
self.assertEqual(vcpus, self.tracker.compute_node.vcpus_used)
# instance state can change without modifying vcpus in use:
instance['task_state'] = task_states.SCHEDULING
self.tracker.update_usage(self.context, instance)
self.assertEqual(vcpus, self.tracker.compute_node.vcpus_used)
add_vcpus = 10
vcpus += add_vcpus
instance = self._fake_instance_obj(vcpus=add_vcpus)
with self.tracker.instance_claim(self.context, instance, limits):
pass
self.assertEqual(vcpus, self.tracker.compute_node.vcpus_used)
instance['vm_state'] = vm_states.DELETED
self.tracker.update_usage(self.context, instance)
vcpus -= add_vcpus
self.assertEqual(vcpus, self.tracker.compute_node.vcpus_used)
def test_skip_deleted_instances(self):
        # ensure that the audit process skips instances whose vm_state is
        # DELETED but whose DB record has not yet been deleted.
self._fake_instance_obj(vm_state=vm_states.DELETED, host=self.host)
self.tracker.update_available_resource(self.context)
self.assertEqual(0, self.tracker.compute_node.memory_mb_used)
self.assertEqual(0, self.tracker.compute_node.local_gb_used)
@mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
def test_deleted_instances_with_migrations(self, mock_migration_list):
migration = objects.Migration(context=self.context,
migration_type='resize',
instance_uuid='invalid')
mock_migration_list.return_value = [migration]
self.tracker.update_available_resource(self.context)
self.assertEqual(0, self.tracker.compute_node.memory_mb_used)
self.assertEqual(0, self.tracker.compute_node.local_gb_used)
mock_migration_list.assert_called_once_with(self.context,
"fakehost",
"fakenode")
@mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
def test_instances_with_live_migrations(self, mock_instance_list,
mock_migration_list):
instance = self._fake_instance_obj()
migration = objects.Migration(context=self.context,
migration_type='live-migration',
instance_uuid=instance.uuid)
mock_migration_list.return_value = [migration]
mock_instance_list.return_value = [instance]
with mock.patch.object(self.tracker, '_pair_instances_to_migrations'
) as mock_pair:
self.tracker.update_available_resource(self.context)
self.assertTrue(mock_pair.called)
self.assertEqual(
instance.uuid,
mock_pair.call_args_list[0][0][0][0].instance_uuid)
self.assertEqual(instance.uuid,
mock_pair.call_args_list[0][0][1][0].uuid)
self.assertEqual(
['system_metadata', 'numa_topology', 'flavor',
'migration_context'],
mock_instance_list.call_args_list[0][1]['expected_attrs'])
self.assertEqual(FAKE_VIRT_MEMORY_MB + FAKE_VIRT_MEMORY_OVERHEAD,
self.tracker.compute_node['memory_mb_used'])
self.assertEqual(ROOT_GB + EPHEMERAL_GB,
self.tracker.compute_node['local_gb_used'])
mock_migration_list.assert_called_once_with(self.context,
"fakehost",
"fakenode")
def test_pair_instances_to_migrations(self):
migrations = [objects.Migration(instance_uuid=uuidsentinel.instance1),
objects.Migration(instance_uuid=uuidsentinel.instance2)]
instances = [objects.Instance(uuid=uuidsentinel.instance2),
objects.Instance(uuid=uuidsentinel.instance1)]
self.tracker._pair_instances_to_migrations(migrations, instances)
order = [uuidsentinel.instance1, uuidsentinel.instance2]
for i, migration in enumerate(migrations):
self.assertEqual(order[i], migration.instance.uuid)
@mock.patch('nova.compute.claims.Claim')
@mock.patch('nova.objects.Instance.save')
def test_claim_saves_numa_topology(self, mock_save, mock_claim):
def fake_save():
self.assertEqual(set(['numa_topology', 'host', 'node',
'launched_on']),
inst.obj_what_changed())
mock_save.side_effect = fake_save
inst = objects.Instance(host=None, node=None, memory_mb=1024)
inst.obj_reset_changes()
numa = objects.InstanceNUMATopology()
claim = mock.MagicMock()
claim.claimed_numa_topology = numa
mock_claim.return_value = claim
with mock.patch.object(self.tracker, '_update_usage_from_instance'):
self.tracker.instance_claim(self.context, inst)
mock_save.assert_called_once_with()
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_claim_sets_instance_host_and_node(self, mock_get):
instance = self._fake_instance_obj()
self.assertIsNone(instance['host'])
self.assertIsNone(instance['launched_on'])
self.assertIsNone(instance['node'])
with mock.patch.object(instance, 'save'):
claim = self.tracker.instance_claim(self.context, instance)
self.assertNotEqual(0, claim.memory_mb)
self.assertEqual('fakehost', instance['host'])
self.assertEqual('fakehost', instance['launched_on'])
self.assertEqual('fakenode', instance['node'])
class _MoveClaimTestCase(BaseTrackerTestCase):
def setUp(self):
super(_MoveClaimTestCase, self).setUp()
self.instance = self._fake_instance_obj()
self.instance_type = self._fake_flavor_create()
self.claim_method = self.tracker._move_claim
@mock.patch('nova.objects.Instance.save')
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_additive_claims(self, mock_get, mock_save):
limits = self._limits(
2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD,
2 * FAKE_VIRT_LOCAL_GB,
2 * FAKE_VIRT_VCPUS)
self.claim_method(
self.context, self.instance, self.instance_type, limits=limits)
mock_save.assert_called_once_with()
mock_save.reset_mock()
instance2 = self._fake_instance_obj()
self.claim_method(
self.context, instance2, self.instance_type, limits=limits)
mock_save.assert_called_once_with()
self._assert(2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD, 'memory_mb_used')
self._assert(2 * FAKE_VIRT_LOCAL_GB, 'local_gb_used')
self._assert(2 * FAKE_VIRT_VCPUS, 'vcpus_used')
@mock.patch('nova.objects.Instance.save')
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_move_type_not_tracked(self, mock_get, mock_save):
self.claim_method(self.context, self.instance, self.instance_type,
limits=self.limits, move_type="live-migration")
mock_save.assert_called_once_with()
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'vcpus_used')
self.assertEqual(0, len(self.tracker.tracked_migrations))
@mock.patch('nova.objects.Instance.save')
@mock.patch.object(objects.Migration, 'save')
def test_existing_migration(self, save_mock, save_inst_mock):
migration = objects.Migration(self.context, id=42,
instance_uuid=self.instance.uuid,
source_compute='fake-other-compute',
source_node='fake-other-node',
status='accepted',
migration_type='evacuation')
self.claim_method(self.context, self.instance, self.instance_type,
migration=migration)
self.assertEqual(self.tracker.host, migration.dest_compute)
self.assertEqual(self.tracker.nodename, migration.dest_node)
self.assertEqual("pre-migrating", migration.status)
self.assertEqual(1, len(self.tracker.tracked_migrations))
save_mock.assert_called_once_with()
save_inst_mock.assert_called_once_with()
class ResizeClaimTestCase(_MoveClaimTestCase):
def setUp(self):
super(ResizeClaimTestCase, self).setUp()
self.claim_method = self.tracker.resize_claim
def test_move_type_not_tracked(self):
self.skipTest("Resize_claim does already sets the move_type.")
def test_existing_migration(self):
self.skipTest("Resize_claim does not support having existing "
"migration record.")
class OrphanTestCase(BaseTrackerTestCase):
def _driver(self):
class OrphanVirtDriver(FakeVirtDriver):
def get_per_instance_usage(self):
return {
'1-2-3-4-5': {'memory_mb': FAKE_VIRT_MEMORY_MB,
'uuid': '1-2-3-4-5'},
'2-3-4-5-6': {'memory_mb': FAKE_VIRT_MEMORY_MB,
'uuid': '2-3-4-5-6'},
}
return OrphanVirtDriver()
def test_usage(self):
self.assertEqual(2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD,
self.tracker.compute_node.memory_mb_used)
def test_find(self):
# create one legit instance and verify the 2 orphans remain
self._fake_instance_obj()
orphans = self.tracker._find_orphaned_instances()
self.assertEqual(2, len(orphans))
class ComputeMonitorTestCase(BaseTestCase):
def setUp(self):
super(ComputeMonitorTestCase, self).setUp()
self.tracker = self._tracker()
self.node_name = 'nodename'
self.user_id = 'fake'
self.project_id = 'fake'
self.info = {}
self.context = context.RequestContext(self.user_id,
self.project_id)
def test_get_host_metrics_none(self):
self.tracker.monitors = []
metrics = self.tracker._get_host_metrics(self.context,
self.node_name)
self.assertEqual(len(metrics), 0)
@mock.patch.object(resource_tracker.LOG, 'warning')
def test_get_host_metrics_exception(self, mock_LOG_warning):
monitor = mock.MagicMock()
monitor.add_metrics_to_list.side_effect = Exception
self.tracker.monitors = [monitor]
metrics = self.tracker._get_host_metrics(self.context,
self.node_name)
mock_LOG_warning.assert_called_once_with(
u'Cannot get the metrics from %s.', mock.ANY)
self.assertEqual(0, len(metrics))
def test_get_host_metrics(self):
class FakeCPUMonitor(monitor_base.MonitorBase):
NOW_TS = timeutils.utcnow()
def __init__(self, *args):
super(FakeCPUMonitor, self).__init__(*args)
self.source = 'FakeCPUMonitor'
def get_metric_names(self):
return set(["cpu.frequency"])
def get_metrics(self):
return [("cpu.frequency", 100, self.NOW_TS)]
self.tracker.monitors = [FakeCPUMonitor(None)]
mock_notifier = mock.Mock()
with mock.patch.object(rpc, 'get_notifier',
return_value=mock_notifier) as mock_get:
metrics = self.tracker._get_host_metrics(self.context,
self.node_name)
mock_get.assert_called_once_with(service='compute',
host=self.node_name)
expected_metrics = [
{
'timestamp': FakeCPUMonitor.NOW_TS.isoformat(),
'name': 'cpu.frequency',
'value': 100,
'source': 'FakeCPUMonitor'
},
]
payload = {
'metrics': expected_metrics,
'host': self.tracker.host,
'host_ip': CONF.my_ip,
'nodename': self.node_name
}
mock_notifier.info.assert_called_once_with(
self.context, 'compute.metrics.update', payload)
self.assertEqual(metrics, expected_metrics)
class TrackerPeriodicTestCase(BaseTrackerTestCase):
def test_periodic_status_update(self):
# verify update called on instantiation
self.assertEqual(1, self.update_call_count)
# verify update not called if no change to resources
self.tracker.update_available_resource(self.context)
self.assertEqual(1, self.update_call_count)
# verify update is called when resources change
driver = self.tracker.driver
driver.memory_mb += 1
self.tracker.update_available_resource(self.context)
self.assertEqual(2, self.update_call_count)
def test_update_available_resource_calls_locked_inner(self):
@mock.patch.object(self.tracker, 'driver')
@mock.patch.object(self.tracker,
'_update_available_resource')
@mock.patch.object(self.tracker, '_verify_resources')
@mock.patch.object(self.tracker, '_report_hypervisor_resource_view')
def _test(mock_rhrv, mock_vr, mock_uar, mock_driver):
resources = {'there is someone in my head': 'but it\'s not me'}
mock_driver.get_available_resource.return_value = resources
self.tracker.update_available_resource(self.context)
mock_uar.assert_called_once_with(self.context, resources)
_test()
class StatsDictTestCase(BaseTrackerTestCase):
"""Test stats handling for a virt driver that provides
stats as a dictionary.
"""
def _driver(self):
return FakeVirtDriver(stats=FAKE_VIRT_STATS)
def test_virt_stats(self):
# start with virt driver stats
stats = self.tracker.compute_node.stats
self.assertEqual(FAKE_VIRT_STATS_COERCED, stats)
# adding an instance should keep virt driver stats
self._fake_instance_obj(vm_state=vm_states.ACTIVE, host=self.host)
self.tracker.update_available_resource(self.context)
stats = self.tracker.compute_node.stats
# compute node stats are coerced to strings
expected_stats = copy.deepcopy(FAKE_VIRT_STATS_COERCED)
for k, v in self.tracker.stats.items():
expected_stats[k] = six.text_type(v)
self.assertEqual(expected_stats, stats)
# removing the instances should keep only virt driver stats
self._instances = {}
self.tracker.update_available_resource(self.context)
stats = self.tracker.compute_node.stats
self.assertEqual(FAKE_VIRT_STATS_COERCED, stats)
class StatsInvalidTypeTestCase(BaseTrackerTestCase):
"""Test stats handling for a virt driver that provides
an invalid type for stats.
"""
def _driver(self):
return FakeVirtDriver(stats=10)
def _init_tracker(self):
# do not do initial update in setup
pass
def test_virt_stats(self):
# should throw exception for incorrect stats value type
self.assertRaises(ValueError,
self.tracker.update_available_resource,
context=self.context)
class UpdateUsageFromMigrationsTestCase(BaseTrackerTestCase):
@mock.patch.object(resource_tracker.ResourceTracker,
'_update_usage_from_migration')
def test_no_migrations(self, mock_update_usage):
migrations = []
self.tracker._update_usage_from_migrations(self.context, migrations)
self.assertFalse(mock_update_usage.called)
@mock.patch.object(resource_tracker.ResourceTracker,
'_update_usage_from_migration')
@mock.patch('nova.objects.instance.Instance.get_by_uuid')
def test_instance_not_found(self, mock_get_instance, mock_update_usage):
mock_get_instance.side_effect = exception.InstanceNotFound(
instance_id='some_id',
)
migration = objects.Migration(
context=self.context,
instance_uuid='some_uuid',
)
self.tracker._update_usage_from_migrations(self.context, [migration])
mock_get_instance.assert_called_once_with(self.context, 'some_uuid')
self.assertFalse(mock_update_usage.called)
@mock.patch.object(resource_tracker.ResourceTracker,
'_update_usage_from_migration')
@mock.patch('nova.objects.instance.Instance.get_by_uuid')
def test_update_usage_called(self, mock_get_instance, mock_update_usage):
instance = self._fake_instance_obj()
mock_get_instance.return_value = instance
migration = objects.Migration(
context=self.context,
instance_uuid=instance.uuid,
)
self.tracker._update_usage_from_migrations(self.context, [migration])
mock_get_instance.assert_called_once_with(self.context, instance.uuid)
mock_update_usage.assert_called_once_with(
self.context, instance, None, migration)
@mock.patch.object(resource_tracker.ResourceTracker,
'_update_usage_from_migration')
@mock.patch('nova.objects.instance.Instance.get_by_uuid')
def test_flavor_not_found(self, mock_get_instance, mock_update_usage):
mock_update_usage.side_effect = exception.FlavorNotFound(flavor_id='')
instance = self._fake_instance_obj()
mock_get_instance.return_value = instance
migration = objects.Migration(
context=self.context,
instance_uuid=instance.uuid,
)
self.tracker._update_usage_from_migrations(self.context, [migration])
mock_get_instance.assert_called_once_with(self.context, instance.uuid)
mock_update_usage.assert_called_once_with(
self.context, instance, None, migration)
@mock.patch.object(resource_tracker.ResourceTracker,
'_update_usage_from_migration')
@mock.patch('nova.objects.instance.Instance.get_by_uuid')
def test_not_resizing_state(self, mock_get_instance, mock_update_usage):
instance = self._fake_instance_obj()
instance.vm_state = vm_states.ACTIVE
instance.task_state = task_states.SUSPENDING
mock_get_instance.return_value = instance
migration = objects.Migration(
context=self.context,
instance_uuid=instance.uuid,
)
self.tracker._update_usage_from_migrations(self.context, [migration])
mock_get_instance.assert_called_once_with(self.context, instance.uuid)
self.assertFalse(mock_update_usage.called)
@mock.patch.object(resource_tracker.ResourceTracker,
'_update_usage_from_migration')
@mock.patch('nova.objects.instance.Instance.get_by_uuid')
def test_use_most_recent(self, mock_get_instance, mock_update_usage):
instance = self._fake_instance_obj()
mock_get_instance.return_value = instance
migration_2002 = objects.Migration(
id=2002,
context=self.context,
instance_uuid=instance.uuid,
updated_at=datetime.datetime(2002, 1, 1, 0, 0, 0),
)
migration_2003 = objects.Migration(
id=2003,
context=self.context,
instance_uuid=instance.uuid,
updated_at=datetime.datetime(2003, 1, 1, 0, 0, 0),
)
migration_2001 = objects.Migration(
id=2001,
context=self.context,
instance_uuid=instance.uuid,
updated_at=datetime.datetime(2001, 1, 1, 0, 0, 0),
)
self.tracker._update_usage_from_migrations(
self.context, [migration_2002, migration_2003, migration_2001])
mock_get_instance.assert_called_once_with(self.context, instance.uuid)
mock_update_usage.assert_called_once_with(
self.context, instance, None, migration_2003)
|
dims/nova
|
nova/tests/unit/compute/test_resource_tracker.py
|
Python
|
apache-2.0
| 57,495
|
#!/usr/bin/env python
#
# Copyright (C) 2009-2010:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
"""
Test pickle retention arbiter.
"""
import os
import copy
from shinken_test import unittest, ShinkenTest
from shinken.daemons.arbiterdaemon import Arbiter
from shinken.objects.module import Module
from shinken.modulesctx import modulesctx
pickle_retention_file_generic = modulesctx.get_module('pickle_retention_file_generic')
get_instance = pickle_retention_file_generic.get_instance
modconf = Module()
modconf.module_name = "PickleRetentionGeneric"
modconf.module_type = pickle_retention_file_generic.properties['type']
modconf.properties = pickle_retention_file_generic.properties.copy()
class TestPickleRetentionArbiter(ShinkenTest):
# setUp is inherited from ShinkenTest
def test_pickle_retention(self):
# get our modules
mod = pickle_retention_file_generic.Pickle_retention_generic(
modconf, 'tmp/retention-test.dat')
try:
os.unlink(mod.path)
        except OSError:
pass
sl = get_instance(mod)
# Hack here :(
sl.properties = {}
sl.properties['to_queue'] = None
sl.init()
svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
self.scheduler_loop(1, [[svc, 2, 'BAD | value1=0 value2=0']])
self.sched.get_new_broks()
# Saving the broks we got
old_broks = copy.copy(self.sched.broks)
# Now get a real broker object
arbiter = Arbiter([''], False, False, False, None, None, None)
arbiter.broks = self.sched.broks
        sl.hook_save_retention(arbiter)
        # the broks held by the arbiter are saved into the retention file
        # Now we clear the source broks, as if we had restarted
arbiter.broks.clear()
self.assertEqual(len(arbiter.broks), 0)
r = sl.hook_load_retention(arbiter)
# We check we load them :)
for b in old_broks.values():
found = False
for b2 in arbiter.broks.values():
if b2.id == b.id:
found = True
break
self.assertTrue(found)
# Ok, we can delete the retention file
os.unlink(mod.path)
if __name__ == '__main__':
unittest.main()
|
staute/shinken-mod-pickle-retention-file-generic
|
test/test_module_pickle_retention_arbiter.py
|
Python
|
agpl-3.0
| 3,039
|
#!/usr/bin/env python
__author__ = 'greghines'
import numpy as np
import os
import pymongo
import urllib
import matplotlib.pyplot as plt
import cv2
# the directory to store the movie preview clips in
image_directory = "/home/greg/Databases/chimp/images/"
# connect to the mongodb server
client = pymongo.MongoClient()
db = client['chimp_2015-05-03']
subjects = db["chimp_subjects"]
def mse(imageA, imageB):
# taken from
# http://www.pyimagesearch.com/2014/09/15/python-compare-two-images/
# the 'Mean Squared Error' between the two images is the
# sum of the squared difference between the two images;
# NOTE: the two images must have the same dimension
err = np.sum((imageA.astype("float") - imageB.astype("float")) ** 2)
err /= float(imageA.shape[0] * imageA.shape[1])
# return the MSE, the lower the error, the more "similar"
# the two images are
return err
true_positives = []
false_positives = []
# iterate over a set of subjects
# for each subject get the retirement reason - used to create gold standard data
for ii,s in enumerate(subjects.find().limit(250)):
print ii
id_ = s["zooniverse_id"]
preview_url = s["location"]["previews"][0][0][:-5]
reason = s["metadata"]["retire_reason"]
    # download every preview clip for this subject
for i in range(1,16):
url = preview_url + str(i) + ".jpg"
fname = id_+"_"+str(i)+".jpg"
if not(os.path.isfile(image_directory+fname)):
urllib.urlretrieve(url, image_directory+fname)
# find the maximum mse between all pairs of images
differences = []
for i in range(1,16):
for j in range(i+1,16):
fname1 = image_directory+id_+"_"+str(i)+".jpg"
f1 = cv2.imread(fname1)
fname2 = image_directory+id_+"_"+str(j)+".jpg"
f2 = cv2.imread(fname2)
f1 = cv2.cvtColor(f1,cv2.COLOR_BGR2GRAY)
f2 = cv2.cvtColor(f2,cv2.COLOR_BGR2GRAY)
differences.append(mse(f1,f2))
    # add the maximum inter-frame difference to either the false positive list
    # (if the clip was retired as blank by users) or the true positive list
if reason == "blank":
false_positives.append(max(differences))
else:
true_positives.append(max(differences))
# create the ROC curve
alphas = true_positives[:]
alphas.extend(false_positives)
alphas.sort()
X = []
Y = []
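# For each threshold a, X holds the fraction of "blank" clips (false
# positives) whose maximum inter-frame MSE is >= a, and Y the corresponding
# fraction of non-blank clips (true positives).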
for a in alphas:
X.append(len([x for x in false_positives if x >= a])/float(len(false_positives)))
Y.append(len([y for y in true_positives if y >= a])/float(len(true_positives)))
print len(false_positives)
plt.plot(X,Y)
plt.xlabel("False Positive Count")
plt.ylabel("True Positive Count")
plt.show()
|
zooniverse/aggregation
|
experimental/algorithms/chimp.py
|
Python
|
apache-2.0
| 2,689
|
import json
import re
import sys
debug = False # toggle debug to print broken lines
# All the individual fields we care about
last = r'(?P<lastname>[A-Za-z]+)'
first = r'(?P<firstname>[A-Za-z]+(\s\w\.)?)'
phone = r'(?P<phonenumber>.+)'
color = r'(?P<color>[A-Za-z ]+)'
zipcode = r'(?P<zipcode>\d{5})'
delim = r',\s+'
# Build the known csv formats from the fields
patterns = map(re.compile, [
# Lastname, Firstname, (703)-742-0996, Blue, 10013
delim.join([last, first, phone, color, zipcode + "$"]),
# Firstname Lastname, Red, 11237, 703 955 0373
delim.join([first + " " + last, color, zipcode, phone]),
# Firstname, Lastname, 10013, 646 111 0101, Green
delim.join([first, last, zipcode, phone, color])
])
class Rolodex():
"""Parses and stores contact records from csv."""
def __init__(self): # noqa
        self.errors = []  # Store the entry number of any records that failed to be parsed
self.entries = [] # Correctly parsed entries (unsorted)
self.processed = 0 # Number of raw records we've processed
def insert(self, line):
"""
Insert a new contact into this Rolodex.
@param line: csv input string matching one of the known patterns.
"""
self.processed += 1
data = {}
for pattern in patterns:
m = pattern.match(line)
if not m:
continue
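            # Normalize the phone number: strip all non-digits, then
            # re-hyphenate as XXX-XXX-XXXX (10 digits) or XXX-XXXX (7 digits);
            # any other length is rejected and the record is counted as an error.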
phone = re.sub("[^0-9]", "", m.group("phonenumber"))
if len(phone) == 10:
data["phonenumber"] = "{}-{}-{}".format(phone[:3], phone[3:6], phone[6:])
elif len(phone) == 7:
data["phonenumber"] = "{}-{}".format(phone[:3], phone[3:7])
else:
break # Invalid phone number
for field in ["lastname", "firstname", "color", "zipcode"]:
data[field] = m.group(field)
if data:
self.entries.append(data)
else:
if debug:
print >> sys.stderr, line
self.errors.append(self.processed - 1)
def export(self):
"""Export this object's errors and entries as a dict."""
self.entries.sort(key=lambda d: d["lastname"] + d["firstname"])
return {
"entries": self.entries,
"errors": self.errors
}
def export_json(self, indent=2):
"""Export this object's errors and entries as json formatted string."""
return json.dumps(self.export(), indent=indent, sort_keys=True)
if __name__ == "__main__":
"""
Parse a csv rolodex and export the data as json to a file.
Reads either the lines from files passed as args or stdin.
"""
import fileinput
rolodex = Rolodex()
for line in fileinput.input():
rolodex.insert(line)
with open('result.out', 'w') as f:
f.write(rolodex.export_json())
|
preds/kata
|
rolodex/rolodex.py
|
Python
|
gpl-3.0
| 2,873
|
# coding=utf-8
from qgis.core import qgsfunction
from safe.common.parameters.resource_parameter import ResourceParameter
from safe.definitions.fields import (
under_5_displaced_count_field,
over_60_displaced_count_field)
from safe.definitions.reports.infographic import (
infographic_header,
map_overview_header,
people_section_header,
age_gender_section_header,
vulnerability_section_header,
female_vulnerability_section_header,
minimum_needs_section_header,
additional_minimum_needs_section_header,
population_chart_header,
inasafe_logo_white, age_gender_section_notes, minimum_needs_section_notes)
from safe.definitions.units import exposure_unit
from safe.definitions.utilities import definition
from safe.utilities.i18n import tr
__copyright__ = "Copyright 2016, The InaSAFE Project"
__license__ = "GPL version 3"
__email__ = "info@inasafe.org"
__revision__ = '$Format:%H$'
group = tr('InaSAFE - Infographic Elements')
@qgsfunction(
args='auto', group=group, usesGeometry=False, referencedColumns=[])
def inasafe_logo_white_path(feature, parent):
"""Retrieve the full path of inasafe-logo-white.svg
"""
_ = feature, parent # NOQA
return inasafe_logo_white['path']
@qgsfunction(
args='auto', group=group, usesGeometry=False, referencedColumns=[])
def inasafe_field_header(field, feature, parent):
"""Retrieve a header name of the field name from definitions.
"""
_ = feature, parent # NOQA
age_fields = [under_5_displaced_count_field, over_60_displaced_count_field]
symbol_mapping = {
'over': '>',
'under': '<'
}
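    # Age fields (the under-5 / over-60 displaced counts) are rendered as
    # "< 5 y.o" / "> 60 y.o"; any other known field falls back to its
    # header_name (or name) from the definitions.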
field_definition = definition(field, 'field_name')
if field_definition:
if field_definition in age_fields:
header_format = tr('{symbol} {age} y.o')
field_name = field_definition.get('field_name')
if field_name:
symbol, age = field_name.split('_')[:2]
if symbol.lower() in symbol_mapping.keys():
header_name = header_format.format(
symbol=symbol_mapping[symbol.lower()],
age=age
)
return header_name
header_name = field_definition.get('header_name')
name = field_definition.get('name')
if header_name:
return header_name.capitalize()
else:
return name.capitalize()
return None
@qgsfunction(
args='auto', group=group, usesGeometry=False, referencedColumns=[])
def minimum_needs_unit(field, feature, parent):
"""Retrieve units of the given minimum needs field name.
"""
_ = feature, parent # NOQA
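    # The unit comes either from the field's need_parameter (a
    # ResourceParameter) or from its 'unit' entry; it is rendered as
    # "unit/frequency" unless the need is a one-off such as toilets.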
field_definition = definition(field, 'field_name')
if field_definition:
unit_abbreviation = None
frequency = None
if field_definition.get('need_parameter'):
need = field_definition['need_parameter']
if isinstance(need, ResourceParameter):
unit_abbreviation = need.unit.abbreviation
frequency = need.frequency
elif field_definition.get('unit'):
need_unit = field_definition.get('unit')
unit_abbreviation = need_unit.get('abbreviation')
if field_definition.get('frequency') and not frequency:
frequency = field_definition.get('frequency')
if not unit_abbreviation:
unit_abbreviation = exposure_unit['plural_name']
once_frequency_field_keys = [
'minimum_needs__toilets_count_field'
]
if not frequency or (
field_definition['key'] in once_frequency_field_keys):
return unit_abbreviation.lower()
unit_format = u'{unit_abbreviation}/{frequency}'
return unit_format.format(
unit_abbreviation=unit_abbreviation, frequency=frequency).lower()
return None
@qgsfunction(
args='auto', group=group, usesGeometry=False, referencedColumns=[])
def infographic_header_element(impact_function_name, feature, parent):
"""Given an impact function name, it will format it to an
infographic header sentence.
"""
_ = feature, parent # NOQA
string_format = infographic_header['string_format']
if impact_function_name:
header = string_format.format(
impact_function_name=impact_function_name)
return header.capitalize()
return None
@qgsfunction(
args='auto', group=group, usesGeometry=False, referencedColumns=[])
def map_overview_header_element(feature, parent):
"""Retrieve map overview header string from definitions.
"""
_ = feature, parent # NOQA
header = map_overview_header['string_format']
return header.capitalize()
@qgsfunction(
args='auto', group=group, usesGeometry=False, referencedColumns=[])
def population_chart_header_element(feature, parent):
"""Retrieve population chart header string from definitions.
"""
_ = feature, parent # NOQA
header = population_chart_header['string_format']
return header.capitalize()
@qgsfunction(
args='auto', group=group, usesGeometry=False, referencedColumns=[])
def people_section_header_element(feature, parent):
"""Retrieve people section header string from definitions.
"""
_ = feature, parent # NOQA
header = people_section_header['string_format']
return header.capitalize()
@qgsfunction(
args='auto', group=group, usesGeometry=False, referencedColumns=[])
def age_gender_section_header_element(feature, parent):
"""Retrieve age gender section header string from definitions.
"""
_ = feature, parent # NOQA
header = age_gender_section_header['string_format']
return header.capitalize()
@qgsfunction(
args='auto', group=group, usesGeometry=False, referencedColumns=[])
def age_gender_section_notes_element(feature, parent):
"""Retrieve age gender section notes string from definitions.
"""
_ = feature, parent # NOQA
notes = age_gender_section_notes['string_format']
return notes.capitalize()
@qgsfunction(
args='auto', group=group, usesGeometry=False, referencedColumns=[])
def vulnerability_section_header_element(feature, parent):
"""Retrieve vulnerability section header string from definitions.
"""
_ = feature, parent # NOQA
header = vulnerability_section_header['string_format']
return header.capitalize()
@qgsfunction(
args='auto', group=group, usesGeometry=False, referencedColumns=[])
def female_vulnerability_section_header_element(feature, parent):
"""Retrieve female vulnerability section header string from definitions.
"""
_ = feature, parent # NOQA
header = female_vulnerability_section_header['string_format']
return header.capitalize()
@qgsfunction(
args='auto', group=group, usesGeometry=False, referencedColumns=[])
def minimum_needs_section_header_element(feature, parent):
"""Retrieve minimum needs section header string from definitions.
"""
_ = feature, parent # NOQA
header = minimum_needs_section_header['string_format']
return header.capitalize()
@qgsfunction(
args='auto', group=group, usesGeometry=False, referencedColumns=[])
def additional_minimum_needs_section_header_element(feature, parent):
"""Retrieve additional minimum needs section header string
from definitions.
"""
_ = feature, parent # NOQA
header = additional_minimum_needs_section_header['string_format']
return header.capitalize()
@qgsfunction(
args='auto', group=group, usesGeometry=False, referencedColumns=[])
def minimum_needs_section_notes_element(feature, parent):
"""Retrieve minimum needs section notes string from definitions.
"""
_ = feature, parent # NOQA
notes = minimum_needs_section_notes['string_format']
return notes.capitalize()
|
akbargumbira/inasafe
|
safe/report/expressions/infographic.py
|
Python
|
gpl-3.0
| 7,891
|
# *********************************************
# PyOFTK.ossm() test
# Example from A. de Toroker et al.
# Bragg Soliton
#
#
#
# Author: Martin Laprise
# Universite Laval
# martin.laprise.1@ulaval.ca
#
# *********************************************
from numpy import *
from pylab import *
from PyOFTK.utilities import *
import PyOFTK
import time
t1 = time.time()
storeResults = 0
nbrPoints_z = 10000
# Desired pulse duration [ps]
dureePulse = 47.6948
puissanceCrete = 478.8
LMBD = 1.03
nt = 24000
positionDetecteur = 4999
### Creating an FBG ###
fbgStart = 2000
fbgStop = 8000
# Grating period
#Longueur_onde_bragg = 1.03238
Longueur_onde_bragg = 1.03105615
#Longueur_onde_bragg = 1.0289438500000001
GrandLambda = Longueur_onde_bragg/(2*1.45)
# Index difference
deltaIndex = 3.695e-3
fbg1 = PyOFTK.apodizedFBG(2.5, 62.5, 0.05, 0.0, 40e-2, deltaIndex, GrandLambda)
#gamma = fbg1.nlGamma(1.03)
gamma = 0.2
detZero = fbg1.detuning(LMBD)
detVec = zeros(nbrPoints_z,double)
detVec[fbgStart:fbgStop] += detZero
kapZero = fbg1.kappa(LMBD)
kapVec = zeros(nbrPoints_z,double)
#iz = arange(1.0,nbrPoints_z+1)/nbrPoints_z
#kapVec[3000:nbrPoints_z-1] = kapZero*(iz[3000:nbrPoints_z-1])
kapVec[fbgStart:fbgStop] = kapZero
# Initialization of the incident field
d = linspace(-nbrPoints_z/2, nbrPoints_z/2, nbrPoints_z)
z = linspace(0,fbg1.length, nbrPoints_z)
dz = fbg1.length / nbrPoints_z
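# Group velocity from the grating's first-order dispersion (beta1);
# dt [ps] is the time the pulse takes to cross one spatial step dz.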
Vg = 1.0 / fbg1.beta1(LMBD)
dt = 1e12*(dz / Vg)
dtdz = dureePulse/dt
champ_in = zeros(nbrPoints_z, complex)
pulse = PyOFTK.sechPulse(d,dtdz,0,puissanceCrete,0)
champ_in = PyOFTK.shift(pulse, (nbrPoints_z/2)-4*int(dtdz))
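# Propagate the field through the grating with PyOFTK.ossm(): u_plus / u_moins
# are the forward and backward propagating fields, the *_archive arrays keep
# their evolution over the run, and 'detecteur' is the time trace recorded at
# positionDetecteur.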
[u_plus, u_moins, u_plus_archive, u_moins_archive, detecteur] = PyOFTK.ossm(champ_in, fbg1.length, nt, 0.0, fbg1.beta1(LMBD), kapVec, detVec, gamma, 50, 1.0, positionDetecteur)
elapsed = time.time() - t1
print "************ Simulation Parameters ************ "
print "Pulse Energy: " + str(pow(abs(pulse),2).sum()*dt) + " nJ"
print "FBG Length: " + str(fbg1.length) + " m"
print "Temporel iterations number: " + str(nt)
print "Total duration: " + str(nt*dt) + " ps"
print "Kappa: " + str(kapZero) + " [1/m]"
print "Gamma: " + str(gamma) + "m-1 W-1"
print "Detuning: " + str(detZero) + " [1/m]"
print "Detuning: " + str(1000*(LMBD-Longueur_onde_bragg)) + " [nm]"
print "Detuning: " + str(-((2.998e8)*1000*(LMBD-Longueur_onde_bragg))/(pow(LMBD*1000,2))) + " [GHz]"
print "Processing time: " + str(elapsed) + " secondes" + " (" + str(elapsed/60) + " minutes)"
print "********************************************** "
if storeResults == 1:
PyOFTK.store2hdf5('./temp/z', z)
PyOFTK.store2hdf5('./temp/kapVec', kapVec)
PyOFTK.store2hdf5('./temp/detVec', detVec)
PyOFTK.store2hdf5('./temp/umoins', u_moins_archive)
PyOFTK.store2hdf5('./temp/uplus', u_plus_archive)
#plot(z,pow(abs(champ_in),2), z,pow(abs(u_plus),2), z,pow(abs(u_moins),2))
#legend(("Champ initiale","u+","u-"))
#grid(True)
#show()
PyOFTK.ossmOutputMP4('bragg_soliton',u_plus_archive.T,u_moins_archive.T, z*1000, kapVec, 20, 0, 0)
|
mlaprise/PyOFTK
|
PyOFTK/examples/bragg_soliton_cpu.py
|
Python
|
gpl-2.0
| 3,054
|
from .ExtendedFormatter import ExtendedFormatter as _ExtendedFormatter
from .sql import *
eformat = _ExtendedFormatter()
|
DavidWhittingham/arcpyext
|
arcpyext/_str/__init__.py
|
Python
|
bsd-3-clause
| 121
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Address',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('text', models.TextField(max_length=512, verbose_name=b'Adress')),
('author', models.ForeignKey(verbose_name=b'Anv\xc3\xa4ndare', to=settings.AUTH_USER_MODEL)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Contact',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=128, blank=True)),
('email', models.EmailField(max_length=75, blank=True)),
('phone', models.CharField(max_length=128, null=True, blank=True)),
('timestamp', models.DateTimeField(auto_now_add=True)),
('added_by', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Description',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('text', models.TextField(max_length=512, verbose_name=b'Beskrivning')),
('timestamp', models.DateTimeField(auto_now_add=True)),
('author', models.ForeignKey(verbose_name=b'Anv\xc3\xa4ndare', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['-timestamp'],
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Image',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('caption', models.CharField(max_length=128, blank=True)),
('image', models.ImageField(upload_to=b'images')),
('thumbnail', models.ImageField(upload_to=b'thumbnails', blank=True)),
('timestamp', models.DateTimeField(auto_now_add=True)),
('added_by', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Venue',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=128, verbose_name=b'Namn')),
('audience_min', models.IntegerField(blank=True)),
('audience_max', models.IntegerField(blank=True)),
],
options={
},
bases=(models.Model,),
),
migrations.AddField(
model_name='image',
name='venue',
field=models.ForeignKey(to='web.Venue'),
preserve_default=True,
),
migrations.AddField(
model_name='description',
name='venue',
field=models.ForeignKey(to='web.Venue'),
preserve_default=True,
),
migrations.AddField(
model_name='contact',
name='venue',
field=models.ForeignKey(to='web.Venue'),
preserve_default=True,
),
migrations.AddField(
model_name='address',
name='venue',
field=models.OneToOneField(to='web.Venue'),
preserve_default=True,
),
]
|
eriklavander/klaviaturregistret
|
web/migrations/0001_initial.py
|
Python
|
gpl-2.0
| 4,011
|
from django.shortcuts import render, redirect, get_object_or_404
from bookshelf.forms import (SearchBookForm, AddBookForm,
EditAuthorForm, PostReviewForm,
EditGenreForm)
from bookshelf.models import Book, Author, Genre, BookReview
from collections import OrderedDict
from random import sample
def home_page(request):
form = SearchBookForm()
books = get_random(Book.objects.all(), 5)
authors = get_random(Author.objects.all(), 5)
genres = get_random(Genre.objects.all(), 5)
return render(request, 'homepage.html', locals())
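# Pick `number` random objects from a queryset; implicitly returns None when the queryset is empty.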
def get_random(objects, number):
count = len(objects)
if count:
random_indexes = sample(range(len(objects)), number)
random_objects = [objects[random_index] for random_index in random_indexes]
return random_objects
def see_book(request, book_id):
book = get_object_or_404(Book, pk=book_id)
other_books = set(Book.objects.filter(author=book.author).exclude(title=book.title))
other_books = other_books.union(Book.objects.filter(genre=book.genre).exclude(title=book.title))
return render(request, 'book.html', locals())
def see_author(request, author_id):
author = get_object_or_404(Author, pk=author_id)
genres = set([book.genre for book in author.book_set.all()])
return render(request, 'author.html', locals())
def edit_author(request, author_id):
form = EditAuthorForm()
return render(request, 'edit_author.html', locals())
def save_edit_author(request, author_id):
form = EditAuthorForm(data=request.POST)
if form.is_valid():
new_biography = form.cleaned_data.get('biography')
author = get_object_or_404(Author, pk=author_id)
author.biography = new_biography
author.save()
return redirect(author)
else:
return render(request, 'homepage.html', {'form': SearchBookForm()})
def edit_genre(request, genre_id):
form = EditGenreForm()
return render(request, 'edit_genre.html', locals())
def save_edit_genre(request, genre_id):
form = EditGenreForm(data=request.POST)
if form.is_valid():
genre = get_object_or_404(Genre, pk=genre_id)
genre.name = form.cleaned_data.get('name')
genre.description = form.cleaned_data.get('description')
genre.save()
return redirect(genre)
else:
return render(request, 'homepage.html', {'form': SearchBookForm()})
def see_genre(request, genre_id):
genre = get_object_or_404(Genre, pk=genre_id)
return render(request, 'genre.html', locals())
def add_book_form(request):
form = AddBookForm()
return render(request, 'add_book.html', locals())
def add_book(request):
form = AddBookForm(data=request.POST)
if form.is_valid():
book_author = get_author_of_book(request.POST['author_name'])
book_genre = get_genre_of_book(request.POST['genre_name'])
book = Book.objects.create(title=form.cleaned_data.get('title'),
author=book_author,
genre=book_genre,
synopsis=form.cleaned_data.get('synopsis'),
pub_date=form.cleaned_data.get('publication_date'))
return redirect(book)
else:
return render(request, 'homepage.html', {'form': SearchBookForm()})
def get_book(request):
books = Book.objects.filter(title=request.POST['title'])
if len(books) < 1:
return add_book_form(request)
else:
wanted_book = books[0]
return redirect(wanted_book)
def list_books(request):
alphabetic_books = get_alphabetic_dictionary(Book.objects.all())
return render(request, 'list_books.html', locals())
def high_rated_books(request):
rated_books = [book for book in Book.objects.all()
if book.rating() != BookReview.NO_RATING]
highly_rated_books = sorted(rated_books,
key=lambda book: book.rating(),
reverse=True)
return render(request, 'highest_rated_books.html', locals())
def list_authors(request):
alphabetic_authors = get_alphabetic_dictionary(Author.objects.all())
return render(request, 'list_authors.html', locals())
def list_genres(request):
alphabetic_genres = get_alphabetic_dictionary(Genre.objects.all())
return render(request, 'list_genres.html', locals())
def get_author_of_book(author_name):
authors = Author.objects.filter(name=author_name)
if len(authors) < 1:
author = Author.objects.create(name=author_name)
return author
else:
return authors[0]
def get_genre_of_book(genre_name):
genres = Genre.objects.filter(name=genre_name)
if len(genres) < 1:
genre = Genre.objects.create(name=genre_name)
return genre
else:
return genres[0]
def about(request):
book_count = Book.objects.count()
author_count = Author.objects.count()
genre_count = Genre.objects.count()
return render(request, 'about.html', locals())
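# Group objects by the first letter of their string representation, returned as an alphabetically ordered dict.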
def get_alphabetic_dictionary(objects):
objects = sorted(objects)
alphabetic_dictionary = {first_letter:
[object for object in objects
if str(object).startswith(first_letter)]
for first_letter in set(str(object)[0]
for object in objects)}
return OrderedDict(sorted(alphabetic_dictionary.items(), key=lambda t: t[0]))
def post_review(request, book_id):
form = PostReviewForm()
return render(request, 'post_review.html', locals())
def save_review(request, book_id):
form = PostReviewForm(data=request.POST)
if form.is_valid():
book = Book.objects.get(pk=book_id)
BookReview.objects.create(text=form.cleaned_data.get('text'),
reviewer=get_user(request),
score=form.cleaned_data.get('score'),
book=book)
return redirect(book)
else:
return render(request, 'homepage.html', {'form': SearchBookForm()})
def get_user(request):
if request.user.is_authenticated():
return request.user
|
tdhris/MyBookShelf
|
bookshelf/views.py
|
Python
|
gpl-2.0
| 6,306
|
from __future__ import absolute_import, division, unicode_literals
from six import text_type
import re
from codecs import register_error, xmlcharrefreplace_errors
from .constants import voidElements, booleanAttributes, spaceCharacters
from .constants import rcdataElements, entities, xmlEntities
from . import treewalkers, _utils
from xml.sax.saxutils import escape
_quoteAttributeSpecChars = "".join(spaceCharacters) + "\"'=<>`"
_quoteAttributeSpec = re.compile("[" + _quoteAttributeSpecChars + "]")
_quoteAttributeLegacy = re.compile("[" + _quoteAttributeSpecChars +
"\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n"
"\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15"
"\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
"\x20\x2f\x60\xa0\u1680\u180e\u180f\u2000"
"\u2001\u2002\u2003\u2004\u2005\u2006\u2007"
"\u2008\u2009\u200a\u2028\u2029\u202f\u205f"
"\u3000]")
_encode_entity_map = {}
_is_ucs4 = len("\U0010FFFF") == 1
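# Build a reverse map from codepoint to entity name so the "htmlentityreplace" error handler can emit named character references.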
for k, v in list(entities.items()):
# skip multi-character entities
if ((_is_ucs4 and len(v) > 1) or
(not _is_ucs4 and len(v) > 2)):
continue
if v != "&":
if len(v) == 2:
v = _utils.surrogatePairToCodepoint(v)
else:
v = ord(v)
if v not in _encode_entity_map or k.islower():
            # prefer &lt; over &LT; and similarly for &amp;, &gt;, etc.
_encode_entity_map[v] = k
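# Codec error handler: unencodable characters are replaced with named entities where possible, numeric character references otherwise.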
def htmlentityreplace_errors(exc):
if isinstance(exc, (UnicodeEncodeError, UnicodeTranslateError)):
res = []
codepoints = []
skip = False
for i, c in enumerate(exc.object[exc.start:exc.end]):
if skip:
skip = False
continue
index = i + exc.start
if _utils.isSurrogatePair(exc.object[index:min([exc.end, index + 2])]):
codepoint = _utils.surrogatePairToCodepoint(exc.object[index:index + 2])
skip = True
else:
codepoint = ord(c)
codepoints.append(codepoint)
for cp in codepoints:
e = _encode_entity_map.get(cp)
if e:
res.append("&")
res.append(e)
if not e.endswith(";"):
res.append(";")
else:
res.append("&#x%s;" % (hex(cp)[2:]))
return ("".join(res), exc.end)
else:
return xmlcharrefreplace_errors(exc)
register_error("htmlentityreplace", htmlentityreplace_errors)
def serialize(input, tree="etree", encoding=None, **serializer_opts):
"""Serializes the input token stream using the specified treewalker
:arg input: the token stream to serialize
:arg tree: the treewalker to use
:arg encoding: the encoding to use
:arg serializer_opts: any options to pass to the
:py:class:`html5lib.serializer.HTMLSerializer` that gets created
:returns: the tree serialized as a string
Example:
>>> from html5lib.html5parser import parse
>>> from html5lib.serializer import serialize
>>> token_stream = parse('<html><body><p>Hi!</p></body></html>')
>>> serialize(token_stream, omit_optional_tags=False)
'<html><head></head><body><p>Hi!</p></body></html>'
"""
# XXX: Should we cache this?
walker = treewalkers.getTreeWalker(tree)
s = HTMLSerializer(**serializer_opts)
return s.render(walker(input), encoding)
class HTMLSerializer(object):
# attribute quoting options
quote_attr_values = "legacy" # be secure by default
quote_char = '"'
use_best_quote_char = True
# tag syntax options
omit_optional_tags = True
minimize_boolean_attributes = True
use_trailing_solidus = False
space_before_trailing_solidus = True
# escaping options
escape_lt_in_attrs = False
escape_rcdata = False
resolve_entities = True
# miscellaneous options
alphabetical_attributes = False
inject_meta_charset = True
strip_whitespace = False
sanitize = False
options = ("quote_attr_values", "quote_char", "use_best_quote_char",
"omit_optional_tags", "minimize_boolean_attributes",
"use_trailing_solidus", "space_before_trailing_solidus",
"escape_lt_in_attrs", "escape_rcdata", "resolve_entities",
"alphabetical_attributes", "inject_meta_charset",
"strip_whitespace", "sanitize")
def __init__(self, **kwargs):
"""Initialize HTMLSerializer
:arg inject_meta_charset: Whether or not to inject the meta charset.
Defaults to ``True``.
:arg quote_attr_values: Whether to quote attribute values that don't
require quoting per legacy browser behavior (``"legacy"``), when
required by the standard (``"spec"``), or always (``"always"``).
Defaults to ``"legacy"``.
:arg quote_char: Use given quote character for attribute quoting.
Defaults to ``"`` which will use double quotes unless attribute
value contains a double quote, in which case single quotes are
used.
:arg escape_lt_in_attrs: Whether or not to escape ``<`` in attribute
values.
Defaults to ``False``.
:arg escape_rcdata: Whether to escape characters that need to be
escaped within normal elements within rcdata elements such as
style.
Defaults to ``False``.
:arg resolve_entities: Whether to resolve named character entities that
          appear in the source tree. The XML predefined entities &lt; &gt;
& " ' are unaffected by this setting.
Defaults to ``True``.
:arg strip_whitespace: Whether to remove semantically meaningless
whitespace. (This compresses all whitespace to a single space
except within ``pre``.)
Defaults to ``False``.
:arg minimize_boolean_attributes: Shortens boolean attributes to give
just the attribute value, for example::
<input disabled="disabled">
becomes::
<input disabled>
Defaults to ``True``.
:arg use_trailing_solidus: Includes a close-tag slash at the end of the
start tag of void elements (empty elements whose end tag is
forbidden). E.g. ``<hr/>``.
Defaults to ``False``.
:arg space_before_trailing_solidus: Places a space immediately before
the closing slash in a tag using a trailing solidus. E.g.
``<hr />``. Requires ``use_trailing_solidus=True``.
Defaults to ``True``.
:arg sanitize: Strip all unsafe or unknown constructs from output.
See :py:class:`html5lib.filters.sanitizer.Filter`.
Defaults to ``False``.
:arg omit_optional_tags: Omit start/end tags that are optional.
Defaults to ``True``.
:arg alphabetical_attributes: Reorder attributes to be in alphabetical order.
Defaults to ``False``.
"""
unexpected_args = frozenset(kwargs) - frozenset(self.options)
if len(unexpected_args) > 0:
raise TypeError("__init__() got an unexpected keyword argument '%s'" % next(iter(unexpected_args)))
if 'quote_char' in kwargs:
self.use_best_quote_char = False
for attr in self.options:
setattr(self, attr, kwargs.get(attr, getattr(self, attr)))
self.errors = []
self.strict = False
def encode(self, string):
assert(isinstance(string, text_type))
if self.encoding:
return string.encode(self.encoding, "htmlentityreplace")
else:
return string
def encodeStrict(self, string):
assert(isinstance(string, text_type))
if self.encoding:
return string.encode(self.encoding, "strict")
else:
return string
def serialize(self, treewalker, encoding=None):
# pylint:disable=too-many-nested-blocks
self.encoding = encoding
in_cdata = False
self.errors = []
if encoding and self.inject_meta_charset:
from .filters.inject_meta_charset import Filter
treewalker = Filter(treewalker, encoding)
# Alphabetical attributes is here under the assumption that none of
# the later filters add or change order of attributes; it needs to be
# before the sanitizer so escaped elements come out correctly
if self.alphabetical_attributes:
from .filters.alphabeticalattributes import Filter
treewalker = Filter(treewalker)
# WhitespaceFilter should be used before OptionalTagFilter
        # for maximum efficiency of this latter filter
if self.strip_whitespace:
from .filters.whitespace import Filter
treewalker = Filter(treewalker)
if self.sanitize:
from .filters.sanitizer import Filter
treewalker = Filter(treewalker)
if self.omit_optional_tags:
from .filters.optionaltags import Filter
treewalker = Filter(treewalker)
for token in treewalker:
type = token["type"]
if type == "Doctype":
doctype = "<!DOCTYPE %s" % token["name"]
if token["publicId"]:
doctype += ' PUBLIC "%s"' % token["publicId"]
elif token["systemId"]:
doctype += " SYSTEM"
if token["systemId"]:
if token["systemId"].find('"') >= 0:
if token["systemId"].find("'") >= 0:
                            self.serializeError("System identifier contains both single and double quote characters")
quote_char = "'"
else:
quote_char = '"'
doctype += " %s%s%s" % (quote_char, token["systemId"], quote_char)
doctype += ">"
yield self.encodeStrict(doctype)
elif type in ("Characters", "SpaceCharacters"):
if type == "SpaceCharacters" or in_cdata:
if in_cdata and token["data"].find("</") >= 0:
self.serializeError("Unexpected </ in CDATA")
yield self.encode(token["data"])
else:
yield self.encode(escape(token["data"]))
elif type in ("StartTag", "EmptyTag"):
name = token["name"]
yield self.encodeStrict("<%s" % name)
if name in rcdataElements and not self.escape_rcdata:
in_cdata = True
elif in_cdata:
self.serializeError("Unexpected child element of a CDATA element")
for (_, attr_name), attr_value in token["data"].items():
# TODO: Add namespace support here
k = attr_name
v = attr_value
yield self.encodeStrict(' ')
yield self.encodeStrict(k)
if not self.minimize_boolean_attributes or \
(k not in booleanAttributes.get(name, tuple()) and
k not in booleanAttributes.get("", tuple())):
yield self.encodeStrict("=")
if self.quote_attr_values == "always" or len(v) == 0:
quote_attr = True
elif self.quote_attr_values == "spec":
quote_attr = _quoteAttributeSpec.search(v) is not None
elif self.quote_attr_values == "legacy":
quote_attr = _quoteAttributeLegacy.search(v) is not None
else:
raise ValueError("quote_attr_values must be one of: "
"'always', 'spec', or 'legacy'")
                    v = v.replace("&", "&amp;")
                    if self.escape_lt_in_attrs:
                        v = v.replace("<", "&lt;")
if quote_attr:
quote_char = self.quote_char
if self.use_best_quote_char:
if "'" in v and '"' not in v:
quote_char = '"'
elif '"' in v and "'" not in v:
quote_char = "'"
if quote_char == "'":
                            v = v.replace("'", "&#39;")
                        else:
                            v = v.replace('"', "&quot;")
yield self.encodeStrict(quote_char)
yield self.encode(v)
yield self.encodeStrict(quote_char)
else:
yield self.encode(v)
if name in voidElements and self.use_trailing_solidus:
if self.space_before_trailing_solidus:
yield self.encodeStrict(" /")
else:
yield self.encodeStrict("/")
yield self.encode(">")
elif type == "EndTag":
name = token["name"]
if name in rcdataElements:
in_cdata = False
elif in_cdata:
self.serializeError("Unexpected child element of a CDATA element")
yield self.encodeStrict("</%s>" % name)
elif type == "Comment":
data = token["data"]
if data.find("--") >= 0:
self.serializeError("Comment contains --")
yield self.encodeStrict("<!--%s-->" % token["data"])
elif type == "Entity":
name = token["name"]
key = name + ";"
if key not in entities:
self.serializeError("Entity %s not recognized" % name)
if self.resolve_entities and key not in xmlEntities:
data = entities[key]
else:
data = "&%s;" % name
yield self.encodeStrict(data)
else:
self.serializeError(token["data"])
def render(self, treewalker, encoding=None):
"""Serializes the stream from the treewalker into a string
:arg treewalker: the treewalker to serialize
:arg encoding: the string encoding to use
:returns: the serialized tree
Example:
>>> from html5lib import parse, getTreeWalker
>>> from html5lib.serializer import HTMLSerializer
>>> token_stream = parse('<html><body>Hi!</body></html>')
>>> walker = getTreeWalker('etree')
>>> serializer = HTMLSerializer(omit_optional_tags=False)
>>> serializer.render(walker(token_stream))
'<html><head></head><body>Hi!</body></html>'
"""
if encoding:
return b"".join(list(self.serialize(treewalker, encoding)))
else:
return "".join(list(self.serialize(treewalker)))
def serializeError(self, data="XXX ERROR MESSAGE NEEDED"):
# XXX The idea is to make data mandatory.
self.errors.append(data)
if self.strict:
raise SerializeError
class SerializeError(Exception):
"""Error in serialized tree"""
pass
|
sserrot/champion_relationships
|
venv/Lib/site-packages/bleach/_vendor/html5lib/serializer.py
|
Python
|
mit
| 15,746
|
########################################################################
## File name: generate_readme.py ##
## Author: Rick van Rheenen ##
## Date created: 2016-04-14 ##
## Date last modified: 2016-04-16 ##
## Description: README.MD generator for OpenKattis github repository ##
########################################################################
##
## Instructions:
## Execute from the root of the OpenKattis Folder containing
## the language folders which contain the problems.
## EG: python3 scripts/generate_readme.py > README.MD
##
## TODO:
## - catch failed cUrls
## - document this script..
import os
import io
import random
import pycurl
from bs4 import BeautifulSoup
from operator import attrgetter
def make_readme(problems):
'''print the readme'''
print("# OpenKattis")
print("My solutions to the problems on https://open.kattis.com/.\n")
print("Kattis code by Mylène Martodihardjo.\n")
print("OpenKattis script made by Rick van Rheenen (https://github.com/rvrheenen/OpenKattis).\n")
print("## Problems")
solved_problems = problems.search("solved", True)
solved_amount = solved_problems.count()
print(make_table(["Problem", "Language", "Difficulty"], solved_problems.get(["link", "language", "difficulty"]), "lmm", "Solved Problems:"))
print("#### Total solved: " + str(solved_amount))
for lang in solved_problems.get_distinct_vars("language"):
print("###### Solved in " + lang + ": " + str(solved_problems.search("language", lang).count() ) + " (" + str(round((solved_problems.search("language", lang).count() / solved_amount) * 100, 2)) + "%)")
print("#### Average score: " + str(round(solved_problems.get_total_score()/solved_problems.count(),2)) )
print("#### Total score: " + str(1 + solved_problems.get_total_score()))
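    # problems tied for the highest difficulty among the solved set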
highest = solved_problems.search("difficulty", str(max([float(x[0]) for x in solved_problems.get("difficulty")])) )
print(make_table(["Problem", "Language", "Difficulty"], highest.get(["link", "language", "difficulty"]), None, "Highest difficulty solved"))
print(make_table(["Problem", "Language", "Difficulty"], problems.search("solved", False).sort("difficulty").get(["link", "language", "difficulty"]), "lmm", "Unsolved Problems:"))
def main():
make_readme(get_problems().sort())
def debug():
problems = ProblemsList()
problems.add(Problem("hello", "Python", True))
make_readme(problems)
def make_table(head, rows, aligns=None, title=None):
'''Generates a table with given input, returns String'''
if len(rows.problems if isinstance(rows, ProblemsList) else rows) < 1:
return ''
table = "#### " + title + "\n" if title != None else ""
table += make_table_row(head)
align = parse_aligns(aligns if aligns != None else ("l" + "m"*(len(head)-1)))
table += make_table_row(align)
if isinstance(rows, list):
if not isinstance(rows[0], list):
rows = [rows]
for row in rows.get() if isinstance(rows, ProblemsList) else rows:
table += make_table_row(row.get() if isinstance(row, Problem) else row)
return table
def make_table_row(line):
'''Generates a single table row from the input'''
return "".join("| "+str(x)+ " " for x in line) + "|\n"
def parse_aligns(align):
'''Converts a list or string of [l,r,m] into md syntax for table alignment, returns list'''
ret = []
markup = {'l':':---', 'm':':---:', 'r':'---:'}
for k in align:
ret.append(markup[k])
return ret
def get_problems():
'''finds all problems as Problems adds them to ProblemsList and returns ProblemsList'''
known_languages = "C C# C++ Go Haskell Java Javascript Objective-C PHP Prolog Python Ruby".split()
problems = ProblemsList()
for lang in get_folders():
if lang in known_languages:
for problem_folder in get_folders(lang):
listdir = os.listdir(get_path([lang,problem_folder]))
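                # a ".ignore" marker file skips the problem entirely; ".unsolved" marks it as not yet solved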
if ".ignore" in listdir:
continue
solved = False if ".unsolved" in listdir else True
problems.add(Problem(problem_folder, lang, solved))
return problems
def get_path(dir = None, path = None):
'''Gets path from given dir and path. If none given returns root of file. Assumes root is parent directory.'''
if path == None:
path = os.path.abspath(".")
if dir != None:
if type(dir) == str:
dir = [dir]
for d in dir:
path += os.sep + d
return path
def get_folders(folder = None):
'''Gets all folders in folder, returns as list'''
dir = get_path(folder)
return [d for d in os.listdir(dir) if d not in get_ignores() and os.path.isdir(get_path(d, dir)) and d[0] != "."]
def get_ignores():
'''Gets all entries in the .gitignore, returns as list'''
try:
return [x.rstrip(os.sep) for x in open(get_path('.gitignore'),'r').read().splitlines()]
except (OSError, IOError):
return []
class Problem:
'''An individual problem'''
def __init__(self, id, language=None, solved=None):
'''Creates a new Problem, with given parameters. id may also be a Problem, then its information will be copied. '''
if isinstance(id, Problem):
self.id = id.id
self.language = id.language
self.solved = id.solved
else:
self.id = id
self.language = language
self.solved = solved
for x in self.scrape_kattis(self.id):
setattr(self, x[0], x[1])
setattr(self, "link", self.get_flink())
def __repr__(self):
return "[%s]" % "".join([x + ": " + str(getattr(self, x))+", " for x in vars(self)])[:-2]
def get(self, atrrs = None):
'''Get a problem, with all (default) or only a few of its attributes, returns list'''
if atrrs == None:
return [getattr(self, x) for x in self.get_var_names()]
else:
if type(atrrs) == str:
atrrs = [atrrs]
return [getattr(self, x) for x in atrrs if x in self.get_var_names()]
def get_var_names(self):
'''Get the names of all this problems variables, returns list'''
return [x for x in vars(self)]
def scrape_kattis(self, pname=None):
''' Scrapes the OpenKattis site to get the needed data on this problem.
Returns list with lists: [atrrname, atrr]
Current atrrnames: time, memory, difficulty, authors, source and name
'''
url = 'https://open.kattis.com/problems/' + (pname.lower() if pname != None else self.id)
e = io.BytesIO()
c = pycurl.Curl()
c.setopt(c.URL, url)
c.setopt(c.WRITEFUNCTION, e.write)
c.setopt(pycurl.USERAGENT, 'Mozilla/5.0 (Windows; U; Windows NT 6.1; it; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (.NET CLR 3.5.30729)') #prevents browser banning
c.perform()
c.close()
body = e.getvalue().decode('UTF-8')
soup = BeautifulSoup(body, 'html.parser')
kattis_attributes = []
for s in soup.find_all("div", {"class": "sidebar-info"}):
for p in s.find_all("p"):
t = p.find_all(text=True)
t = [x for x in t if x != "\n"]
t[0] = "".join(x for x in t[0].strip().rstrip(':').lower().split(" ")[0] if x not in '(){}<>') #nasty
if len(t) < 2 or t[0] in ['problem','license', 'download']:
continue
if not t in kattis_attributes:
kattis_attributes.append(t)
kattis_attributes.append(["url", url])
kattis_attributes.append(["name", str(soup.title.string).replace("– Kattis, Kattis", "")])
return kattis_attributes
def get_flink(self):
'''Get formatted link, returns string'''
try:
return "[%s] (%s)" % (self.name, self.url)
except AttributeError:
return "[%s] (%s)" % (self.id, self.url)
class ProblemsList:
'''List of all problems'''
def __init__(self, probs = None):
'''Create new List of problems. If probs is given copy it.'''
self.problems = [] if probs == None else probs.copy()
def __repr__(self):
return repr(self.problems)
def add(self, problem):
'''Add Problem to the list'''
self.problems.append(problem)
def get(self, atrrs = None):
'''Get all problem, with all (default) or only a few of its attributes, returns list of Problems.get()'''
return [p.get(atrrs) for p in self.problems]
def sort(self, sortby="name", rv=False):
'''Sorts the ProblemsList, returns self'''
if self.count() == 0:
return self
if not hasattr(self.get_random(), sortby):
sortby = "id"
self.problems = sorted(self.problems, key=attrgetter(sortby), reverse=rv)
return self
def count(self, type = None, q = None):
'''Count problems, count all if no type and q given, else count occurence of type=q, return int'''
return len(self.problems) if type == None and q == None else self.search(type, q).count()
def search(self, type, q):
'''Find all occurence of type=q in problems, return ProblemsList'''
return ProblemsList([x for x in self.problems if getattr(x, type) == q])
def get_total_score(self, type=None, q=None):
'''Count scores of problems, count all if no type and q given, else count score of type=q, return int'''
count = 0
for prob in (self.problems if type == None and q == None else self.search(type, q).problems):
count += float(prob.get("difficulty")[0])
return round(count,1)
def get_distinct_vars(self, var):
'''Returns all distinct occurrences of var in problems, returns list'''
distinct = []
for p in self.problems:
a = getattr(p, var)
if a not in distinct:
distinct.append(a)
return distinct
def get_random(self):
'''Returns random Problem'''
return self.problems[random.randrange(0, self.count())]
if __name__ == "__main__": main()
|
iMylene/kattis
|
scripts/generate_readme.py
|
Python
|
mit
| 10,424
|
from main import Yify
def start():
return Yify()
config = [{
'name': 'yify',
'groups': [
{
'tab': 'searcher',
'list': 'torrent_providers',
'name': 'Yify',
'description': 'Free provider, less accurate. Small HD movies, encoded by <a href="https://yify-torrents.com/">Yify</a>.',
'wizard': False,
'options': [
{
'name': 'enabled',
'type': 'enabler',
'default': 0
},
{
'name': 'domain',
'advanced': True,
'label': 'Proxy server',
'description': 'Domain for requests, keep empty to let CouchPotato pick.',
},
{
'name': 'seed_ratio',
'label': 'Seed ratio',
'type': 'float',
'default': 1,
'description': 'Will not be (re)moved until this seed ratio is met.',
},
{
'name': 'seed_time',
'label': 'Seed time',
'type': 'int',
'default': 40,
'description': 'Will not be (re)moved until this seed time (in hours) is met.',
},
{
'name': 'extra_score',
'advanced': True,
'label': 'Extra Score',
'type': 'int',
'default': 0,
'description': 'Starting score for each release found via this provider.',
}
],
}
]
}]
|
rooi/CouchPotatoServer
|
couchpotato/core/providers/torrent/yify/__init__.py
|
Python
|
gpl-3.0
| 1,735
|
## Errors for all modules
##
## Biskit, a toolkit for the manipulation of macromolecular structures
## Copyright (C) 2004-2018 Raik Gruenberg & Johan Leckner
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You find a copy of the GNU General Public License in the file
## license.txt along with this program; if not, write to the Free
## Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
##
##
"""
Errors raised by Biskit scripts.
"""
class BiskitError( Exception ):
"""
Root for all Errors raised by Biskit scripts.
"""
pass
class HandledError( BiskitError ):
"""
Error raised by the ErrorHandler after an Error has been reported.
"""
pass
class FatalError( HandledError ):
"""
Error raised by the ErrorHandler after a fatal Error has been reported.
"""
pass
class NormalError( HandledError ):
"""
Error raised by the ErrorHandler after a normal Error has been reported.
"""
pass
class XplorInputError( BiskitError ):
"""
Errors raised while generating xplor input script
"""
pass
##################
## (FAKE) TESTING
###################
import biskit.test as BT
class Test(BT.BiskitTest):
"""Error test"""
def test_Errors( self ):
"""Errors test (empty test)"""
pass
|
graik/biskit
|
biskit/errors.py
|
Python
|
gpl-3.0
| 1,755
|
#!/usr/bin/env python
# -*- coding: utf-8; tab-width: 4; indent-tabs-mode: t -*-
#
# NetProfile: Access module - Models
# © Copyright 2013-2015 Alex 'Unik' Unigovsky
#
# This file is part of NetProfile.
# NetProfile is free software: you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later
# version.
#
# NetProfile is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General
# Public License along with NetProfile. If not, see
# <http://www.gnu.org/licenses/>.
from __future__ import (
unicode_literals,
print_function,
absolute_import,
division
)
__all__ = [
'AccessBlock',
'AccessEntity',
'AccessEntityLink',
'AccessEntityLinkType',
'PerUserRateModifier',
'AcctAddProcedure',
'AcctAuthzProcedure',
'AcctPollProcedure',
'AcctRateModsProcedure',
'AcctRollbackProcedure',
'CheckAuthFunction',
'AccessblockExpireEvent',
'AcctPollEvent'
]
import datetime as dt
from sqlalchemy import (
Boolean,
Column,
DateTime,
FetchedValue,
ForeignKey,
Index,
Sequence,
TIMESTAMP,
Unicode,
UnicodeText,
func,
text
)
from sqlalchemy.orm import (
backref,
relationship
)
from sqlalchemy.ext.associationproxy import association_proxy
from netprofile.db.connection import (
Base,
DBSession
)
from netprofile.db.fields import (
ASCIIString,
DeclEnum,
Money,
NPBoolean,
Traffic,
UInt8,
UInt16,
UInt32,
UInt64,
npbool
)
from netprofile.db.ddl import (
Comment,
CurrentTimestampDefault,
InArgument,
InOutArgument,
OutArgument,
SQLEvent,
SQLFunction,
SQLFunctionArgument,
Trigger
)
from netprofile.ext.columns import MarkupColumn
from netprofile.ext.wizards import (
SimpleWizard,
Wizard,
Step,
ExternalWizardField
)
from netprofile.ext.data import (
ExtModel,
_name_to_class
)
from netprofile.common.hooks import register_hook
from pyramid.i18n import (
TranslationStringFactory,
get_localizer
)
from pyramid.threadlocal import get_current_request
from netprofile_entities.models import (
Entity,
EntityType
)
_ = TranslationStringFactory('netprofile_access')
EntityType.add_symbol('access', ('access', _('Access'), 50))
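# Hook into the generic Entity creation wizard and append a step with the access-specific fields (password, stash, rate).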
@register_hook('np.wizard.init.entities.Entity')
def _wizcb_aent_init(wizard, model, req):
def _wizcb_aent_submit(wiz, em, step, act, val, req):
sess = DBSession()
em = ExtModel(AccessEntity)
obj = AccessEntity()
# Work around field name clash
if 'state' in val:
del val['state']
em.set_values(obj, val, req, True)
sess.add(obj)
return {
'do' : 'close',
'reload' : True
}
wizard.steps.append(Step(
ExternalWizardField('AccessEntity', 'password'),
ExternalWizardField('AccessEntity', 'stash'),
ExternalWizardField('AccessEntity', 'rate'),
id='ent_access1', title=_('Access entity properties'),
on_prev='generic',
on_submit=_wizcb_aent_submit
))
class AccessState(DeclEnum):
"""
Enumeration of access entity status codes
"""
ok = 0, _('OK'), 10
block_auto = 1, _('Blocked automatically'), 20
block_manual = 2, _('Blocked manually'), 30
block_maxsim = 3, _('Blocked after reaching max sessions'), 40
block_rejected = 4, _('Rejected'), 50
block_inactive = 5, _('Inactive'), 60
error = 99, _('Error'), 70
class AccessBlockState(DeclEnum):
planned = 'planned', _('Planned'), 10
active = 'active', _('Active'), 20
expired = 'expired', _('Expired'), 30
class AccessEntity(Entity):
"""
Access entity object.
"""
DN_ATTR = 'uid'
__tablename__ = 'entities_access'
__table_args__ = (
Comment('Access entities'),
Index('entities_access_i_stashid', 'stashid'),
Index('entities_access_i_rateid', 'rateid'),
Index('entities_access_i_aliasid', 'aliasid'),
Index('entities_access_i_ipaddrid', 'ipaddrid'),
Index('entities_access_i_ip6addrid', 'ip6addrid'),
Index('entities_access_i_nextrateid', 'nextrateid'),
Trigger('before', 'insert', 't_entities_access_bi'),
Trigger('before', 'update', 't_entities_access_bu'),
Trigger('after', 'update', 't_entities_access_au'),
Trigger('after', 'delete', 't_entities_access_ad'),
{
'mysql_engine' : 'InnoDB',
'mysql_charset' : 'utf8',
'info' : {
'cap_menu' : 'BASE_ENTITIES',
'cap_read' : 'ENTITIES_LIST',
'cap_create' : 'ENTITIES_CREATE',
'cap_edit' : 'ENTITIES_EDIT',
'cap_delete' : 'ENTITIES_DELETE',
'show_in_menu' : 'modules',
'menu_name' : _('Access Entities'),
'menu_parent' : 'entities',
'default_sort' : ({ 'property': 'nick' ,'direction': 'ASC' },),
'grid_view' : (
MarkupColumn(
name='icon',
header_string=' ',
help_text=_('Entity icon'),
column_width=22,
column_name=_('Icon'),
column_resizable=False,
cell_class='np-nopad',
template='<img class="np-block-img" src="{grid_icon}" />'
),
'entityid',
'nick', 'stash', 'rate'
),
'grid_hidden' : ('entityid',),
'form_view' : (
'nick', 'parent', 'state', 'flags',
'password', 'stash', 'rate', 'next_rate', #'alias_of',
'ipv4_address', 'ipv6_address',
'ut_ingress', 'ut_egress', 'u_sec',
'qpend', 'access_state',
'pol_ingress', 'pol_egress',
'bcheck', 'pcheck',
'descr'
),
'easy_search' : ('nick',),
'extra_data' : ('grid_icon',),
'detail_pane' : ('netprofile_core.views', 'dpane_simple'),
'create_wizard' : Wizard(
Step(
'nick', 'parent', 'state',
'flags', 'descr',
id='generic', title=_('Generic entity properties'),
),
Step(
'password', 'stash', 'rate',
id='ent_access1', title=_('Access entity properties'),
),
title=_('Add new access entity'), validator='CreateAccessEntity'
)
}
}
)
__mapper_args__ = {
'polymorphic_identity' : EntityType.access
}
id = Column(
'entityid',
UInt32(),
ForeignKey('entities_def.entityid', name='entities_access_fk_entityid', ondelete='CASCADE', onupdate='CASCADE'),
Comment('Entity ID'),
primary_key=True,
nullable=False,
info={
'header_string' : _('ID')
}
)
password = Column(
Unicode(255),
Comment('Cleartext password'),
nullable=False,
info={
'header_string' : _('Password'),
'secret_value' : True,
'editor_xtype' : 'passwordfield'
}
)
stash_id = Column(
'stashid',
UInt32(),
ForeignKey('stashes_def.stashid', name='entities_access_fk_stashid', onupdate='CASCADE'),
Comment('Used stash ID'),
nullable=False,
info={
'header_string' : _('Stash'),
'column_flex' : 3
}
)
rate_id = Column(
'rateid',
UInt32(),
ForeignKey('rates_def.rateid', name='entities_access_fk_rateid', onupdate='CASCADE'),
Comment('Used rate ID'),
nullable=False,
info={
'header_string' : _('Rate'),
'column_flex' : 2
}
)
alias_of_id = Column(
'aliasid',
UInt32(),
ForeignKey('entities_access.entityid', name='entities_access_fk_aliasid', ondelete='CASCADE', onupdate='CASCADE'),
Comment('Aliased access entity ID'),
nullable=True,
default=None,
server_default=text('NULL'),
info={
'header_string' : _('Alias Of'),
'filter_type' : 'none'
}
)
next_rate_id = Column(
'nextrateid',
UInt32(),
ForeignKey('rates_def.rateid', name='entities_access_fk_nextrateid', ondelete='SET NULL', onupdate='CASCADE'),
Comment('Next rate ID'),
nullable=True,
default=None,
server_default=text('NULL'),
info={
'header_string' : _('Next Rate')
}
)
ipv4_address_id = Column(
'ipaddrid',
UInt32(),
ForeignKey('ipaddr_def.ipaddrid', name='entities_access_fk_ipaddrid', ondelete='SET NULL', onupdate='CASCADE'),
Comment('IPv4 address ID'),
nullable=True,
default=None,
server_default=text('NULL'),
info={
'header_string' : _('IPv4 Address')
}
)
ipv6_address_id = Column(
'ip6addrid',
UInt64(),
ForeignKey('ip6addr_def.ip6addrid', name='entities_access_fk_ip6addrid', ondelete='SET NULL', onupdate='CASCADE'),
Comment('IPv6 address ID'),
nullable=True,
default=None,
server_default=text('NULL'),
info={
'header_string' : _('IPv6 Address')
}
)
used_traffic_ingress = Column(
'ut_ingress',
Traffic(),
Comment('Used ingress traffic'),
nullable=False,
default=0,
server_default=text('0'),
info={
'header_string' : _('Used Ingress')
}
)
used_traffic_egress = Column(
'ut_egress',
Traffic(),
Comment('Used egress traffic'),
nullable=False,
default=0,
server_default=text('0'),
info={
'header_string' : _('Used Egress')
}
)
used_seconds = Column(
'u_sec',
UInt32(),
Comment('Used seconds'),
nullable=False,
default=0,
server_default=text('0'),
info={
'header_string' : _('Used Seconds')
}
)
quota_period_end = Column(
'qpend',
TIMESTAMP(),
Comment('End of quota period'),
nullable=True,
default=None,
server_default=FetchedValue(),
info={
'header_string' : _('Ends')
}
)
access_state = Column(
'state',
UInt8(),
Comment('Access code'),
nullable=False,
default=0,
server_default=text('0'),
info={
'header_string' : _('Access Code'),
'write_cap' : 'ENTITIES_ACCOUNTSTATE_EDIT'
}
)
policy_ingress = Column(
'pol_ingress',
ASCIIString(255),
Comment('Ingress traffic policy'),
nullable=True,
default=None,
server_default=text('NULL'),
info={
'header_string' : _('Ingress Policy')
}
)
policy_egress = Column(
'pol_egress',
ASCIIString(255),
Comment('Egress traffic policy'),
nullable=True,
default=None,
server_default=text('NULL'),
info={
'header_string' : _('Egress Policy')
}
)
check_block_state = Column(
'bcheck',
NPBoolean(),
Comment('Check block state'),
nullable=False,
default=False,
server_default=npbool(False),
info={
'header_string' : _('Check Blocks')
}
)
check_paid_services = Column(
'pcheck',
NPBoolean(),
Comment('Check paid services'),
nullable=False,
default=False,
server_default=npbool(False),
info={
'header_string' : _('Check Services')
}
)
stash = relationship(
'Stash',
innerjoin=True,
backref='access_entities'
)
rate = relationship(
'Rate',
innerjoin=True,
foreign_keys=rate_id,
backref='access_entities'
)
next_rate = relationship(
'Rate',
foreign_keys=next_rate_id,
backref='pending_access_entities'
)
alias_of = relationship(
'AccessEntity',
foreign_keys=alias_of_id,
remote_side=[id],
backref='aliases'
)
ipv4_address = relationship(
'IPv4Address',
backref='access_entities'
)
ipv6_address = relationship(
'IPv6Address',
backref='access_entities'
)
blocks = relationship(
'AccessBlock',
backref=backref('entity', innerjoin=True),
cascade='all, delete-orphan',
passive_deletes=True
)
def access_state_string(self, req):
loc = get_localizer(req)
if self.access_state is None:
return None
return loc.translate(AccessState.from_string(self.access_state).description)
def grid_icon(self, req):
return req.static_url('netprofile_access:static/img/access.png')
class PerUserRateModifier(Base):
"""
Per-user rate modifier definition
"""
__tablename__ = 'rates_mods_peruser'
__table_args__ = (
Comment('Per-user rate modifiers'),
Index('rates_mods_peruser_u_mapping', 'rmtid', 'entityid', 'rateid', unique=True),
Index('rates_mods_peruser_i_entityid', 'entityid'),
Index('rates_mods_peruser_i_rateid', 'rateid'),
Index('rates_mods_peruser_i_l_ord', 'l_ord'),
Trigger('before', 'insert', 't_rates_mods_peruser_bi'),
{
'mysql_engine' : 'InnoDB',
'mysql_charset' : 'utf8',
'info' : {
'cap_menu' : 'BASE_ENTITIES', # FIXME
'cap_read' : 'ENTITIES_LIST', # FIXME
'cap_create' : 'ENTITIES_EDIT', # FIXME
'cap_edit' : 'ENTITIES_EDIT', # FIXME
'cap_delete' : 'ENTITIES_EDIT', # FIXME
'menu_name' : _('Rate Modifiers'),
'default_sort' : ({ 'property': 'l_ord', 'direction': 'ASC' },),
'grid_view' : ('rmid', 'entity', 'rate', 'type', 'enabled', 'l_ord'),
'grid_hidden' : ('rmid',),
'create_wizard' : SimpleWizard(title=_('Add new rate modifier'))
}
}
)
id = Column(
'rmid',
UInt32(),
Sequence('rates_mods_peruser_rmid_seq'),
Comment('Rate modifier ID'),
primary_key=True,
nullable=False,
info={
'header_string' : _('ID')
}
)
type_id = Column(
'rmtid',
UInt32(),
Comment('Rate modifier type ID'),
ForeignKey('rates_mods_types.rmtid', name='rates_mods_peruser_fk_rmtid', ondelete='CASCADE', onupdate='CASCADE'),
nullable=False,
info={
'header_string' : _('Type'),
'filter_type' : 'list',
'column_flex' : 1
}
)
entity_id = Column(
'entityid',
UInt32(),
Comment('Access entity ID'),
ForeignKey('entities_access.entityid', name='rates_mods_peruser_fk_entityid', ondelete='CASCADE', onupdate='CASCADE'),
nullable=False,
info={
'header_string' : _('Account'),
'filter_type' : 'none',
'column_flex' : 1
}
)
rate_id = Column(
'rateid',
UInt32(),
Comment('Rate ID'),
ForeignKey('rates_def.rateid', name='rates_mods_peruser_fk_rateid', ondelete='CASCADE', onupdate='CASCADE'),
nullable=True,
default=None,
server_default=text('NULL'),
info={
'header_string' : _('Rate'),
'filter_type' : 'list',
'column_flex' : 1
}
)
creation_time = Column(
'ctime',
TIMESTAMP(),
Comment('Creation timestamp'),
nullable=True,
default=None,
server_default=FetchedValue(),
info={
'header_string' : _('Created'),
'read_only' : True
}
)
enabled = Column(
NPBoolean(),
Comment('Is modifier enabled?'),
nullable=False,
default=False,
server_default=npbool(False),
info={
'header_string' : _('Enabled')
}
)
lookup_order = Column(
'l_ord',
UInt16(),
Comment('Lookup order'),
nullable=False,
default=1000,
server_default=text('1000'),
info={
'header_string' : _('Lookup Order')
}
)
type = relationship(
'RateModifierType',
innerjoin=True,
backref=backref(
'per_user_modifiers',
cascade='all, delete-orphan',
passive_deletes=True
)
)
entity = relationship(
'AccessEntity',
innerjoin=True,
backref=backref(
'rate_modifiers',
cascade='all, delete-orphan',
passive_deletes=True
)
)
rate = relationship(
'Rate',
backref=backref(
'per_user_modifiers',
cascade='all, delete-orphan',
passive_deletes=True
)
)
class AccessBlock(Base):
"""
Access block entry object.
"""
__tablename__ = 'accessblock_def'
__table_args__ = (
Comment('Access entity blocks'),
Index('accessblock_def_i_entityid', 'entityid'),
Index('accessblock_def_i_bstate_start', 'bstate', 'startts'),
Index('accessblock_def_i_startts', 'startts'),
Trigger('before', 'insert', 't_accessblock_def_bi'),
Trigger('before', 'update', 't_accessblock_def_bu'),
Trigger('after', 'insert', 't_accessblock_def_ai'),
Trigger('after', 'update', 't_accessblock_def_au'),
{
'mysql_engine' : 'InnoDB',
'mysql_charset' : 'utf8',
'info' : {
'cap_menu' : 'BASE_ENTITIES', # FIXME
'cap_read' : 'ENTITIES_LIST', # FIXME
'cap_create' : 'ENTITIES_EDIT', # FIXME
'cap_edit' : 'ENTITIES_EDIT', # FIXME
'cap_delete' : 'ENTITIES_EDIT', # FIXME
'menu_name' : _('Access Blocks'),
'default_sort' : ({ 'property': 'startts' ,'direction': 'ASC' },),
'grid_view' : ('abid', 'entity', 'startts', 'endts', 'bstate'),
'grid_hidden' : ('abid',),
'form_view' : ('entity', 'startts', 'endts', 'bstate', 'oldstate'),
'detail_pane' : ('netprofile_core.views', 'dpane_simple'),
'create_wizard' : SimpleWizard(title=_('Add new access block'))
}
}
)
id = Column(
'abid',
UInt32(),
Sequence('accessblock_def_abid_seq'),
Comment('Access block ID'),
primary_key=True,
nullable=False,
info={
'header_string' : _('ID')
}
)
entity_id = Column(
'entityid',
UInt32(),
Comment('Access entity ID'),
ForeignKey('entities_access.entityid', name='accessblock_def_fk_entityid', ondelete='CASCADE', onupdate='CASCADE'),
nullable=False,
info={
'header_string' : _('Account'),
'column_flex' : 2,
'filter_type' : 'none'
}
)
start = Column(
'startts',
TIMESTAMP(),
Comment('Start of block'),
CurrentTimestampDefault(),
nullable=False,
info={
'header_string' : _('Start'),
'column_flex' : 1
}
)
end = Column(
'endts',
TIMESTAMP(),
Comment('End of block'),
nullable=False,
info={
'header_string' : _('End'),
'column_flex' : 1
}
)
state = Column(
'bstate',
AccessBlockState.db_type(),
Comment('Block state'),
nullable=False,
default=AccessBlockState.expired,
server_default=AccessBlockState.expired,
info={
'header_string' : _('State')
}
)
old_entity_state = Column(
'oldstate',
UInt8(),
Comment('Old entity state'),
nullable=False,
default=0,
server_default=text('0'),
info={
'header_string' : _('Access State')
}
)
def __str__(self):
# FIXME: use datetime range with formats
return '%s:' % str(self.entity)
class AccessEntityLinkType(Base):
"""
Access entity link type object.
"""
__tablename__ = 'entities_access_linktypes'
__table_args__ = (
Comment('Access entity link types'),
Index('entities_access_linktypes_u_name', 'name', unique=True),
{
'mysql_engine' : 'InnoDB',
'mysql_charset' : 'utf8',
'info' : {
'cap_menu' : 'BASE_ENTITIES', # FIXME
'cap_read' : 'ENTITIES_LIST', # FIXME
'cap_create' : 'ENTITIES_EDIT', # FIXME
'cap_edit' : 'ENTITIES_EDIT', # FIXME
'cap_delete' : 'ENTITIES_EDIT', # FIXME
'show_in_menu' : 'admin',
'menu_name' : _('Link Types'),
'default_sort' : ({ 'property': 'name' ,'direction': 'ASC' },),
'grid_view' : ('ltid', 'name'),
'grid_hidden' : ('ltid',),
'form_view' : ('name', 'descr'),
'easy_search' : ('name',),
'detail_pane' : ('netprofile_core.views', 'dpane_simple'),
'create_wizard' : SimpleWizard(title=_('Add new link type'))
}
}
)
id = Column(
'ltid',
UInt32(),
Sequence('entities_access_linktypes_ltid_seq'),
Comment('Link type ID'),
primary_key=True,
nullable=False,
info={
'header_string' : _('ID')
}
)
name = Column(
Unicode(255),
Comment('Link type name'),
nullable=False,
info={
'header_string' : _('Name'),
'column_flex' : 1
}
)
description = Column(
'descr',
UnicodeText(),
Comment('Link type description'),
nullable=True,
default=None,
server_default=text('NULL'),
info={
'header_string' : _('Description')
}
)
def __str__(self):
return '%s' % self.name
class AccessEntityLink(Base):
"""
Access entity link object.
"""
__tablename__ = 'entities_access_links'
__table_args__ = (
Comment('Access entity links'),
Index('entities_access_links_i_entityid', 'entityid'),
Index('entities_access_links_i_ltid', 'ltid'),
Index('entities_access_links_i_value', 'value'),
{
'mysql_engine' : 'InnoDB',
'mysql_charset' : 'utf8',
'info' : {
'cap_menu' : 'BASE_ENTITIES', # FIXME
'cap_read' : 'ENTITIES_LIST', # FIXME
'cap_create' : 'ENTITIES_EDIT', # FIXME
'cap_edit' : 'ENTITIES_EDIT', # FIXME
'cap_delete' : 'ENTITIES_EDIT', # FIXME
'menu_name' : _('Links'),
'default_sort' : ({ 'property': 'ltid' ,'direction': 'ASC' },),
'grid_view' : ('lid', 'entity', 'type', 'ts', 'value'),
'grid_hidden' : ('lid',),
'easy_search' : ('value',),
'create_wizard' : SimpleWizard(title=_('Add new link'))
}
}
)
id = Column(
'lid',
UInt32(),
Sequence('entities_access_links_lid_seq'),
Comment('Link ID'),
primary_key=True,
nullable=False,
info={
'header_string' : _('ID')
}
)
entity_id = Column(
'entityid',
UInt32(),
Comment('Access entity ID'),
ForeignKey('entities_access.entityid', name='entities_access_links_fk_entityid', ondelete='CASCADE', onupdate='CASCADE'),
nullable=False,
info={
'header_string' : _('Entity'),
'column_flex' : 2
}
)
type_id = Column(
'ltid',
UInt32(),
Comment('Link type ID'),
ForeignKey('entities_access_linktypes.ltid', name='entities_access_links_fk_ltid', ondelete='CASCADE', onupdate='CASCADE'),
nullable=False,
info={
'header_string' : _('Type'),
'column_flex' : 2
}
)
timestamp = Column(
'ts',
TIMESTAMP(),
Comment('Service timestamp'),
CurrentTimestampDefault(),
nullable=True,
default=None,
info={
'header_string' : _('Timestamp'),
'column_flex' : 1
}
)
value = Column(
Unicode(255),
Comment('Link value'),
nullable=False,
info={
'header_string' : _('Value'),
'column_flex' : 3
}
)
entity = relationship(
'AccessEntity',
innerjoin=True,
backref=backref(
'links',
cascade='all, delete-orphan',
passive_deletes=True
)
)
type = relationship(
'AccessEntityLinkType',
innerjoin=True,
backref=backref(
'links',
cascade='all, delete-orphan',
passive_deletes=True
)
)
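# SQL functions, stored procedures and scheduled events for accounting and authorization, declared with the netprofile.db.ddl helpers.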
CheckAuthFunction = SQLFunction(
'check_auth',
args=(
SQLFunctionArgument('name', Unicode(255)),
SQLFunctionArgument('pass', Unicode(255)),
),
returns=Boolean(),
comment='Check auth information',
writes_sql=False
)
AcctAddProcedure = SQLFunction(
'acct_add',
args=(
InArgument('aeid', UInt32()),
InArgument('username', Unicode(255)),
InArgument('tin', Traffic()),
InArgument('teg', Traffic()),
InArgument('ts', DateTime())
),
comment='Add accounting information',
label='aafunc',
is_procedure=True
)
AcctAuthzProcedure = SQLFunction(
'acct_authz',
args=(
InArgument('name', Unicode(255)),
),
comment='Get authorized account info',
writes_sql=False,
label='authzfunc',
is_procedure=True
)
AcctPollProcedure = SQLFunction(
'acct_poll',
args=(
InArgument('ts', DateTime()),
),
comment='Poll accounts for time-based changes',
is_procedure=True
)
AcctRateModsProcedure = SQLFunction(
'acct_rate_mods',
args=(
InArgument('ts', DateTime()),
InArgument('rateid', UInt32()),
InArgument('entityid', UInt32()),
InOutArgument('oqsum_in', Money()),
InOutArgument('oqsum_eg', Money()),
InOutArgument('oqsum_sec', Money()),
InOutArgument('pol_in', ASCIIString(255)),
InOutArgument('pol_eg', ASCIIString(255))
),
comment='Apply rate modifiers',
writes_sql=False,
label='armfunc',
is_procedure=True
)
AcctRollbackProcedure = SQLFunction(
'acct_rollback',
args=(
InArgument('aeid', UInt32()),
InArgument('ts', DateTime()),
InOutArgument('xstashid', UInt32()),
InArgument('xrateid_old', UInt32()),
InOutArgument('xrateid_new', UInt32()),
InOutArgument('uti', Traffic()),
InOutArgument('ute', Traffic()),
InOutArgument('xqpend', DateTime()),
InOutArgument('xstate', UInt8()),
OutArgument('xdiff', Money())
),
comment='Rollback current period for an account',
label='rbfunc',
is_procedure=True
)
AccessblockExpireEvent = SQLEvent(
'ev_accessblock_expire',
sched_unit='day',
sched_interval=1,
comment='Find and mark expired access blocks'
)
AcctPollEvent = SQLEvent(
'ev_acct_poll',
sched_unit='day',
sched_interval=1,
starts=dt.datetime.combine(dt.date.today(), dt.time(0, 0, 1)),
comment='Perform passive accounting'
)
|
nikitos/npui
|
netprofile_access/netprofile_access/models.py
|
Python
|
agpl-3.0
| 23,665
|
from starcluster.clustersetup import ClusterSetup
from starcluster.logger import log
class SalmonInstaller(ClusterSetup):
def run(self, nodes, master, user, user_shell, volumes):
for node in nodes:
log.info("Installing Salmon 0.3.0 on %s" % (node.alias))
node.ssh.execute('mkdir -p /opt/software/salmon/0.3.0/')
node.ssh.execute('wget -c -P /opt/software/salmon/ https://github.com/kingsfordgroup/sailfish/releases/download/v0.3.0/SalmonBeta-v0.3.0_ubuntu-12.04.tar.gz')
node.ssh.execute('tar -xzf /opt/software/salmon/SalmonBeta-v0.3.0_ubuntu-12.04.tar.gz -C /opt/software/salmon/0.3.0/')
node.ssh.execute('chmod +x /opt/software/salmon/0.3.0/SalmonBeta-latest_ubuntu-12.04/bin/salmon')
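            # write an Environment Modules modulefile so Salmon can be loaded through the module system (e.g. "module load salmon/0.3.0")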
node.ssh.execute('mkdir -p /usr/local/Modules/applications/salmon/;touch /usr/local/Modules/applications/salmon/0.3.0')
node.ssh.execute('echo "#%Module" >> /usr/local/Modules/applications/salmon/0.3.0')
node.ssh.execute('echo "set root /opt/software/salmon/0.3.0/SalmonBeta-latest_ubuntu-12.04/" >> /usr/local/Modules/applications/salmon/0.3.0')
node.ssh.execute('echo -e "prepend-path\tPATH\t\$root/bin" >> /usr/local/Modules/applications/salmon/0.3.0')
|
meissnert/StarCluster-Plugins
|
salmon_0_3_0.py
|
Python
|
mit
| 1,179
|
"""
WSGI config for ubermanage project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ubermanage.settings")
application = get_wsgi_application()
|
nerdoc/ubermanage
|
ubermanage/wsgi.py
|
Python
|
agpl-3.0
| 397
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
try:
import cStringIO as StringIO
except ImportError:
import StringIO
from PIL import Image
from PIL import ImageEnhance
from random import randrange
# ----------------------------------------
# Image resizing
# ----------------------------------------
def image_resize_image(base64_source, size=(1024, 1024), encoding='base64', filetype=None, avoid_if_small=False):
""" Function to resize an image. The image will be resized to the given
size, while keeping the aspect ratios, and holes in the image will be
filled with transparent background. The image will not be stretched if
smaller than the expected size.
Steps of the resizing:
- Compute width and height if not specified.
- if avoid_if_small: if both image sizes are smaller than the requested
sizes, the original image is returned. This is used to avoid adding
transparent content around images that we do not want to alter but
just resize if too big. This is used for example when storing images
in the 'image' field: we keep the original image, resized to a maximal
size, without adding transparent content around it if smaller.
- create a thumbnail of the source image through using the thumbnail
function. Aspect ratios are preserved when using it. Note that if the
source image is smaller than the expected size, it will not be
extended, but filled to match the size.
- create a transparent background that will hold the final image.
- paste the thumbnail on the transparent background and center it.
:param base64_source: base64-encoded version of the source
image; if False, returns False
:param size: 2-tuple(width, height). A None value for any of width or
height mean an automatically computed value based respectivelly
on height or width of the source image.
:param encoding: the output encoding
:param filetype: the output filetype, by default the source image's
:type filetype: str, any PIL image format (supported for creation)
:param avoid_if_small: do not resize if image height and width
are smaller than the expected size.
"""
if not base64_source:
return False
if size == (None, None):
return base64_source
image_stream = StringIO.StringIO(base64_source.decode(encoding))
image = Image.open(image_stream)
# store filetype here, as Image.new below will lose image.format
filetype = (filetype or image.format).upper()
filetype = {
'BMP': 'PNG',
}.get(filetype, filetype)
asked_width, asked_height = size
if asked_width is None:
asked_width = int(image.size[0] * (float(asked_height) / image.size[1]))
if asked_height is None:
asked_height = int(image.size[1] * (float(asked_width) / image.size[0]))
size = asked_width, asked_height
# check image size: do not create a thumbnail if avoiding smaller images
if avoid_if_small and image.size[0] <= size[0] and image.size[1] <= size[1]:
return base64_source
if image.size != size:
image = image_resize_and_sharpen(image, size)
if image.mode not in ["1", "L", "P", "RGB", "RGBA"]:
image = image.convert("RGB")
background_stream = StringIO.StringIO()
image.save(background_stream, filetype)
return background_stream.getvalue().encode(encoding)
def image_resize_and_sharpen(image, size, preserve_aspect_ratio=False, factor=2.0):
"""
Create a thumbnail by resizing while keeping ratio.
A sharpen filter is applied for a better looking result.
:param image: PIL.Image.Image()
:param size: 2-tuple(width, height)
:param preserve_aspect_ratio: boolean (default: False)
:param factor: Sharpen factor (default: 2.0)
"""
if image.mode != 'RGBA':
image = image.convert('RGBA')
image.thumbnail(size, Image.ANTIALIAS)
if preserve_aspect_ratio:
size = image.size
sharpener = ImageEnhance.Sharpness(image)
resized_image = sharpener.enhance(factor)
# create a transparent image for background and paste the image on it
image = Image.new('RGBA', size, (255, 255, 255, 0))
image.paste(resized_image, ((size[0] - resized_image.size[0]) / 2, (size[1] - resized_image.size[1]) / 2))
return image
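# image_save_for_web below palettises PNG output to 256 colours (keeping alpha) and re-saves JPEG at quality 80 to reduce file size.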
def image_save_for_web(image, fp=None, format=None):
"""
Save image optimized for web usage.
:param image: PIL.Image.Image()
:param fp: File name or file object. If not specified, a bytestring is returned.
:param format: File format if could not be deduced from image.
"""
opt = dict(format=image.format or format)
if image.format == 'PNG':
opt.update(optimize=True)
alpha = False
if image.mode in ('RGBA', 'LA') or (image.mode == 'P' and 'transparency' in image.info):
alpha = image.convert('RGBA').split()[-1]
if image.mode != 'P':
# Floyd Steinberg dithering by default
image = image.convert('RGBA').convert('P', palette=Image.WEB, colors=256)
if alpha:
image.putalpha(alpha)
elif image.format == 'JPEG':
opt.update(optimize=True, quality=80)
if fp:
image.save(fp, **opt)
else:
img = StringIO.StringIO()
image.save(img, **opt)
return img.getvalue()
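# Hedged usage sketch (not part of the original module): optimize a PIL image
# for web usage; the file names are hypothetical.
#
#   img = Image.open('logo.png')
#   png_bytes = image_save_for_web(img)            # returns an optimized bytestring
#   image_save_for_web(img, fp='logo_web.png')     # or write straight to a file/path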
def image_resize_image_big(base64_source, size=(1024, 1024), encoding='base64', filetype=None, avoid_if_small=True):
""" Wrapper on image_resize_image, to resize images larger than the standard
'big' image size: 1024x1024px.
:param size, encoding, filetype, avoid_if_small: refer to image_resize_image
"""
return image_resize_image(base64_source, size, encoding, filetype, avoid_if_small)
def image_resize_image_medium(base64_source, size=(128, 128), encoding='base64', filetype=None, avoid_if_small=False):
""" Wrapper on image_resize_image, to resize to the standard 'medium'
        image size: 128x128.
:param size, encoding, filetype, avoid_if_small: refer to image_resize_image
"""
return image_resize_image(base64_source, size, encoding, filetype, avoid_if_small)
def image_resize_image_small(base64_source, size=(64, 64), encoding='base64', filetype=None, avoid_if_small=False):
""" Wrapper on image_resize_image, to resize to the standard 'small' image
        size: 64x64.
:param size, encoding, filetype, avoid_if_small: refer to image_resize_image
"""
return image_resize_image(base64_source, size, encoding, filetype, avoid_if_small)
# ----------------------------------------
# Crop Image
# ----------------------------------------
def crop_image(data, type='top', ratio=False, thumbnail_ratio=None, image_format="PNG"):
    """ Crop an image and optionally create a thumbnail.
    :param data: base64-encoded image data.
    :param type: cropping position.
    Possible values: 'top', 'center', 'bottom'
    :param ratio: cropping ratio,
    e.g. (4, 3), (16, 9), (16, 10);
    pass (1, 1) to generate a square image
    :param thumbnail_ratio: size reduction ratio for the thumbnail,
    e.g. thumbnail_ratio=2 turns a 500x500 image into a 250x250 thumbnail
    :param image_format: output image format, e.g. PNG, JPEG
"""
if not data:
return False
image_stream = Image.open(StringIO.StringIO(data.decode('base64')))
output_stream = StringIO.StringIO()
w, h = image_stream.size
new_h = h
new_w = w
if ratio:
w_ratio, h_ratio = ratio
new_h = (w * h_ratio) / w_ratio
new_w = w
if new_h > h:
new_h = h
new_w = (h * w_ratio) / h_ratio
if type == "top":
cropped_image = image_stream.crop((0, 0, new_w, new_h))
cropped_image.save(output_stream, format=image_format)
elif type == "center":
cropped_image = image_stream.crop(((w - new_w) / 2, (h - new_h) / 2, (w + new_w) / 2, (h + new_h) / 2))
cropped_image.save(output_stream, format=image_format)
elif type == "bottom":
cropped_image = image_stream.crop((0, h - new_h, new_w, h))
cropped_image.save(output_stream, format=image_format)
else:
raise ValueError('ERROR: invalid value for crop_type')
# TDE FIXME: should not have a ratio, makes no sense -> should have maximum width (std: 64; 256 px)
if thumbnail_ratio:
thumb_image = Image.open(StringIO.StringIO(output_stream.getvalue()))
thumb_image.thumbnail((new_w / thumbnail_ratio, new_h / thumbnail_ratio), Image.ANTIALIAS)
thumb_image.save(output_stream, image_format)
return output_stream.getvalue().encode('base64')
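# Hedged usage sketch (not part of the original module): crop a base64-encoded
# image to a 16:9 banner anchored at the top, and to a centered square with a
# half-size thumbnail. The input variable is hypothetical.
#
#   banner_b64 = crop_image(src_b64, type='top', ratio=(16, 9))
#   square_b64 = crop_image(src_b64, type='center', ratio=(1, 1), thumbnail_ratio=2)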
# ----------------------------------------
# Colors
# ---------------------------------------
def image_colorize(original, randomize=True, color=(255, 255, 255)):
""" Add a color to the transparent background of an image.
    :param original: binary data of the original image
    :param randomize: randomize the background color
    :param color: background color used when randomize is False
"""
# create a new image, based on the original one
original = Image.open(StringIO.StringIO(original))
image = Image.new('RGB', original.size)
    # generate the background color, paste it as background
if randomize:
color = (randrange(32, 224, 24), randrange(32, 224, 24), randrange(32, 224, 24))
image.paste(color, box=(0, 0) + original.size)
image.paste(original, mask=original)
# return the new image
buffer = StringIO.StringIO()
image.save(buffer, 'PNG')
return buffer.getvalue()
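# Hedged usage sketch (not part of the original module): fill the transparent
# background of a PNG with a fixed colour; the file name is hypothetical.
#
#   png_data = open('avatar.png', 'rb').read()
#   colored_png = image_colorize(png_data, randomize=False, color=(230, 230, 230))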
# ----------------------------------------
# Misc image tools
# ---------------------------------------
def image_get_resized_images(base64_source, return_big=False, return_medium=True, return_small=True,
big_name='image', medium_name='image_medium', small_name='image_small',
avoid_resize_big=True, avoid_resize_medium=False, avoid_resize_small=False):
""" Standard tool function that returns a dictionary containing the
big, medium and small versions of the source image. This function
is meant to be used for the methods of functional fields for
models using images.
Default parameters are given to be used for the getter of functional
image fields, for example with res.users or res.partner. It returns
only image_medium and image_small values, to update those fields.
:param base64_source: base64-encoded version of the source
            image; if False, all returned values will be False
        :param return_{..}: if set, compute and return the related resized
            version of the image
:param {..}_name: key of the resized image in the return dictionary;
'image', 'image_medium' and 'image_small' by default.
:param avoid_resize_[..]: see avoid_if_small parameter
:return return_dict: dictionary with resized images, depending on
previous parameters.
"""
return_dict = dict()
if return_big:
return_dict[big_name] = image_resize_image_big(base64_source, avoid_if_small=avoid_resize_big)
if return_medium:
return_dict[medium_name] = image_resize_image_medium(base64_source, avoid_if_small=avoid_resize_medium)
if return_small:
return_dict[small_name] = image_resize_image_small(base64_source, avoid_if_small=avoid_resize_small)
return return_dict
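# Hedged usage sketch (not part of the original module): typical write-side use
# when storing an image on a record; with the defaults only the medium and small
# variants are computed. 'uploaded_b64' is a hypothetical base64 string.
#
#   resized = image_get_resized_images(uploaded_b64)
#   # -> {'image_medium': <128x128 b64>, 'image_small': <64x64 b64>}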
def image_resize_images(vals, big_name='image', medium_name='image_medium', small_name='image_small'):
""" Update ``vals`` with image fields resized as expected. """
if big_name in vals:
vals.update(image_get_resized_images(vals[big_name],
return_big=True, return_medium=True, return_small=True,
big_name=big_name, medium_name=medium_name, small_name=small_name,
avoid_resize_big=True, avoid_resize_medium=False, avoid_resize_small=False))
elif medium_name in vals:
vals.update(image_get_resized_images(vals[medium_name],
return_big=True, return_medium=True, return_small=True,
big_name=big_name, medium_name=medium_name, small_name=small_name,
avoid_resize_big=True, avoid_resize_medium=True, avoid_resize_small=False))
elif small_name in vals:
vals.update(image_get_resized_images(vals[small_name],
return_big=True, return_medium=True, return_small=True,
big_name=big_name, medium_name=medium_name, small_name=small_name,
avoid_resize_big=True, avoid_resize_medium=True, avoid_resize_small=True))
if __name__=="__main__":
import sys
assert len(sys.argv)==3, 'Usage to Test: image.py SRC.png DEST.png'
img = file(sys.argv[1],'rb').read().encode('base64')
new = image_resize_image(img, (128,100))
file(sys.argv[2], 'wb').write(new.decode('base64'))
|
laslabs/odoo
|
openerp/tools/image.py
|
Python
|
agpl-3.0
| 13,237
|
# Definition for a binary tree node
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class BSTIterator(object):
def __init__(self, root):
"""
:type root: TreeNode
"""
self.root = root
self.iterator = root
self.path = []
if root == None:
return
while self.iterator.left != None:
self.path.append(self.iterator)
self.iterator = self.iterator.left
def hasNext(self):
"""
:rtype: bool
"""
return (self.iterator != None)
def next(self):
"""
:rtype: int
"""
ret = self.iterator.val
if self.iterator.right != None:
self.path.append(self.iterator)
self.iterator = self.iterator.right
while self.iterator.left != None:
self.path.append(self.iterator)
self.iterator = self.iterator.left
else:
next_iterator = None
while True:
if len(self.path) == 0:
next_iterator = None
break
next_iterator = self.path[-1]
self.path = self.path[:-1]
if next_iterator.val > ret:
break
self.iterator = next_iterator
return ret
# Your BSTIterator will be called like this:
# i, v = BSTIterator(root), []
# while i.hasNext(): v.append(i.next())
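# Hedged verification sketch (assumes the TreeNode class described above):
# build the tree 2 -> (1, 3) and check that the iterator yields sorted values.
#
#   root = TreeNode(2); root.left = TreeNode(1); root.right = TreeNode(3)
#   it, out = BSTIterator(root), []
#   while it.hasNext(): out.append(it.next())
#   assert out == [1, 2, 3]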
|
zeyuanxy/leet-code
|
vol4/binary-search-tree-iterator/binary-search-tree-iterator.py
|
Python
|
mit
| 1,580
|
"""
==============================================================================
Program: roiAnalysis.py
Author: Kyle Reese Almryde
Date: 11/20/2012 @ 13:16:12 PM
Description: This module contains methods for performing ROI analysis on
functional Magnetic Resonance Images.
==============================================================================
"""
import os
import sys
def filterClusters(inputFile, maskImage, outputFile):
"""Filter single subject images through clusters
This function filters the individual subject files through the
selected cluster images.
Params:
inputFile -- a 4D Image, path included, and subbrick index if
necessary. eg, '/path/to/image/file/4dImage.nii[1]'
maskImage -- a binary Image intended for use as a mask, path included eg,
'/path/to/image/file/MaskImage.nii'
NOTE: it is optional for the image to be in binary format
outputFile -- a 4D Image, path included, describing the desired output
file name: '/path/to/image/file/4dImageMasked.nii'
"""
os.system('3drefit -space MNI ' + maskImage)
os.system('3dcalc -a ' + maskImage \
+ ' -b ' + inputFile \
+ " -expr 'step(a) * b'" \
+ ' -prefix ' + outputFile)
def flipXYZ(oldXYZ): # This is an example of a nice Modular function.
"""Flip XY coords of an XYZ coordinate string
This function inverts the xy coordinates generated by afni's
3dclust command. It is presently acting as a sort of Helper
    function to the getClusterStats function defined below.
Params:
oldXYZ -- a string containing the original xyz coordinates
oldXYZ == '58 42 8'
Returns:
xyz -- a new string with the inverted xy + z coordinates
xyz == '-58 -42 8'
"""
coordList = oldXYZ.split()
x = int(coordList[0]) * -1
y = int(coordList[1]) * -1
xyz = ' '.join([str(x), str(y), coordList[2]])
return xyz
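# Hedged example (pure-Python helper, can be exercised without AFNI installed):
#
#   assert flipXYZ('58 42 8') == '-58 -42 8'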
def whereAmI(xyz): # This is a good Modular function
"""Get the region of interest from the supplied xyz coordinates
This function utilizes afni's CA_ML_18_MNIA atlas and reads xyz
coordinates in -lpi orientation. It returns only the first listed
roi, so if more detail is desired regarding the surrounding regions
this function may not be the most ideal.
Params:
xyz -- a string of three numbers representing xyz coordinates
'-50 48 2'
Returns:
A string representing the region of interest
"""
atlas = os.popen('whereami ' + xyz + ' -atlas CA_ML_18_MNIA -lpi').readlines()
index = atlas.index('Atlas CA_ML_18_MNIA: Macro Labels (N27)\n')
roi = atlas[index + 1].split(':')[1].strip().split()
return roi[0] + '\t' + ' '.join(roi[1:])
def getClusterStats(imgFile): # This has potential to be very Modular, I just need to decide if I like it enough
"""Extract cluster stats from image file
    This function uses the os module's popen to capture output from
afni's 3dclust command. Presently it assumes the image is in
2x2x2 resolution. Output is the mean and peak voxel intensity
followed by the peak xyz coordinates
Params:
imgFile -- a 4D Image, path included eg,
'/path/to/image/file/4dImage.nii.gz'
Returns:
stats -- a string containing the values for mean, peak, xyz, and roi
"""
clusterTbl = os.popen('3dclust 2 1 ' + imgFile).readlines()[-1].strip() # Strip newline and get last line with the stats output table from 3dclust
tempXyz = ' '.join(clusterTbl.split()[-3:]).replace('.0', '') # Strip the '.0' from the coordinate numbers.
mean = os.popen('3dBrickStat -mean ' + imgFile).readline().strip() # get the mean of the image file
peak = clusterTbl.split()[-4] # a list object containing the [peak intensity, Xcoord, Ycoord, Zcoord]
xyz = flipXYZ(tempXyz) # Use the flipXYZ function to flip the x and y coordinates (name is a bit misleading)
roi = whereAmI(xyz) # Extract the Region of Interest based on the supplied xyz coordinates
return ' '.join([mean, peak, xyz, roi])
# def groupImageStats(imgFile, brik='', outImage):
# """ Strip the desired image statistics from the image file
# Specifically, remove those subbricks from specified from the
# supplied image, and store them in their own file that can be
# manipulated more easily later on.
# Params:
# imgFile -- The input 4d file. It can be a subject image
# file or a group image file, so long as at
# least 2 subbricks reside within the image.
# The image should contain the desired path.
# '/path/to/image/file/4dImage.nii.gz'
# Optionally, a list of 4d images can be supplied
# in which case a string will be constructed
# using a list comprehension.
# brik -- The desired subbrik(s) to be extracted. AFNI
# conventions for specifying subbriks apply.
# outImage -- The desired prefix for the newly created
# image file. The path name should be included
# in the image prefix
# Returns:
# A string composed of the output image's path and name,
# in case it is needed.
# """
# if type(imgFile) == list:
# imgFile = ' '.join([x + brik for x in imgFile])
# else:
# imgFile = imgFile + brik
# os.system('3dbucket -prefix ' + outImage + ' ' + imgFile)
# return outImage
# def computeImageMean(imgList, brik='', outImage):
# """ using 3dmean, average datasets
# Params:
# imgList -- A list of 4d images to be averaged. It is assumed
# the list has already been stripped.
# brik -- an optional parameter which can specify a subbrik.
# outImage -- The desired prefix for the newly created
# image file. The path name should be included
# in the image prefix
# Returns:
# A string composed of the output image's path and name,
# in case it is needed.
# """
# imgFiles = ' '.join([x + brik for x in imgList])
# os.system('3dMean -prefix ' + outImage + ' ' + imgFiles)
# return outImage
# def oneSample_tTest(imgList, maskFile, brik='', outImage):
# """ perform a one sample tTest
# Params:
# imgList --
# maskFile --
# brik --
# outImage --
# Returns:
# Description of returns
# """
# if type(imgFile) == list:
# imgFile = ' '.join([x + brik for x in imgFile])
# else:
# imgFile = imgFile + brik
# os.system('3dttest++ -setA ' + imgFiles
# + ' -mask ' + maskFile
# + ' -prefix ' + outImage)
|
KrbAlmryde/Utilities
|
WorkShop/PYTHON/roiAnalysis.py
|
Python
|
mit
| 6,956
|
#!/usr/bin/python
# Copyright (c) 2009, Purdue University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# Neither the name of the Purdue University nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Regression test for iscpy.py
Make sure you are running this against a database that can be destroyed.
DO NOT EVER RUN THIS TEST AGAINST A PRODUCTION DATABASE.
"""
__copyright__ = 'Copyright (C) 2009, Purdue University'
__license__ = 'BSD'
__version__ = '#1.0.3#'
import unittest
import os
import iscpy
NAMED_FILE = 'test_data/named.example.conf'
class TestNamedImport(unittest.TestCase):
def setUp(self):
self.named_file = open(NAMED_FILE).read()
self.maxDiff = None
def testScrubComments01(self):
self.assertEqual(iscpy.ScrubComments("/* */foobar"), "foobar")
def testScrubComments02(self):
self.assertEqual(iscpy.ScrubComments("/*\n*/foobar"), "foobar")
def testScrubComments03(self):
self.assertEqual(iscpy.ScrubComments("/*\n*\n*\n*/foobar"), "foobar")
def testScrubComments(self):
self.assertEqual(iscpy.ScrubComments(self.named_file),
'options {\ndirectory "/var/domain";\nrecursion yes;\n'
'allow-query { any; };\nmax-cache-size 512M;\n};\n\n'
'logging {\nchannel "security" {\n'
'file "/var/log/named-security.log" '
'versions 10 size 10m;\nprint-time yes;\n};\n'
'channel "query_logging" {\nsyslog local5;\n'
'severity info;\n};\ncategory "client" { "null"; };\n'
'category "update-security" { "security"; };\n'
'category "queries" { "query_logging"; };\n};\n\n'
'controls {\ninet * allow { control-hosts; } keys '
'{rndc-key; };\n};\n\ninclude "/etc/rndc.key";\n\n'
'acl control-hosts {\n127.0.0.1/32;\n192.168.1.3/32;\n};\n'
'\nacl admin {\n192.168.0.0/16;\n192.168.1.2/32;\n'
'192.168.1.4/32;\n};\n\nview "unauthorized" {\n'
'recursion no;\nmatch-clients { network-unauthorized; };\n'
'additional-from-auth no;\nadditional-from-cache no;\n\n'
'zone "0.0.127.in-addr.arpa" {\ntype slave;\n'
'file "test_data/university.rev.bak";\nmasters {\n'
'192.168.1.3;\n'
'};\n};\n\nzone "1.210.128.in-addr.arpa" {\ntype master;\n'
'file "test_data/test_reverse_zone.db";\n'
'allow-query { network-unauthorized; };\n};\n\n'
'zone "." {\ntype hint;\nfile "named.ca";\n};\n};\n\n'
'view "authorized" {\nrecursion yes;\n'
'match-clients { network-authorized; };\n'
'allow-recursion { network-authorized; };\n'
'allow-query-cache { network-authorized; };\n'
'additional-from-auth yes;\nadditional-from-cache yes;\n\n'
'zone "university.edu" {\ntype slave;\n'
'file "test_data/university.db.bak";\nmasters {\n'
'192.168.11.37;\n};\ncheck-names ignore;\n};\n\n'
'zone "smtp.university.edu" {\ntype master;\n'
'file "test_data/test_zone.db";\nmasters {\n'
'192.168.11.37;\n};\n};\n\nzone "." {\ntype hint;\n'
'file "named.ca";\n};\n};\n\n')
def testExplode(self):
self.assertEqual(iscpy.Explode(self.named_file),
['options', '{', 'directory "/var/domain"', ';',
'recursion yes', ';', 'allow-query', '{', 'any', ';', '}',
';', 'max-cache-size 512M', ';', '}', ';', 'logging', '{',
'channel "security"', '{',
'file "/var/log/named-security.log" versions 10 size 10m',
';', 'print-time yes', ';', '}', ';',
'channel "query_logging"', '{', 'syslog local5', ';',
'severity info', ';', '}', ';', 'category "client"', '{',
'"null"', ';', '}', ';', 'category "update-security"',
'{', '"security"', ';', '}', ';', 'category "queries"',
'{', '"query_logging"', ';', '}', ';', '}', ';',
'controls', '{', 'inet * allow', '{', 'control-hosts',
';', '}', 'keys', '{', 'rndc-key', ';', '}', ';',
'}', ';', 'include "/etc/rndc.key"', ';',
'acl control-hosts', '{', '127.0.0.1/32', ';',
'192.168.1.3/32', ';', '}', ';', 'acl admin', '{',
'192.168.0.0/16', ';', '192.168.1.2/32', ';',
'192.168.1.4/32', ';', '}', ';', 'view "unauthorized"',
'{', 'recursion no', ';', 'match-clients', '{',
'network-unauthorized', ';', '}', ';',
'additional-from-auth no', ';',
'additional-from-cache no', ';',
'//\t// Loopback network\t//\tzone "0.0.127.in-addr.arpa"',
'{', 'type slave', ';',
'file "test_data/university.rev.bak"',
';', 'masters', '{', '192.168.1.3', ';', '}', ';', '}',
';',
'//\t// 192.168.1.0/24\t//\tzone "1.210.128.in-addr.arpa"',
'{', 'type master', ';',
'file "test_data/test_reverse_zone.db"', ';',
'allow-query', '{', 'network-unauthorized', ';', '}', ';',
'}', ';', '//\t// Cache File\t//\tzone "."', '{',
'type hint', ';', 'file "named.ca"', ';', '}', ';', '}',
';', 'view "authorized"', '{', 'recursion yes', ';',
'match-clients', '{', 'network-authorized', ';', '}', ';',
'allow-recursion', '{', 'network-authorized', ';', '}',
';', 'allow-query-cache', '{', 'network-authorized', ';',
'}', ';', 'additional-from-auth yes', ';',
'additional-from-cache yes', ';', 'zone "university.edu"',
'{', 'type slave', ';',
'file "test_data/university.db.bak"', ';', 'masters', '{',
'192.168.11.37', ';', '}', ';', 'check-names ignore', ';',
'}', ';',
'//\t// Internal view of "smtp.university.edu"\t//\tzone "smtp.university.edu"',
'{', 'type master', ';', 'file "test_data/test_zone.db"',
';', 'masters', '{', '192.168.11.37', ';', '}', ';', '}',
';', '//\t// Cache File\t//\tzone "."', '{', 'type hint',
';', 'file "named.ca"', ';', '}', ';', '}', ';'])
def testParse(self):
self.assertEqual(iscpy.ParseTokens(
iscpy.Explode(
iscpy.ScrubComments(self.named_file))),
{'acl control-hosts': {'127.0.0.1/32': True, '192.168.1.3/32': True},
'acl admin': {'192.168.1.2/32': True, '192.168.1.4/32': True,
'192.168.0.0/16': True},
'view "authorized"': {'zone "smtp.university.edu"':
{'masters': {'192.168.11.37': True},
'type': 'master', 'file': '"test_data/test_zone.db"'},
'allow-query-cache': {'network-authorized': True},
'allow-recursion': {'network-authorized': True},
'recursion': 'yes',
'zone "university.edu"': {'check-names': 'ignore',
'masters': {'192.168.11.37': True},
'type': 'slave', 'file': '"test_data/university.db.bak"'},
'match-clients': {'network-authorized': True},
'zone "."': {'type': 'hint', 'file': '"named.ca"'},
'additional-from-cache': 'yes',
'additional-from-auth': 'yes'},
'controls': [{'inet * allow': {'control-hosts': True}},
{'keys': {'rndc-key': True}}],
'view "unauthorized"':
{'zone "1.210.128.in-addr.arpa"':
{'allow-query': {'network-unauthorized': True},
'type': 'master',
'file': '"test_data/test_reverse_zone.db"'},
'recursion': 'no',
'match-clients': {'network-unauthorized': True},
'zone "."': {'type': 'hint', 'file': '"named.ca"'},
'zone "0.0.127.in-addr.arpa"': {
'masters': {'192.168.1.3': True}, 'type': 'slave',
'file': '"test_data/university.rev.bak"'},
'additional-from-cache': 'no', 'additional-from-auth': 'no'},
'logging': {'category "update-security"': {'"security"': True},
'category "queries"': {'"query_logging"': True},
'channel "query_logging"':
{'syslog': 'local5', 'severity': 'info'},
'category "client"': {'"null"': True},
'channel "security"':
{'file': '"/var/log/named-security.log" versions 10 size 10m',
'print-time': 'yes'}},
'include': '"/etc/rndc.key"',
'options': {'directory': '"/var/domain"', 'recursion': 'yes',
'allow-query': {'any': True}, 'max-cache-size': '512M'}})
def testMakeNamedDict(self):
self.assertEqual(iscpy.dns.MakeNamedDict(self.named_file),
{'acls': {'admin': ['192.168.1.2/32', '192.168.1.4/32',
'192.168.0.0/16'],
'control-hosts': ['127.0.0.1/32', '192.168.1.3/32']},
'options': {'include': '"/etc/rndc.key"',
'logging': {'category "update-security"':
{'"security"': True},
'category "queries"': {'"query_logging"': True},
'channel "query_logging"':
{'syslog': 'local5', 'severity': 'info'},
'category "client"': {'"null"': True},
'channel "security"':
{'file': '"/var/log/named-security.log" versions 10 size 10m',
'print-time': 'yes'}},
'options': {'directory': '"/var/domain"',
'recursion': 'yes',
'allow-query': {'any': True},
'max-cache-size': '512M'},
'controls': [{'inet * allow': {'control-hosts': True}},
{'keys': {'rndc-key': True}}]},
'orphan_zones': {},
'views':
{'authorized': {'zones':
{'university.edu':
{'type': 'slave',
'options': {'masters': {'192.168.11.37': True},
'check-names': 'ignore'},
'file': 'test_data/university.db.bak'},
'smtp.university.edu':
{'type': 'master',
'options': {'masters': {'192.168.11.37': True}},
'file': 'test_data/test_zone.db'},
'.':
{'type': 'hint', 'options': {}, 'file': 'named.ca'}},
'options': {'allow-recursion': {'network-authorized': True},
'recursion': 'yes',
'match-clients': {'network-authorized': True},
'allow-query-cache': {'network-authorized': True},
'additional-from-cache': 'yes',
'additional-from-auth': 'yes'}},
'unauthorized': {'zones':
{'0.0.127.in-addr.arpa':
{'type': 'slave',
'options': {'masters': {'192.168.1.3': True}},
'file': 'test_data/university.rev.bak'},
'1.210.128.in-addr.arpa':
{'type': 'master',
'options': {'allow-query':
{'network-unauthorized': True}},
'file': 'test_data/test_reverse_zone.db'},
'.':
{'type': 'hint', 'options': {}, 'file': 'named.ca'}},
'options': {'recursion': 'no', 'additional-from-cache': 'no',
'match-clients': {'network-unauthorized': True},
'additional-from-auth': 'no'}}}})
def testMakeZoneViewOptions(self):
self.assertEqual(iscpy.dns.MakeZoneViewOptions(
iscpy.dns.MakeNamedDict(self.named_file)),
{'zones':
{'university.edu':
'masters { 192.168.11.37; };\n'
'check-names ignore;',
'0.0.127.in-addr.arpa': 'masters { 192.168.1.3; };',
'smtp.university.edu': 'masters { 192.168.11.37; };',
'1.210.128.in-addr.arpa': 'allow-query { network-unauthorized; };',
'.': ''},
'views':
{'authorized': 'allow-recursion { network-authorized; };\n'
'recursion yes;\nmatch-clients { '
'network-authorized; };\nallow-query-cache { '
'network-authorized; };\nadditional-from-cache '
'yes;\nadditional-from-auth yes;',
'unauthorized': 'recursion no;\nadditional-from-cache no;\n'
'match-clients { network-unauthorized; };\n'
'additional-from-auth no;'}})
def testMakeNamedHeader(self):
self.assertEqual(iscpy.dns.DumpNamedHeader(
iscpy.dns.MakeNamedDict(self.named_file)),
'include "/etc/rndc.key";\n'
'logging { category "update-security" { "security"; };\n'
'category "queries" { "query_logging"; };\n'
'channel "query_logging" { syslog local5;\nseverity info; };\n'
'category "client" { "null"; };\n'
'channel "security" { file "/var/log/named-security.log" '
'versions 10 size 10m;\nprint-time '
'yes; }; };\n'
'options { directory "/var/domain";\nrecursion yes;\n'
'allow-query { any; };\nmax-cache-size 512M; };\n'
'controls { inet * allow { control-hosts; } keys { rndc-key; }; '
'};')
def testMakeISC(self):
self.assertEqual(iscpy.MakeISC(
{'level1': {'level2': {'level3': {'level4': {
'test1': True, 'test2': True, 'test3': True}}}},
'newarg': 'newval', 'new_stanza': {'test': True}}),
'new_stanza { test; };\n'
'level1 { level2 { level3 { level4 { test1;\n'
'test3;\n'
'test2; }; }; }; };\n'
'newarg newval;')
self.assertEqual(iscpy.MakeISC(iscpy.ParseISCString(self.named_file)),
'acl control-hosts { 127.0.0.1/32;\n'
'192.168.1.3/32; };\n'
'acl admin { 192.168.1.2/32;\n'
'192.168.1.4/32;\n'
'192.168.0.0/16; };\n'
'view "authorized" { zone "smtp.university.edu" { masters { 192.168.11.37; };\n'
'type master;\n'
'file "test_data/test_zone.db"; };\n'
'allow-query-cache { network-authorized; };\n'
'allow-recursion { network-authorized; };\n'
'recursion yes;\n'
'zone "university.edu" { check-names ignore;\n'
'masters { 192.168.11.37; };\n'
'type slave;\n'
'file "test_data/university.db.bak"; };\n'
'match-clients { network-authorized; };\n'
'zone "." { type hint;\n'
'file "named.ca"; };\n'
'additional-from-cache yes;\n'
'additional-from-auth yes; };\n'
'controls { inet * allow { control-hosts; } keys { rndc-key; }; };\n'
'view "unauthorized" { zone "1.210.128.in-addr.arpa" { allow-query { network-unauthorized; };\n'
'type master;\n'
'file "test_data/test_reverse_zone.db"; };\n'
'recursion no;\n'
'match-clients { network-unauthorized; };\n'
'zone "." { type hint;\n'
'file "named.ca"; };\n'
'zone "0.0.127.in-addr.arpa" { masters { 192.168.1.3; };\n'
'type slave;\n'
'file "test_data/university.rev.bak"; };\n'
'additional-from-cache no;\n'
'additional-from-auth no; };\n'
'logging { category "update-security" { "security"; };\n'
'category "queries" { "query_logging"; };\n'
'channel "query_logging" { syslog local5;\n'
'severity info; };\n'
'category "client" { "null"; };\n'
'channel "security" { file "/var/log/named-security.log" versions 10 size 10m;\n'
'print-time yes; }; };\n'
'include "/etc/rndc.key";\n'
'options { directory "/var/domain";\n'
'recursion yes;\n'
'allow-query { any; };\n'
'max-cache-size 512M; };')
if( __name__ == '__main__' ):
unittest.main()
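# Hedged usage sketch (only functions exercised by the tests above; the exact
# whitespace of the rendered output may differ):
#
#   parsed = iscpy.ParseISCString('options { recursion yes; };')
#   # parsed should look like {'options': {'recursion': 'yes'}}
#   rendered = iscpy.MakeISC(parsed)
#   # rendered is a named.conf-style string such as 'options { recursion yes; };'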
|
uberj/iscpy
|
test/named_importer_lib_regtest.py
|
Python
|
bsd-3-clause
| 18,652
|
#!/usr/bin/env python
#coding:utf-8
# Author: mozman
# Purpose: a hack to generate XML containing CDATA by ElementTree
# Created: 26.05.2012
# Copyright (C) 2012, Manfred Moitzi
# License: GPLv3
# usage:
#
# from svgwrite.etree import etree, CDATA
#
# element = etree.Element('myTag')
# element.append(CDATA("< and >"))
#
# assert etree.tostring(element) == "<myTag><![CDATA[< and >]]></myTag>"
import sys
PY3 = sys.version_info[0] > 2
import xml.etree.ElementTree as etree
CDATA_TPL = "<![CDATA[%s]]>"
CDATA_TAG = CDATA_TPL
def CDATA(text):
element = etree.Element(CDATA_TAG)
element.text = text
return element
try:
original_serialize_xml = etree._serialize_xml
except AttributeError as e:
    print('etree patch error: ' + str(e))
if PY3:
def _serialize_xml_with_CDATA_support(write, elem, qnames, namespaces):
if elem.tag == CDATA_TAG:
write(CDATA_TPL % elem.text)
else:
original_serialize_xml(write, elem, qnames, namespaces)
else:
def _serialize_xml_with_CDATA_support(write, elem, encoding, qnames, namespaces):
if elem.tag == CDATA_TAG:
write(CDATA_TPL % elem.text.encode(encoding))
else:
original_serialize_xml(write, elem, encoding, qnames, namespaces)
# ugly, ugly, ugly patching
try:
etree._serialize_xml = _serialize_xml_with_CDATA_support
except AttributeError as e:
    print('etree patch error: ' + str(e))
|
hirobert/svgwrite
|
svgwrite/etree.py
|
Python
|
gpl-3.0
| 1,493
|
# Generated by Django 2.0 on 2018-05-15 15:28
import django.contrib.auth.models
import django.contrib.auth.validators
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0009_alter_user_last_name_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('funcao_servidor', models.CharField(choices=[('1', 'ChefeDeDisciplina'), ('0', 'Servidor')], default=0, max_length=1)),
('funcao_docente', models.CharField(choices=[('2', 'ChefeDeDepartamento'), ('1', 'Coordenador'), ('0', 'Professor')], default=0, max_length=1)),
('tipo_usuario', models.CharField(choices=[('0', 'Professor'), ('1', 'Servidor')], default=1, max_length=1)),
('nome', models.CharField(max_length=100)),
('siape_matricula', models.CharField(max_length=30, unique=True, validators=[django.core.validators.RegexValidator('[0-9]{7,9}', 'SIAPE/Matricula invalido(a). O SIAPE/Matricula deve conter somente 7(sete) números.')])),
('disciplina', models.CharField(choices=[('4', 'A2'), ('2', 'B1'), ('0', 'B2'), ('1', 'B3'), ('3', 'C2')], default=0, max_length=1)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
migrations.CreateModel(
name='Curso',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nome', models.CharField(max_length=255, unique=True)),
('modalidade', models.CharField(choices=[('0', 'Ensino_Medio'), ('1', 'Superior')], max_length=1)),
],
options={
'verbose_name': 'Curso',
'verbose_name_plural': 'Cursos',
},
),
migrations.CreateModel(
name='Departamento',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nome', models.CharField(max_length=355)),
],
options={
'verbose_name': 'Departamento',
'verbose_name_plural': 'Departamentos',
},
),
migrations.CreateModel(
name='Horario',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('horario', models.IntegerField(choices=[(0, '07:00-08:00'), (1, '08:00-09:00'), (2, '09:00-10:00'), (3, '10:00-11:00'), (4, '11:00-12:00'), (5, '13:00-14:00'), (6, '14:00-15:00'), (7, '15:00-16:00'), (8, '16:00-17:00'), (9, '17:00-18:00'), (10, '18:00-19:00'), (11, '19:00-20:00'), (12, '20:00-21:00'), (13, '21:00-22:00'), (14, '07:00-07:50'), (15, '07:50-08:40'), (16, '08:40-09:30'), (17, '09:30-10:20'), (18, '10:20-11:10'), (19, '11:10-12:00'), (20, '13:00-13:50'), (21, '13:50-14:40'), (22, '14:40-15:30'), (23, '15:30-16:20'), (24, '16:20-17:10'), (25, '17:10-18:00')])),
('dia_da_semana', models.IntegerField(choices=[(1, 'Segunda-Feira'), (2, 'Terça-Feira'), (3, 'Quarta-Feira'), (4, 'Quinta-Feira'), (5, 'Sexta-Feira')])),
],
),
migrations.CreateModel(
name='Local',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('codigo', models.CharField(max_length=10, unique=True, validators=[django.core.validators.RegexValidator('[A-Za-z][1-5]-[0-2][0-9]', "Codigo de sala invalido. Ele deve seguir o formato: 'A9-99'")])),
('tipo_local', models.CharField(choices=[('1', 'Laboratorio'), ('2', 'Outros'), ('0', 'Sala')], max_length=1)),
('disciplina', models.CharField(choices=[('4', 'A2'), ('2', 'B1'), ('0', 'B2'), ('1', 'B3'), ('3', 'C2')], max_length=1)),
],
options={
'verbose_name': 'Local',
'verbose_name_plural': 'Locais',
},
),
migrations.CreateModel(
name='Materia',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('codigo', models.CharField(max_length=20, unique=True, validators=[django.core.validators.RegexValidator('[^\\w]{1,4}.\\d{1,4}', "Codigo invalido. O codigo da materia deve seguir o formato'AAAA-9999'")])),
('nome', models.CharField(max_length=100, validators=[django.core.validators.RegexValidator('[^\\d]{1,}', 'Nome invalido. O nome da materia dever conter somente letras.')])),
('carga_horaria', models.IntegerField(validators=[django.core.validators.MinValueValidator(1)])),
],
),
migrations.CreateModel(
name='MateriaCorrente',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('horarios', models.ManyToManyField(related_name='materiacorrente', to='core.Horario')),
('materia', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='materiacorrente', to='core.Materia')),
],
options={
'verbose_name': 'Materia Corrente',
'verbose_name_plural': 'Materias Correntes',
},
),
migrations.CreateModel(
name='Notificacao',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('remetente', models.IntegerField(blank=True, null=True)),
('destinatario', models.IntegerField()),
('tipo_notificacao', models.CharField(choices=[('8', 'Aceitacao_Afastamento'), ('6', 'Aceitacao_Antecipacao'), ('4', 'Aceitacao_Justificativa'), ('5', 'Aceitacao_Reposicao'), ('9', 'Falta_Automatica'), ('0', 'Falta_Criada'), ('7', 'Requisicao_Afastamento'), ('3', 'Requisicao_Antecipacao'), ('1', 'Requisicao_Justificativa'), ('2', 'Requisicao_Reposicao')], max_length=1)),
('data', models.DateField(auto_now_add=True)),
('lida', models.BooleanField()),
('horario_materia', models.IntegerField(blank=True, null=True)),
('data_falta', models.DateField(blank=True, null=True)),
],
),
migrations.CreateModel(
name='Periodo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nome', models.CharField(max_length=30, unique=True, validators=[django.core.validators.RegexValidator('20[0-9]{2}.[1-2]{1}', "Nome invalido. O nome do período deve seguir o formato: '9999.9'")])),
('data_inicio', models.DateField()),
('data_termino', models.DateField()),
('is_atual', models.BooleanField(default=False)),
],
options={
'verbose_name': 'Periodo',
'verbose_name_plural': 'Periodos',
},
),
migrations.CreateModel(
name='Turma',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('codigo', models.CharField(max_length=30, unique=True)),
('nome', models.CharField(max_length=30, unique=True)),
('turno', models.CharField(choices=[('0', 'Manha'), ('3', 'ManhaTarde'), ('5', 'ManhaTardeNoite'), ('2', 'Noite'), ('1', 'Tarde'), ('4', 'TardeNoite')], max_length=1)),
('curso', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='turma', to='core.Curso')),
('sala', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='turma', to='core.Local')),
],
options={
'verbose_name': 'Turma',
'verbose_name_plural': 'Turmas',
},
),
migrations.AddField(
model_name='materiacorrente',
name='periodo',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='materiacorrente', to='core.Periodo'),
),
migrations.AddField(
model_name='materia',
name='turma',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='materia', to='core.Turma'),
),
migrations.AlterUniqueTogether(
name='horario',
unique_together={('horario', 'dia_da_semana')},
),
migrations.AddField(
model_name='departamento',
name='sala',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='departamento', to='core.Local'),
),
migrations.AddField(
model_name='curso',
name='departamento',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='cursos', to='core.Departamento'),
),
migrations.CreateModel(
name='Professor',
fields=[
],
options={
'verbose_name': 'Professor',
'verbose_name_plural': 'Professores',
'proxy': True,
'indexes': [],
},
bases=('core.user',),
),
migrations.CreateModel(
name='Servidor',
fields=[
],
options={
'verbose_name': 'Servidor',
'verbose_name_plural': 'Servidores',
'proxy': True,
'indexes': [],
},
bases=('core.user',),
),
migrations.AddField(
model_name='materiacorrente',
name='professor',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='materiacorrente', to='core.Professor'),
),
migrations.AlterUniqueTogether(
name='materia',
unique_together={('nome', 'turma')},
),
migrations.AddField(
model_name='departamento',
name='chefe',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='departamento_chefe', to='core.Professor'),
),
migrations.AddField(
model_name='departamento',
name='professores',
field=models.ManyToManyField(related_name='departamento', to='core.Professor'),
),
migrations.AddField(
model_name='curso',
name='coordenador',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='curso', to='core.Professor'),
),
migrations.AlterUniqueTogether(
name='user',
unique_together={('email',)},
),
]
|
diaspa-nds/cfar
|
core/migrations/0001_initial.py
|
Python
|
gpl-2.0
| 13,647
|
from pylab import *
from sourceifc import sourceifc
class plotsamples:
class out_plot(sourceifc.out_callbacks):
def send_samples(self, timestamp, samples):
plot(samples)
show()
def __init__(self, src):
src.register_callbacks(plotsamples.out_plot())
def __del__(self):
pass
|
migueltorroja/rds-sdr
|
plotsamples.py
|
Python
|
mit
| 334
|
from . import sale_order
from . import stock_picking
|
Comunitea/CMNT_004_15
|
project-addons/ubl_edi_from_it/models/__init__.py
|
Python
|
agpl-3.0
| 53
|
# Copyright 2012 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from itertools import chain as iter_chain
from itertools import combinations as iter_combinations
import eventlet
import mock
import netaddr
from neutron_lib import constants as l3_constants
from neutron_lib import exceptions as exc
from oslo_log import log
import oslo_messaging
from oslo_utils import timeutils
from oslo_utils import uuidutils
import six
from testtools import matchers
from neutron.agent.common import config as agent_config
from neutron.agent.l3 import agent as l3_agent
from neutron.agent.l3 import config as l3_config
from neutron.agent.l3 import dvr_edge_router as dvr_router
from neutron.agent.l3 import dvr_snat_ns
from neutron.agent.l3 import ha
from neutron.agent.l3 import legacy_router
from neutron.agent.l3 import link_local_allocator as lla
from neutron.agent.l3 import namespaces
from neutron.agent.l3 import router_info as l3router
from neutron.agent.l3 import router_processing_queue
from neutron.agent.linux import dibbler
from neutron.agent.linux import external_process
from neutron.agent.linux import interface
from neutron.agent.linux import iptables_manager
from neutron.agent.linux import pd
from neutron.agent.linux import ra
from neutron.agent.metadata import driver as metadata_driver
from neutron.agent import rpc as agent_rpc
from neutron.common import config as base_config
from neutron.common import constants as n_const
from neutron.common import exceptions as n_exc
from neutron.extensions import portbindings
from neutron.plugins.common import constants as p_const
from neutron.tests import base
from neutron.tests.common import l3_test_common
_uuid = uuidutils.generate_uuid
HOSTNAME = 'myhost'
FAKE_ID = _uuid()
FAKE_ID_2 = _uuid()
FIP_PRI = 32768
class BasicRouterOperationsFramework(base.BaseTestCase):
def setUp(self):
super(BasicRouterOperationsFramework, self).setUp()
mock.patch('eventlet.spawn').start()
self.conf = agent_config.setup_conf()
self.conf.register_opts(base_config.core_opts)
log.register_options(self.conf)
self.conf.register_opts(agent_config.AGENT_STATE_OPTS, 'AGENT')
self.conf.register_opts(l3_config.OPTS)
self.conf.register_opts(ha.OPTS)
agent_config.register_interface_driver_opts_helper(self.conf)
agent_config.register_process_monitor_opts(self.conf)
agent_config.register_availability_zone_opts_helper(self.conf)
self.conf.register_opts(interface.OPTS)
self.conf.register_opts(external_process.OPTS)
self.conf.register_opts(pd.OPTS)
self.conf.register_opts(ra.OPTS)
self.conf.set_override('interface_driver',
'neutron.agent.linux.interface.NullDriver')
self.conf.set_override('send_arp_for_ha', 1)
self.conf.set_override('state_path', '/tmp')
self.conf.set_override('ra_confs', '/tmp')
self.conf.set_override('pd_dhcp_driver', '')
self.device_exists_p = mock.patch(
'neutron.agent.linux.ip_lib.device_exists')
self.device_exists = self.device_exists_p.start()
self.ensure_dir = mock.patch('neutron.common.utils.ensure_dir').start()
mock.patch('neutron.agent.linux.keepalived.KeepalivedManager'
'.get_full_config_file_path').start()
self.utils_exec_p = mock.patch(
'neutron.agent.linux.utils.execute')
self.utils_exec = self.utils_exec_p.start()
self.utils_replace_file_p = mock.patch(
'neutron.common.utils.replace_file')
self.utils_replace_file = self.utils_replace_file_p.start()
self.external_process_p = mock.patch(
'neutron.agent.linux.external_process.ProcessManager')
self.external_process = self.external_process_p.start()
self.process_monitor = mock.patch(
'neutron.agent.linux.external_process.ProcessMonitor').start()
self.send_adv_notif_p = mock.patch(
'neutron.agent.linux.ip_lib.send_ip_addr_adv_notif')
self.send_adv_notif = self.send_adv_notif_p.start()
self.dvr_cls_p = mock.patch('neutron.agent.linux.interface.NullDriver')
driver_cls = self.dvr_cls_p.start()
self.mock_driver = mock.MagicMock()
self.mock_driver.DEV_NAME_LEN = (
interface.LinuxInterfaceDriver.DEV_NAME_LEN)
driver_cls.return_value = self.mock_driver
self.ip_cls_p = mock.patch('neutron.agent.linux.ip_lib.IPWrapper')
ip_cls = self.ip_cls_p.start()
self.mock_ip = mock.MagicMock()
ip_cls.return_value = self.mock_ip
ip_rule = mock.patch('neutron.agent.linux.ip_lib.IPRule').start()
self.mock_rule = mock.MagicMock()
ip_rule.return_value = self.mock_rule
ip_dev = mock.patch('neutron.agent.linux.ip_lib.IPDevice').start()
self.mock_ip_dev = mock.MagicMock()
ip_dev.return_value = self.mock_ip_dev
self.l3pluginApi_cls_p = mock.patch(
'neutron.agent.l3.agent.L3PluginApi')
l3pluginApi_cls = self.l3pluginApi_cls_p.start()
self.plugin_api = mock.MagicMock()
l3pluginApi_cls.return_value = self.plugin_api
self.looping_call_p = mock.patch(
'oslo_service.loopingcall.FixedIntervalLoopingCall')
self.looping_call_p.start()
subnet_id_1 = _uuid()
subnet_id_2 = _uuid()
self.snat_ports = [{'subnets': [{'cidr': '152.2.0.0/16',
'gateway_ip': '152.2.0.1',
'id': subnet_id_1}],
'network_id': _uuid(),
'device_owner':
l3_constants.DEVICE_OWNER_ROUTER_SNAT,
'mac_address': 'fa:16:3e:80:8d:80',
'fixed_ips': [{'subnet_id': subnet_id_1,
'ip_address': '152.2.0.13',
'prefixlen': 16}],
'id': _uuid(), 'device_id': _uuid()},
{'subnets': [{'cidr': '152.10.0.0/16',
'gateway_ip': '152.10.0.1',
'id': subnet_id_2}],
'network_id': _uuid(),
'device_owner':
l3_constants.DEVICE_OWNER_ROUTER_SNAT,
'mac_address': 'fa:16:3e:80:8d:80',
'fixed_ips': [{'subnet_id': subnet_id_2,
'ip_address': '152.10.0.13',
'prefixlen': 16}],
'id': _uuid(), 'device_id': _uuid()}]
self.ri_kwargs = {'agent_conf': self.conf,
'interface_driver': self.mock_driver}
def _process_router_instance_for_agent(self, agent, ri, router):
ri.router = router
if not ri.radvd:
ri.radvd = ra.DaemonMonitor(router['id'],
ri.ns_name,
agent.process_monitor,
ri.get_internal_device_name,
self.conf)
ri.process(agent)
class TestBasicRouterOperations(BasicRouterOperationsFramework):
def test_init_ha_conf(self):
with mock.patch('os.path.dirname', return_value='/etc/ha/'):
l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.ensure_dir.assert_called_once_with('/etc/ha/')
def test_enqueue_state_change_router_not_found(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
non_existent_router = 42
# Make sure the exceptional code path has coverage
agent.enqueue_state_change(non_existent_router, 'master')
def test_enqueue_state_change_metadata_disable(self):
self.conf.set_override('enable_metadata_proxy', False)
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = mock.Mock()
router_info = mock.MagicMock()
agent.router_info[router.id] = router_info
agent._update_metadata_proxy = mock.Mock()
agent.enqueue_state_change(router.id, 'master')
self.assertFalse(agent._update_metadata_proxy.call_count)
def test_periodic_sync_routers_task_raise_exception(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.plugin_api.get_router_ids.return_value = ['fake_id']
self.plugin_api.get_routers.side_effect = ValueError
self.assertRaises(ValueError,
agent.periodic_sync_routers_task,
agent.context)
self.assertTrue(agent.fullsync)
def test_l3_initial_report_state_done(self):
with mock.patch.object(l3_agent.L3NATAgentWithStateReport,
'periodic_sync_routers_task'),\
mock.patch.object(agent_rpc.PluginReportStateAPI,
'report_state') as report_state,\
mock.patch.object(eventlet, 'spawn_n'):
agent = l3_agent.L3NATAgentWithStateReport(host=HOSTNAME,
conf=self.conf)
self.assertTrue(agent.agent_state['start_flag'])
agent.after_start()
report_state.assert_called_once_with(agent.context,
agent.agent_state,
True)
self.assertIsNone(agent.agent_state.get('start_flag'))
def test_report_state_revival_logic(self):
with mock.patch.object(agent_rpc.PluginReportStateAPI,
'report_state') as report_state:
agent = l3_agent.L3NATAgentWithStateReport(host=HOSTNAME,
conf=self.conf)
report_state.return_value = n_const.AGENT_REVIVED
agent._report_state()
self.assertTrue(agent.fullsync)
agent.fullsync = False
report_state.return_value = n_const.AGENT_ALIVE
agent._report_state()
self.assertFalse(agent.fullsync)
def test_periodic_sync_routers_task_call_clean_stale_namespaces(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.plugin_api.get_routers.return_value = []
agent.periodic_sync_routers_task(agent.context)
self.assertFalse(agent.namespaces_manager._clean_stale)
def test_periodic_sync_routers_task_call_clean_stale_meta_proxies(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
stale_router_ids = [_uuid(), _uuid()]
active_routers = [{'id': _uuid()}, {'id': _uuid()}]
self.plugin_api.get_router_ids.return_value = [r['id'] for r
in active_routers]
self.plugin_api.get_routers.return_value = active_routers
namespace_list = [namespaces.NS_PREFIX + r_id
for r_id in stale_router_ids]
namespace_list += [namespaces.NS_PREFIX + r['id']
for r in active_routers]
self.mock_ip.get_namespaces.return_value = namespace_list
driver = metadata_driver.MetadataDriver
with mock.patch.object(
driver, 'destroy_monitored_metadata_proxy') as destroy_proxy:
agent.periodic_sync_routers_task(agent.context)
expected_calls = [mock.call(mock.ANY, r_id, agent.conf)
for r_id in stale_router_ids]
self.assertEqual(len(stale_router_ids), destroy_proxy.call_count)
destroy_proxy.assert_has_calls(expected_calls, any_order=True)
def test_router_info_create(self):
id = _uuid()
ri = l3router.RouterInfo(id, {}, **self.ri_kwargs)
self.assertTrue(ri.ns_name.endswith(id))
def test_router_info_create_with_router(self):
ns_id = _uuid()
subnet_id = _uuid()
ex_gw_port = {'id': _uuid(),
'network_id': _uuid(),
'fixed_ips': [{'ip_address': '19.4.4.4',
'prefixlen': 24,
'subnet_id': subnet_id}],
'subnets': [{'id': subnet_id,
'cidr': '19.4.4.0/24',
'gateway_ip': '19.4.4.1'}]}
router = {
'id': _uuid(),
'enable_snat': True,
'routes': [],
'gw_port': ex_gw_port}
ri = l3router.RouterInfo(ns_id, router, **self.ri_kwargs)
self.assertTrue(ri.ns_name.endswith(ns_id))
self.assertEqual(router, ri.router)
def test_agent_create(self):
l3_agent.L3NATAgent(HOSTNAME, self.conf)
def _test_internal_network_action(self, action):
router = l3_test_common.prepare_router_data(num_internal_ports=2)
router_id = router['id']
ri = l3router.RouterInfo(router_id, router, **self.ri_kwargs)
port = {'network_id': _uuid(),
'id': _uuid(),
'mac_address': 'ca:fe:de:ad:be:ef',
'fixed_ips': [{'subnet_id': _uuid(),
'ip_address': '99.0.1.9',
'prefixlen': 24}]}
interface_name = ri.get_internal_device_name(port['id'])
if action == 'add':
self.device_exists.return_value = False
ri.internal_network_added(port)
self.assertEqual(1, self.mock_driver.plug.call_count)
self.assertEqual(1, self.mock_driver.init_router_port.call_count)
self.send_adv_notif.assert_called_once_with(ri.ns_name,
interface_name,
'99.0.1.9', mock.ANY)
elif action == 'remove':
self.device_exists.return_value = True
ri.internal_network_removed(port)
self.assertEqual(1, self.mock_driver.unplug.call_count)
else:
raise Exception("Invalid action %s" % action)
@staticmethod
def _fixed_ip_cidr(fixed_ip):
return '%s/%s' % (fixed_ip['ip_address'], fixed_ip['prefixlen'])
def _test_internal_network_action_dist(self, action):
router = l3_test_common.prepare_router_data(num_internal_ports=2)
router_id = router['id']
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
ri = dvr_router.DvrEdgeRouter(
agent, HOSTNAME, router_id, router, **self.ri_kwargs)
subnet_id = _uuid()
port = {'network_id': _uuid(),
'id': _uuid(),
'mac_address': 'ca:fe:de:ad:be:ef',
'fixed_ips': [{'subnet_id': subnet_id,
'ip_address': '99.0.1.9',
'prefixlen': 24}],
'subnets': [{'id': subnet_id}]}
ri.router['gw_port_host'] = HOSTNAME
agent.host = HOSTNAME
agent.conf.agent_mode = 'dvr_snat'
sn_port = {'fixed_ips': [{'ip_address': '20.0.0.31',
'subnet_id': _uuid()}],
'subnets': [{'gateway_ip': '20.0.0.1'}],
'extra_subnets': [{'cidr': '172.16.0.0/24'}],
'id': _uuid(),
'network_id': _uuid(),
'mac_address': 'ca:fe:de:ad:be:ef'}
ex_gw_port = {'fixed_ips': [{'ip_address': '20.0.0.30',
'prefixlen': 24,
'subnet_id': _uuid()}],
'subnets': [{'gateway_ip': '20.0.0.1'}],
'extra_subnets': [{'cidr': '172.16.0.0/24'}],
'id': _uuid(),
portbindings.HOST_ID: HOSTNAME,
'network_id': _uuid(),
'mac_address': 'ca:fe:de:ad:be:ef'}
ri.snat_ports = sn_port
ri.ex_gw_port = ex_gw_port
ri.snat_namespace = mock.Mock()
if action == 'add':
self.device_exists.return_value = False
ri.get_snat_port_for_internal_port = mock.Mock(
return_value=sn_port)
ri._snat_redirect_add = mock.Mock()
ri._set_subnet_arp_info = mock.Mock()
ri._internal_network_added = mock.Mock()
ri._set_subnet_arp_info = mock.Mock()
ri.internal_network_added(port)
self.assertEqual(1, ri._snat_redirect_add.call_count)
self.assertEqual(2, ri._internal_network_added.call_count)
ri._set_subnet_arp_info.assert_called_once_with(subnet_id)
ri._internal_network_added.assert_called_with(
dvr_snat_ns.SnatNamespace.get_snat_ns_name(ri.router['id']),
sn_port['network_id'],
sn_port['id'],
sn_port['fixed_ips'],
sn_port['mac_address'],
ri._get_snat_int_device_name(sn_port['id']),
dvr_snat_ns.SNAT_INT_DEV_PREFIX,
mtu=None)
elif action == 'remove':
self.device_exists.return_value = False
ri.get_snat_port_for_internal_port = mock.Mock(
return_value=sn_port)
ri._delete_arp_cache_for_internal_port = mock.Mock()
ri._snat_redirect_modify = mock.Mock()
ri.internal_network_removed(port)
self.assertEqual(
1, ri._delete_arp_cache_for_internal_port.call_count)
ri._snat_redirect_modify.assert_called_with(
sn_port, port,
ri.get_internal_device_name(port['id']),
is_add=False)
def test_agent_add_internal_network(self):
self._test_internal_network_action('add')
def test_agent_add_internal_network_dist(self):
self._test_internal_network_action_dist('add')
def test_agent_remove_internal_network(self):
self._test_internal_network_action('remove')
def test_agent_remove_internal_network_dist(self):
self._test_internal_network_action_dist('remove')
def _add_external_gateway(self, ri, router, ex_gw_port, interface_name,
use_fake_fip=False,
no_subnet=False, no_sub_gw=None,
dual_stack=False):
self.device_exists.return_value = False
if no_sub_gw is None:
no_sub_gw = []
if use_fake_fip:
fake_fip = {'floatingips': [{'id': _uuid(),
'floating_ip_address': '192.168.1.34',
'fixed_ip_address': '192.168.0.1',
'port_id': _uuid()}]}
router[l3_constants.FLOATINGIP_KEY] = fake_fip['floatingips']
ri.external_gateway_added(ex_gw_port, interface_name)
if not router.get('distributed'):
self.assertEqual(1, self.mock_driver.plug.call_count)
self.assertEqual(1, self.mock_driver.init_router_port.call_count)
if no_subnet and not dual_stack:
self.assertEqual(0, self.send_adv_notif.call_count)
ip_cidrs = []
kwargs = {'preserve_ips': [],
'namespace': 'qrouter-' + router['id'],
'extra_subnets': [],
'clean_connections': True}
else:
exp_arp_calls = [mock.call(ri.ns_name, interface_name,
'20.0.0.30', mock.ANY)]
if dual_stack and not no_sub_gw:
exp_arp_calls += [mock.call(ri.ns_name, interface_name,
'2001:192:168:100::2',
mock.ANY)]
self.send_adv_notif.assert_has_calls(exp_arp_calls)
ip_cidrs = ['20.0.0.30/24']
if dual_stack:
if not no_sub_gw:
ip_cidrs.append('2001:192:168:100::2/64')
kwargs = {'preserve_ips': ['192.168.1.34/32'],
'namespace': 'qrouter-' + router['id'],
'extra_subnets': [{'cidr': '172.16.0.0/24'}],
'clean_connections': True}
self.mock_driver.init_router_port.assert_called_with(
interface_name, ip_cidrs, **kwargs)
else:
ri._create_dvr_gateway.assert_called_once_with(
ex_gw_port, interface_name)
def _test_external_gateway_action(self, action, router, dual_stack=False):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
ex_net_id = _uuid()
sn_port = self.snat_ports[1]
# Special setup for dvr routers
if router.get('distributed'):
agent.conf.agent_mode = 'dvr_snat'
agent.host = HOSTNAME
ri = dvr_router.DvrEdgeRouter(agent,
HOSTNAME,
router['id'],
router,
**self.ri_kwargs)
ri._create_dvr_gateway = mock.Mock()
ri.get_snat_interfaces = mock.Mock(return_value=self.snat_ports)
ri.snat_ports = self.snat_ports
ri._create_snat_namespace()
ri.fip_ns = agent.get_fip_ns(ex_net_id)
ri.internal_ports = self.snat_ports
else:
ri = l3router.RouterInfo(
router['id'], router,
**self.ri_kwargs)
ri.use_ipv6 = False
subnet_id = _uuid()
fixed_ips = [{'subnet_id': subnet_id,
'ip_address': '20.0.0.30',
'prefixlen': 24}]
subnets = [{'id': subnet_id,
'cidr': '20.0.0.0/24',
'gateway_ip': '20.0.0.1'}]
if dual_stack:
ri.use_ipv6 = True
subnet_id_v6 = _uuid()
fixed_ips.append({'subnet_id': subnet_id_v6,
'ip_address': '2001:192:168:100::2',
'prefixlen': 64})
subnets.append({'id': subnet_id_v6,
'cidr': '2001:192:168:100::/64',
'gateway_ip': '2001:192:168:100::1'})
ex_gw_port = {'fixed_ips': fixed_ips,
'subnets': subnets,
'extra_subnets': [{'cidr': '172.16.0.0/24'}],
'id': _uuid(),
'network_id': ex_net_id,
'mac_address': 'ca:fe:de:ad:be:ef'}
ex_gw_port_no_sub = {'fixed_ips': [],
'id': _uuid(),
'network_id': ex_net_id,
'mac_address': 'ca:fe:de:ad:be:ef'}
interface_name = ri.get_external_device_name(ex_gw_port['id'])
if action == 'add':
self._add_external_gateway(ri, router, ex_gw_port, interface_name,
use_fake_fip=True,
dual_stack=dual_stack)
elif action == 'add_no_sub':
ri.use_ipv6 = True
self._add_external_gateway(ri, router, ex_gw_port_no_sub,
interface_name,
no_subnet=True)
elif action == 'add_no_sub_v6_gw':
ri.use_ipv6 = True
self.conf.set_override('ipv6_gateway',
'fe80::f816:3eff:fe2e:1')
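            # With ipv6_gateway configured, the agent should use the given
            # link-local address as the default gateway even though the
            # external network has no IPv6 subnet of its own.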
if dual_stack:
use_fake_fip = True
# Remove v6 entries
del ex_gw_port['fixed_ips'][-1]
del ex_gw_port['subnets'][-1]
else:
use_fake_fip = False
ex_gw_port = ex_gw_port_no_sub
self._add_external_gateway(ri, router, ex_gw_port,
interface_name, no_subnet=True,
no_sub_gw='fe80::f816:3eff:fe2e:1',
use_fake_fip=use_fake_fip,
dual_stack=dual_stack)
elif action == 'remove':
self.device_exists.return_value = True
ri.get_snat_port_for_internal_port = mock.Mock(
return_value=sn_port)
ri._snat_redirect_remove = mock.Mock()
ri.external_gateway_removed(ex_gw_port, interface_name)
if not router.get('distributed'):
self.mock_driver.unplug.assert_called_once_with(
interface_name,
bridge=agent.conf.external_network_bridge,
namespace=mock.ANY,
prefix=mock.ANY)
else:
ri._snat_redirect_remove.assert_called_with(
sn_port, sn_port,
ri.get_internal_device_name(sn_port['id']))
ri.get_snat_port_for_internal_port.assert_called_with(
mock.ANY, ri.snat_ports)
else:
raise Exception("Invalid action %s" % action)
def _test_external_gateway_updated(self, dual_stack=False):
router = l3_test_common.prepare_router_data(num_internal_ports=2)
ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs)
ri.use_ipv6 = False
interface_name, ex_gw_port = l3_test_common.prepare_ext_gw_test(
self, ri, dual_stack=dual_stack)
fake_fip = {'floatingips': [{'id': _uuid(),
'floating_ip_address': '192.168.1.34',
'fixed_ip_address': '192.168.0.1',
'port_id': _uuid()}]}
router[l3_constants.FLOATINGIP_KEY] = fake_fip['floatingips']
ri.external_gateway_updated(ex_gw_port, interface_name)
self.assertEqual(1, self.mock_driver.plug.call_count)
self.assertEqual(1, self.mock_driver.init_router_port.call_count)
exp_arp_calls = [mock.call(ri.ns_name, interface_name,
'20.0.0.30', mock.ANY)]
if dual_stack:
ri.use_ipv6 = True
exp_arp_calls += [mock.call(ri.ns_name, interface_name,
'2001:192:168:100::2', mock.ANY)]
self.send_adv_notif.assert_has_calls(exp_arp_calls)
ip_cidrs = ['20.0.0.30/24']
gateway_ips = ['20.0.0.1']
if dual_stack:
ip_cidrs.append('2001:192:168:100::2/64')
gateway_ips.append('2001:192:168:100::1')
kwargs = {'preserve_ips': ['192.168.1.34/32'],
'namespace': 'qrouter-' + router['id'],
'extra_subnets': [{'cidr': '172.16.0.0/24'}],
'clean_connections': True}
self.mock_driver.init_router_port.assert_called_with(interface_name,
ip_cidrs,
**kwargs)
def test_external_gateway_updated(self):
self._test_external_gateway_updated()
def test_external_gateway_updated_dual_stack(self):
self._test_external_gateway_updated(dual_stack=True)
def test_dvr_edge_router_init_for_snat_namespace_object(self):
router = l3_test_common.prepare_router_data(num_internal_ports=2)
ri = dvr_router.DvrEdgeRouter(mock.Mock(),
HOSTNAME,
router['id'],
router,
**self.ri_kwargs)
        # Make sure the ri.snat_namespace object is created when the
        # router is initialized.
self.assertIsNotNone(ri.snat_namespace)
@mock.patch.object(dvr_snat_ns.SnatNamespace, 'delete')
def test_ext_gw_updated_calling_snat_ns_delete_if_gw_port_host_none(
self, mock_snat_ns):
"""Function to check the impact of snat_namespace object.
This function specifically checks the impact of the snat
namespace object value on external_gateway_removed for deleting
snat_namespace when the gw_port_host mismatches or none.
"""
router = l3_test_common.prepare_router_data(num_internal_ports=2)
ri = dvr_router.DvrEdgeRouter(mock.Mock(),
HOSTNAME,
router['id'],
router,
**self.ri_kwargs)
ri._create_snat_namespace()
interface_name, ex_gw_port = l3_test_common.prepare_ext_gw_test(self,
ri)
router['gw_port_host'] = ''
ri.external_gateway_updated(ex_gw_port, interface_name)
if router['gw_port_host'] != ri.host:
if ri.snat_namespace:
self.assertEqual(1, mock_snat_ns.call_count)
@mock.patch.object(dvr_snat_ns.SnatNamespace, 'delete')
def test_ext_gw_updated_not_calling_snat_ns_delete_if_gw_port_host_none(
self, mock_snat_ns):
"""Function to check the impact of snat_namespace object.
This function specifically checks the impact of the snat
namespace object value on external_gateway_removed for deleting
snat_namespace when gw_port_host mismatches and when the
self.snat_namespace is None.
"""
router = l3_test_common.prepare_router_data(num_internal_ports=2)
ri = dvr_router.DvrEdgeRouter(mock.Mock(),
HOSTNAME,
router['id'],
router,
**self.ri_kwargs)
ri._create_snat_namespace()
interface_name, ex_gw_port = l3_test_common.prepare_ext_gw_test(self,
ri)
router['gw_port_host'] = ''
        # Set the snat_namespace object to None here to emulate
        # an agent restart.
ri.snat_namespace = None
ri.external_gateway_updated(ex_gw_port, interface_name)
if router['gw_port_host'] != ri.host:
if ri.snat_namespace is None:
self.assertFalse(mock_snat_ns.called)
@mock.patch.object(namespaces.Namespace, 'delete')
def test_snat_ns_delete_not_called_when_snat_namespace_does_not_exist(
self, mock_ns_del):
"""Function to check the impact of snat_namespace object.
This function specifically checks the impact of the snat
namespace object initialization without the actual creation
of snat_namespace. When deletes are issued to the snat
namespace based on the snat namespace object existence, it
should be checking for the valid namespace existence before
it tries to delete.
"""
router = l3_test_common.prepare_router_data(num_internal_ports=2)
ri = dvr_router.DvrEdgeRouter(mock.Mock(),
HOSTNAME,
router['id'],
router,
**self.ri_kwargs)
        # Set a return value to emulate the non-existence
        # of the namespace.
self.mock_ip.netns.exists.return_value = False
self.assertTrue(ri.snat_namespace)
if ri.snat_namespace:
ri.snat_namespace.delete()
self.assertFalse(mock_ns_del.called)
def _test_ext_gw_updated_dvr_edge_router(self, host_match,
snat_hosted_before=True):
"""
Helper to test external gw update for edge router on dvr_snat agent
:param host_match: True if new gw host should be the same as agent host
:param snat_hosted_before: True if agent has already been hosting
snat for the router
"""
router = l3_test_common.prepare_router_data(num_internal_ports=2)
ri = dvr_router.DvrEdgeRouter(mock.Mock(),
HOSTNAME,
router['id'],
router,
**self.ri_kwargs)
if snat_hosted_before:
ri._create_snat_namespace()
snat_ns_name = ri.snat_namespace.name
else:
self.assertIsNotNone(ri.snat_namespace)
interface_name, ex_gw_port = l3_test_common.prepare_ext_gw_test(self,
ri)
ri._external_gateway_added = mock.Mock()
router['gw_port_host'] = ri.host if host_match else (ri.host + 'foo')
ri.external_gateway_updated(ex_gw_port, interface_name)
if not host_match:
self.assertFalse(ri._external_gateway_added.called)
if snat_hosted_before:
# host mismatch means that snat was rescheduled to another
# agent, hence need to verify that gw port was unplugged and
# snat namespace was deleted
self.mock_driver.unplug.assert_called_with(
interface_name,
bridge=self.conf.external_network_bridge,
namespace=snat_ns_name,
prefix=l3_agent.EXTERNAL_DEV_PREFIX)
self.assertIsNone(ri.snat_namespace)
else:
if not snat_hosted_before:
self.assertIsNotNone(ri.snat_namespace)
self.assertTrue(ri._external_gateway_added.called)
def test_ext_gw_updated_dvr_edge_router(self):
self._test_ext_gw_updated_dvr_edge_router(host_match=True)
def test_ext_gw_updated_dvr_edge_router_host_mismatch(self):
self._test_ext_gw_updated_dvr_edge_router(host_match=False)
def test_ext_gw_updated_dvr_dvr_edge_router_snat_rescheduled(self):
self._test_ext_gw_updated_dvr_edge_router(host_match=True,
snat_hosted_before=False)
def test_agent_add_external_gateway(self):
router = l3_test_common.prepare_router_data(num_internal_ports=2)
self._test_external_gateway_action('add', router)
def test_agent_add_external_gateway_dual_stack(self):
router = l3_test_common.prepare_router_data(num_internal_ports=2)
self._test_external_gateway_action('add', router, dual_stack=True)
def test_agent_add_external_gateway_dist(self):
router = l3_test_common.prepare_router_data(num_internal_ports=2)
router['distributed'] = True
router['gw_port_host'] = HOSTNAME
self._test_external_gateway_action('add', router)
def test_agent_add_external_gateway_dist_dual_stack(self):
router = l3_test_common.prepare_router_data(num_internal_ports=2)
router['distributed'] = True
router['gw_port_host'] = HOSTNAME
self._test_external_gateway_action('add', router, dual_stack=True)
def test_agent_add_external_gateway_no_subnet(self):
router = l3_test_common.prepare_router_data(num_internal_ports=2,
v6_ext_gw_with_sub=False)
self._test_external_gateway_action('add_no_sub', router)
def test_agent_add_external_gateway_no_subnet_with_ipv6_gw(self):
router = l3_test_common.prepare_router_data(num_internal_ports=2,
v6_ext_gw_with_sub=False)
self._test_external_gateway_action('add_no_sub_v6_gw', router)
def test_agent_add_external_gateway_dual_stack_no_subnet_w_ipv6_gw(self):
router = l3_test_common.prepare_router_data(num_internal_ports=2,
v6_ext_gw_with_sub=False)
self._test_external_gateway_action('add_no_sub_v6_gw',
router, dual_stack=True)
def test_agent_remove_external_gateway(self):
router = l3_test_common.prepare_router_data(num_internal_ports=2)
self._test_external_gateway_action('remove', router)
def test_agent_remove_external_gateway_dual_stack(self):
router = l3_test_common.prepare_router_data(num_internal_ports=2)
self._test_external_gateway_action('remove', router, dual_stack=True)
def test_agent_remove_external_gateway_dist(self):
router = l3_test_common.prepare_router_data(num_internal_ports=2)
router['distributed'] = True
router['gw_port_host'] = HOSTNAME
self._test_external_gateway_action('remove', router)
def test_agent_remove_external_gateway_dist_dual_stack(self):
router = l3_test_common.prepare_router_data(num_internal_ports=2)
router['distributed'] = True
router['gw_port_host'] = HOSTNAME
self._test_external_gateway_action('remove', router, dual_stack=True)
def _verify_snat_mangle_rules(self, nat_rules, mangle_rules, router,
negate=False):
interfaces = router[l3_constants.INTERFACE_KEY]
source_cidrs = []
for iface in interfaces:
for subnet in iface['subnets']:
prefix = subnet['cidr'].split('/')[1]
source_cidr = "%s/%s" % (iface['fixed_ips'][0]['ip_address'],
prefix)
source_cidrs.append(source_cidr)
source_nat_ip = router['gw_port']['fixed_ips'][0]['ip_address']
interface_name = ('qg-%s' % router['gw_port']['id'])[:14]
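        # Expected NAT behaviour: accept established (non-DNAT) traffic,
        # SNAT traffic leaving via the gateway device, and also SNAT
        # marked DNAT'ed traffic to the router's external address.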
expected_rules = [
'! -i %s ! -o %s -m conntrack ! --ctstate DNAT -j ACCEPT' %
(interface_name, interface_name),
'-o %s -j SNAT --to-source %s' % (interface_name, source_nat_ip),
'-m mark ! --mark 0x2/%s -m conntrack --ctstate DNAT '
'-j SNAT --to-source %s' %
(n_const.ROUTER_MARK_MASK, source_nat_ip)]
for r in nat_rules:
if negate:
self.assertNotIn(r.rule, expected_rules)
else:
self.assertIn(r.rule, expected_rules)
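        # Expected mangle behaviour: mark traffic entering from the gateway
        # device and save the address-scope mark on the connection.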
expected_rules = [
'-i %s -j MARK --set-xmark 0x2/%s' %
(interface_name, n_const.ROUTER_MARK_MASK),
'-o %s -m connmark --mark 0x0/%s -j CONNMARK '
'--save-mark --nfmask %s --ctmask %s' %
(interface_name,
l3router.ADDRESS_SCOPE_MARK_MASK,
l3router.ADDRESS_SCOPE_MARK_MASK,
l3router.ADDRESS_SCOPE_MARK_MASK)]
for r in mangle_rules:
if negate:
self.assertNotIn(r.rule, expected_rules)
else:
self.assertIn(r.rule, expected_rules)
def test_get_snat_port_for_internal_port(self):
router = l3_test_common.prepare_router_data(num_internal_ports=4)
ri = dvr_router.DvrEdgeRouter(mock.sentinel.agent,
HOSTNAME,
router['id'],
router,
**self.ri_kwargs)
test_port = {
'mac_address': '00:12:23:34:45:56',
'fixed_ips': [{'subnet_id': l3_test_common.get_subnet_id(
router[l3_constants.INTERFACE_KEY][0]),
'ip_address': '101.12.13.14'}]}
internal_ports = ri.router.get(l3_constants.INTERFACE_KEY, [])
        # Valid case: the internal port's subnet matches a snat interface
with mock.patch.object(ri, 'get_snat_interfaces') as get_interfaces:
get_interfaces.return_value = [test_port]
res_port = ri.get_snat_port_for_internal_port(internal_ports[0])
self.assertEqual(test_port, res_port)
        # Invalid case: no snat interface matches the altered subnet_id
test_port['fixed_ips'][0]['subnet_id'] = 1234
res_ip = ri.get_snat_port_for_internal_port(internal_ports[0])
self.assertNotEqual(test_port, res_ip)
self.assertIsNone(res_ip)
def test_process_cent_router(self):
router = l3_test_common.prepare_router_data()
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs)
self._test_process_router(ri, agent)
def test_process_dist_router(self):
router = l3_test_common.prepare_router_data()
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
ri = dvr_router.DvrEdgeRouter(agent,
HOSTNAME,
router['id'],
router,
**self.ri_kwargs)
subnet_id = l3_test_common.get_subnet_id(
router[l3_constants.INTERFACE_KEY][0])
ri.router['distributed'] = True
ri.router['_snat_router_interfaces'] = [{
'fixed_ips': [{'subnet_id': subnet_id,
'ip_address': '1.2.3.4'}]}]
ri.router['gw_port_host'] = None
self._test_process_router(ri, agent)
def _test_process_router(self, ri, agent):
router = ri.router
agent.host = HOSTNAME
fake_fip_id = 'fake_fip_id'
ri.create_dvr_fip_interfaces = mock.Mock()
ri.process_floating_ip_addresses = mock.Mock()
ri.process_floating_ip_nat_rules = mock.Mock()
ri.process_floating_ip_addresses.return_value = {
fake_fip_id: 'ACTIVE'}
ri.external_gateway_added = mock.Mock()
ri.external_gateway_updated = mock.Mock()
ri.process_address_scope = mock.Mock()
fake_floatingips1 = {'floatingips': [
{'id': fake_fip_id,
'floating_ip_address': '8.8.8.8',
'fixed_ip_address': '7.7.7.7',
'port_id': _uuid(),
'host': HOSTNAME}]}
ri.process(agent)
ri.process_floating_ip_addresses.assert_called_with(mock.ANY)
ri.process_floating_ip_addresses.reset_mock()
ri.process_floating_ip_nat_rules.assert_called_with()
ri.process_floating_ip_nat_rules.reset_mock()
ri.external_gateway_added.reset_mock()
# remap floating IP to a new fixed ip
fake_floatingips2 = copy.deepcopy(fake_floatingips1)
fake_floatingips2['floatingips'][0]['fixed_ip_address'] = '7.7.7.8'
router[l3_constants.FLOATINGIP_KEY] = fake_floatingips2['floatingips']
ri.process(agent)
ri.process_floating_ip_addresses.assert_called_with(mock.ANY)
ri.process_floating_ip_addresses.reset_mock()
ri.process_floating_ip_nat_rules.assert_called_with()
ri.process_floating_ip_nat_rules.reset_mock()
self.assertEqual(0, ri.external_gateway_added.call_count)
self.assertEqual(0, ri.external_gateway_updated.call_count)
ri.external_gateway_added.reset_mock()
ri.external_gateway_updated.reset_mock()
# change the ex_gw_port a bit to test gateway update
new_gw_port = copy.deepcopy(ri.router['gw_port'])
ri.router['gw_port'] = new_gw_port
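        # Using a deep copy keeps the previously cached gw_port pointing at
        # the old address, so bumping the fixed IP below should be detected
        # as a gateway update rather than a gateway add.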
old_ip = (netaddr.IPAddress(ri.router['gw_port']
['fixed_ips'][0]['ip_address']))
ri.router['gw_port']['fixed_ips'][0]['ip_address'] = str(old_ip + 1)
ri.process(agent)
ri.process_floating_ip_addresses.reset_mock()
ri.process_floating_ip_nat_rules.reset_mock()
self.assertEqual(0, ri.external_gateway_added.call_count)
self.assertEqual(1, ri.external_gateway_updated.call_count)
# remove just the floating ips
del router[l3_constants.FLOATINGIP_KEY]
ri.process(agent)
ri.process_floating_ip_addresses.assert_called_with(mock.ANY)
ri.process_floating_ip_addresses.reset_mock()
ri.process_floating_ip_nat_rules.assert_called_with()
ri.process_floating_ip_nat_rules.reset_mock()
        # Now there are no ports, so state is torn down
del router[l3_constants.INTERFACE_KEY]
del router['gw_port']
ri.process(agent)
self.assertEqual(1, self.send_adv_notif.call_count)
distributed = ri.router.get('distributed', False)
self.assertEqual(distributed, ri.process_floating_ip_addresses.called)
self.assertEqual(distributed, ri.process_floating_ip_nat_rules.called)
@mock.patch('neutron.agent.linux.ip_lib.IPDevice')
def _test_process_floating_ip_addresses_add(self, ri, agent, IPDevice):
floating_ips = ri.get_floating_ips()
fip_id = floating_ips[0]['id']
IPDevice.return_value = device = mock.Mock()
device.addr.list.return_value = []
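        # The gateway device reports no configured addresses, so processing
        # should add the floating IP and report it as ACTIVE.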
ri.iptables_manager.ipv4['nat'] = mock.MagicMock()
ex_gw_port = {'id': _uuid(), 'network_id': mock.sentinel.ext_net_id}
ri.add_floating_ip = mock.Mock(
return_value=l3_constants.FLOATINGIP_STATUS_ACTIVE)
with mock.patch.object(lla.LinkLocalAllocator, '_write'):
if ri.router['distributed']:
ri.fip_ns = agent.get_fip_ns(ex_gw_port['network_id'])
ri.create_dvr_fip_interfaces(ex_gw_port)
fip_statuses = ri.process_floating_ip_addresses(
mock.sentinel.interface_name)
self.assertEqual({fip_id: l3_constants.FLOATINGIP_STATUS_ACTIVE},
fip_statuses)
ri.add_floating_ip.assert_called_once_with(
floating_ips[0], mock.sentinel.interface_name, device)
@mock.patch.object(lla.LinkLocalAllocator, '_write')
def test_create_dvr_fip_interfaces_if_fipnamespace_exist(self, lla_write):
fake_network_id = _uuid()
subnet_id = _uuid()
fake_floatingips = {'floatingips': [
{'id': _uuid(),
'floating_ip_address': '20.0.0.3',
'fixed_ip_address': '192.168.0.1',
'floating_network_id': _uuid(),
'port_id': _uuid(),
'host': HOSTNAME}]}
agent_gateway_port = (
[{'fixed_ips': [
{'ip_address': '20.0.0.30',
'prefixlen': 24,
'subnet_id': subnet_id}],
'subnets': [
{'id': subnet_id,
'cidr': '20.0.0.0/24',
'gateway_ip': '20.0.0.1'}],
'id': _uuid(),
'network_id': fake_network_id,
'mac_address': 'ca:fe:de:ad:be:ef'}]
)
router = l3_test_common.prepare_router_data(enable_snat=True)
router[l3_constants.FLOATINGIP_KEY] = fake_floatingips['floatingips']
router[n_const.FLOATINGIP_AGENT_INTF_KEY] = agent_gateway_port
router['distributed'] = True
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
ri = dvr_router.DvrEdgeRouter(
agent, HOSTNAME, router['id'], router, **self.ri_kwargs)
ext_gw_port = ri.router.get('gw_port')
ri.fip_ns = agent.get_fip_ns(ext_gw_port['network_id'])
ri.dist_fip_count = 0
agent.process_router_add = mock.Mock()
ri.fip_ns.create_rtr_2_fip_link = mock.Mock()
with mock.patch.object(ri, 'get_floating_ips') as fips, \
mock.patch.object(ri.fip_ns,
'create') as create_fip, \
mock.patch.object(ri, 'get_floating_agent_gw_interface'
) as fip_gw_port:
fips.return_value = fake_floatingips
fip_gw_port.return_value = agent_gateway_port[0]
ri.create_dvr_fip_interfaces(ext_gw_port)
self.assertTrue(fip_gw_port.called)
self.assertTrue(fips.called)
self.assertTrue(create_fip.called)
self.assertEqual(agent_gateway_port[0],
ri.fip_ns.agent_gateway_port)
            # Now let us associate the fip with the router
ri.floating_ip_added_dist(fips, "192.168.0.1/32")
self.assertEqual(1, ri.dist_fip_count)
# Now let us disassociate the fip from the router
ri.floating_ip_removed_dist("192.168.0.1/32")
self.assertEqual(0, ri.dist_fip_count)
            # Call create_dvr_fip_interfaces again to make sure that the
            # fip namespace create is not called a second time. If it were,
            # duplicate rules would be configured in the fip namespace.
ri.create_dvr_fip_interfaces(ext_gw_port)
self.assertTrue(fip_gw_port.called)
self.assertTrue(fips.called)
create_fip.assert_called_once_with()
self.assertEqual(2, ri.fip_ns.create_rtr_2_fip_link.call_count)
@mock.patch.object(lla.LinkLocalAllocator, '_write')
def test_create_dvr_fip_interfaces_for_late_binding(self, lla_write):
fake_network_id = _uuid()
fake_subnet_id = _uuid()
fake_floatingips = {'floatingips': [
{'id': _uuid(),
'floating_ip_address': '20.0.0.3',
'fixed_ip_address': '192.168.0.1',
'floating_network_id': _uuid(),
'port_id': _uuid(),
'host': HOSTNAME}]}
agent_gateway_port = (
{'fixed_ips': [
{'ip_address': '20.0.0.30',
'prefixlen': 24,
'subnet_id': fake_subnet_id}],
'subnets': [
{'id': fake_subnet_id,
'cidr': '20.0.0.0/24',
'gateway_ip': '20.0.0.1'}],
'id': _uuid(),
'network_id': fake_network_id,
'mac_address': 'ca:fe:de:ad:be:ef'}
)
router = l3_test_common.prepare_router_data(enable_snat=True)
router[l3_constants.FLOATINGIP_KEY] = fake_floatingips['floatingips']
router[n_const.FLOATINGIP_AGENT_INTF_KEY] = []
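        # No agent gateway port is supplied with the router, so the agent
        # is expected to request one from the plugin at binding time.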
router['distributed'] = True
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
ri = dvr_router.DvrEdgeRouter(
agent, HOSTNAME, router['id'], router, **self.ri_kwargs)
ext_gw_port = ri.router.get('gw_port')
ri.fip_ns = agent.get_fip_ns(ext_gw_port['network_id'])
ri.dist_fip_count = 0
ri.fip_ns.subscribe = mock.Mock()
with mock.patch.object(agent.plugin_rpc,
'get_agent_gateway_port') as fip_gw_port:
fip_gw_port.return_value = agent_gateway_port
ri.create_dvr_fip_interfaces(ext_gw_port)
self.assertTrue(fip_gw_port.called)
self.assertEqual(agent_gateway_port,
ri.fip_ns.agent_gateway_port)
@mock.patch.object(lla.LinkLocalAllocator, '_write')
def test_create_dvr_fip_interfaces(self, lla_write):
fake_network_id = _uuid()
subnet_id = _uuid()
fake_floatingips = {'floatingips': [
{'id': _uuid(),
'floating_ip_address': '20.0.0.3',
'fixed_ip_address': '192.168.0.1',
'floating_network_id': _uuid(),
'port_id': _uuid(),
'host': HOSTNAME}]}
agent_gateway_port = (
[{'fixed_ips': [
{'ip_address': '20.0.0.30',
'prefixlen': 24,
'subnet_id': subnet_id}],
'subnets': [
{'id': subnet_id,
'cidr': '20.0.0.0/24',
'gateway_ip': '20.0.0.1'}],
'id': _uuid(),
'network_id': fake_network_id,
'mac_address': 'ca:fe:de:ad:be:ef'}]
)
router = l3_test_common.prepare_router_data(enable_snat=True)
router[l3_constants.FLOATINGIP_KEY] = fake_floatingips['floatingips']
router[n_const.FLOATINGIP_AGENT_INTF_KEY] = agent_gateway_port
router['distributed'] = True
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
ri = dvr_router.DvrEdgeRouter(
agent, HOSTNAME, router['id'], router, **self.ri_kwargs)
ext_gw_port = ri.router.get('gw_port')
ri.fip_ns = agent.get_fip_ns(ext_gw_port['network_id'])
ri.dist_fip_count = 0
ri.fip_ns.subscribe = mock.Mock()
ri.fip_ns.agent_router_gateway = mock.Mock()
agent.process_router_add = mock.Mock()
with mock.patch.object(ri, 'get_floating_ips') as fips, \
mock.patch.object(ri, 'get_floating_agent_gw_interface'
) as fip_gw_port:
fips.return_value = fake_floatingips
fip_gw_port.return_value = agent_gateway_port[0]
ri.create_dvr_fip_interfaces(ext_gw_port)
self.assertTrue(fip_gw_port.called)
self.assertTrue(fips.called)
self.assertEqual(agent_gateway_port[0],
ri.fip_ns.agent_gateway_port)
self.assertTrue(ri.rtr_fip_subnet)
@mock.patch.object(lla.LinkLocalAllocator, '_write')
def test_create_dvr_fip_interfaces_for_restart_l3agent_case(self,
lla_write):
fake_floatingips = {'floatingips': [
{'id': _uuid(),
'floating_ip_address': '20.0.0.3',
'fixed_ip_address': '192.168.0.1',
'floating_network_id': _uuid(),
'port_id': _uuid(),
'host': HOSTNAME}]}
agent_gateway_port = (
[{'fixed_ips': [
{'ip_address': '20.0.0.30',
'prefixlen': 24,
'subnet_id': 'subnet_id'}],
'subnets': [
{'id': 'subnet_id',
'cidr': '20.0.0.0/24',
'gateway_ip': '20.0.0.1'}],
'id': _uuid(),
'network_id': 'fake_network_id',
'mac_address': 'ca:fe:de:ad:be:ef'}]
)
router = l3_test_common.prepare_router_data(enable_snat=True)
router[l3_constants.FLOATINGIP_KEY] = fake_floatingips['floatingips']
router[n_const.FLOATINGIP_AGENT_INTF_KEY] = agent_gateway_port
router['distributed'] = True
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
ri = dvr_router.DvrEdgeRouter(
agent, HOSTNAME, router['id'], router, **self.ri_kwargs)
ext_gw_port = ri.router.get('gw_port')
ri.fip_ns = agent.get_fip_ns(ext_gw_port['network_id'])
ri.fip_ns.subscribe = mock.Mock(return_value=True)
ri.fip_ns.agent_router_gateway = mock.Mock()
ri.rtr_fip_subnet = None
ri.dist_fip_count = 0
with mock.patch.object(ri, 'get_floating_ips') as fips,\
mock.patch.object(ri, 'get_floating_agent_gw_interface'
) as fip_gw_port:
fips.return_value = fake_floatingips
fip_gw_port.return_value = agent_gateway_port[0]
ri.create_dvr_fip_interfaces(ext_gw_port)
self.assertTrue(fip_gw_port.called)
self.assertTrue(fips.called)
self.assertEqual(agent_gateway_port[0],
ri.fip_ns.agent_gateway_port)
self.assertTrue(ri.rtr_fip_subnet)
def test_process_router_cent_floating_ip_add(self):
fake_floatingips = {'floatingips': [
{'id': _uuid(),
'floating_ip_address': '15.1.2.3',
'fixed_ip_address': '192.168.0.1',
'status': 'DOWN',
'floating_network_id': _uuid(),
'port_id': _uuid(),
'host': HOSTNAME}]}
router = l3_test_common.prepare_router_data(enable_snat=True)
router[l3_constants.FLOATINGIP_KEY] = fake_floatingips['floatingips']
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs)
ri.iptables_manager.ipv4['nat'] = mock.MagicMock()
ri.get_external_device_name = mock.Mock(return_value='exgw')
self._test_process_floating_ip_addresses_add(ri, agent)
def test_process_router_snat_disabled(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = l3_test_common.prepare_router_data(enable_snat=True)
ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs)
ri.external_gateway_added = mock.Mock()
# Process with NAT
ri.process(agent)
orig_nat_rules = ri.iptables_manager.ipv4['nat'].rules[:]
orig_mangle_rules = ri.iptables_manager.ipv4['mangle'].rules[:]
# Reprocess without NAT
router['enable_snat'] = False
# Reassign the router object to RouterInfo
ri.router = router
ri.process(agent)
        # Plain set difference does not work well with IpTablesRule
        # instances, so compute the delta with list comprehensions
nat_rules_delta = [r for r in orig_nat_rules
if r not in ri.iptables_manager.ipv4['nat'].rules]
self.assertEqual(1, len(nat_rules_delta))
mangle_rules_delta = [
r for r in orig_mangle_rules
if r not in ri.iptables_manager.ipv4['mangle'].rules]
self.assertEqual(1, len(mangle_rules_delta))
self._verify_snat_mangle_rules(nat_rules_delta, mangle_rules_delta,
router)
self.assertEqual(1, self.send_adv_notif.call_count)
def test_process_router_snat_enabled(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = l3_test_common.prepare_router_data(enable_snat=False)
ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs)
ri.external_gateway_added = mock.Mock()
# Process without NAT
ri.process(agent)
orig_nat_rules = ri.iptables_manager.ipv4['nat'].rules[:]
orig_mangle_rules = ri.iptables_manager.ipv4['mangle'].rules[:]
# Reprocess with NAT
router['enable_snat'] = True
# Reassign the router object to RouterInfo
ri.router = router
ri.process(agent)
        # Plain set difference does not work well with IpTablesRule
        # instances, so compute the delta with list comprehensions
nat_rules_delta = [r for r in ri.iptables_manager.ipv4['nat'].rules
if r not in orig_nat_rules]
self.assertEqual(1, len(nat_rules_delta))
mangle_rules_delta = [
r for r in ri.iptables_manager.ipv4['mangle'].rules
if r not in orig_mangle_rules]
self.assertEqual(1, len(mangle_rules_delta))
self._verify_snat_mangle_rules(nat_rules_delta, mangle_rules_delta,
router)
self.assertEqual(1, self.send_adv_notif.call_count)
def _test_update_routing_table(self, is_snat_host=True):
router = l3_test_common.prepare_router_data()
uuid = router['id']
s_netns = 'snat-' + uuid
q_netns = 'qrouter-' + uuid
fake_route1 = {'destination': '135.207.0.0/16',
'nexthop': '19.4.4.200'}
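        # A route change should always be applied in the qrouter namespace;
        # when this agent also hosts the snat function it should be applied
        # in the snat namespace as well.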
calls = [mock.call('replace', fake_route1, q_netns)]
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
ri = dvr_router.DvrEdgeRouter(
agent,
HOSTNAME,
uuid,
router,
**self.ri_kwargs)
ri._update_routing_table = mock.Mock()
with mock.patch.object(ri, '_is_this_snat_host') as snat_host:
snat_host.return_value = is_snat_host
ri.update_routing_table('replace', fake_route1)
if is_snat_host:
ri._update_routing_table('replace', fake_route1, s_netns)
calls += [mock.call('replace', fake_route1, s_netns)]
ri._update_routing_table.assert_has_calls(calls, any_order=True)
def test_process_update_snat_routing_table(self):
self._test_update_routing_table()
def test_process_not_update_snat_routing_table(self):
self._test_update_routing_table(is_snat_host=False)
def test_process_router_interface_added(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = l3_test_common.prepare_router_data()
ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs)
ri.external_gateway_added = mock.Mock()
# Process with NAT
ri.process(agent)
# Add an interface and reprocess
l3_test_common.router_append_interface(router)
# Reassign the router object to RouterInfo
ri.router = router
ri.process(agent)
# send_ip_addr_adv_notif is called both times process is called
self.assertEqual(2, self.send_adv_notif.call_count)
def _test_process_ipv6_only_or_dual_stack_gw(self, dual_stack=False):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = l3_test_common.prepare_router_data(ip_version=6,
dual_stack=dual_stack)
# Get NAT rules without the gw_port
gw_port = router['gw_port']
router['gw_port'] = None
ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs)
ri.external_gateway_added = mock.Mock()
self._process_router_instance_for_agent(agent, ri, router)
orig_nat_rules = ri.iptables_manager.ipv4['nat'].rules[:]
# Get NAT rules with the gw_port
router['gw_port'] = gw_port
ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs)
p = ri.external_gateway_nat_fip_rules
s = ri.external_gateway_nat_snat_rules
attrs_to_mock = dict(
[(a, mock.DEFAULT) for a in
['external_gateway_nat_fip_rules',
'external_gateway_nat_snat_rules']]
)
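        # Wrap the real rule-generation methods so the calls can be counted
        # while still producing the actual NAT rules.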
with mock.patch.multiple(ri, **attrs_to_mock) as mocks:
mocks['external_gateway_nat_fip_rules'].side_effect = p
mocks['external_gateway_nat_snat_rules'].side_effect = s
self._process_router_instance_for_agent(agent, ri, router)
new_nat_rules = ri.iptables_manager.ipv4['nat'].rules[:]
# NAT rules should only change for dual_stack operation
if dual_stack:
self.assertTrue(
mocks['external_gateway_nat_fip_rules'].called)
self.assertTrue(
mocks['external_gateway_nat_snat_rules'].called)
self.assertNotEqual(orig_nat_rules, new_nat_rules)
else:
self.assertFalse(
mocks['external_gateway_nat_fip_rules'].called)
self.assertFalse(
mocks['external_gateway_nat_snat_rules'].called)
self.assertEqual(orig_nat_rules, new_nat_rules)
def test_process_ipv6_only_gw(self):
self._test_process_ipv6_only_or_dual_stack_gw()
def test_process_dual_stack_gw(self):
self._test_process_ipv6_only_or_dual_stack_gw(dual_stack=True)
def _process_router_ipv6_interface_added(
self, router, ra_mode=None, addr_mode=None):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs)
ri.external_gateway_added = mock.Mock()
# Process with NAT
ri.process(agent)
orig_nat_rules = ri.iptables_manager.ipv4['nat'].rules[:]
# Add an IPv6 interface and reprocess
l3_test_common.router_append_interface(router, count=1,
ip_version=6, ra_mode=ra_mode,
addr_mode=addr_mode)
# Reassign the router object to RouterInfo
self._process_router_instance_for_agent(agent, ri, router)
# IPv4 NAT rules should not be changed by adding an IPv6 interface
nat_rules_delta = [r for r in ri.iptables_manager.ipv4['nat'].rules
if r not in orig_nat_rules]
self.assertFalse(nat_rules_delta)
return ri
def _radvd_expected_call_external_process(self, ri, enable=True):
expected_calls = [mock.call(uuid=ri.router['id'],
service='radvd',
default_cmd_callback=mock.ANY,
namespace=ri.ns_name,
conf=mock.ANY,
run_as_root=True)]
if enable:
expected_calls.append(mock.call().enable(reload_cfg=True))
else:
expected_calls.append(mock.call().disable())
return expected_calls
def _process_router_ipv6_subnet_added(self, router,
ipv6_subnet_modes=None, dns_nameservers=None, network_mtu=0):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs)
agent.external_gateway_added = mock.Mock()
self._process_router_instance_for_agent(agent, ri, router)
# Add an IPv6 interface with len(ipv6_subnet_modes) subnets
# and reprocess
l3_test_common.router_append_subnet(
router,
count=len(ipv6_subnet_modes),
ip_version=6,
ipv6_subnet_modes=ipv6_subnet_modes,
dns_nameservers=dns_nameservers,
network_mtu=network_mtu)
# Reassign the router object to RouterInfo
self._process_router_instance_for_agent(agent, ri, router)
return ri
def _assert_ri_process_enabled(self, ri):
"""Verify that process was enabled for a router instance."""
expected_calls = self._radvd_expected_call_external_process(ri)
self.assertEqual(expected_calls, self.external_process.mock_calls)
def _assert_ri_process_disabled(self, ri):
"""Verify that process was disabled for a router instance."""
expected_calls = self._radvd_expected_call_external_process(ri, False)
self.assertEqual(expected_calls, self.external_process.mock_calls)
def test_process_router_ipv6_interface_added(self):
router = l3_test_common.prepare_router_data()
ri = self._process_router_ipv6_interface_added(router)
self._assert_ri_process_enabled(ri)
# Expect radvd configured without prefix
self.assertNotIn('prefix', self.utils_replace_file.call_args[0][1])
def test_process_router_ipv6_slaac_interface_added(self):
router = l3_test_common.prepare_router_data()
ri = self._process_router_ipv6_interface_added(
router, ra_mode=n_const.IPV6_SLAAC)
self._assert_ri_process_enabled(ri)
# Expect radvd configured with prefix
radvd_config_str = self.utils_replace_file.call_args[0][1]
self.assertIn('prefix', radvd_config_str)
self.assertIn('AdvAutonomous on', radvd_config_str)
def test_process_router_ipv6_dhcpv6_stateful_interface_added(self):
router = l3_test_common.prepare_router_data()
ri = self._process_router_ipv6_interface_added(
router, ra_mode=n_const.DHCPV6_STATEFUL)
self._assert_ri_process_enabled(ri)
# Expect radvd configured with prefix
radvd_config_str = self.utils_replace_file.call_args[0][1]
self.assertIn('prefix', radvd_config_str)
self.assertIn('AdvAutonomous off', radvd_config_str)
def test_process_router_ipv6_subnets_added(self):
router = l3_test_common.prepare_router_data()
ri = self._process_router_ipv6_subnet_added(router, ipv6_subnet_modes=[
{'ra_mode': n_const.IPV6_SLAAC,
'address_mode': n_const.IPV6_SLAAC},
{'ra_mode': n_const.DHCPV6_STATELESS,
'address_mode': n_const.DHCPV6_STATELESS},
{'ra_mode': n_const.DHCPV6_STATEFUL,
'address_mode': n_const.DHCPV6_STATEFUL}])
self._assert_ri_process_enabled(ri)
radvd_config_str = self.utils_replace_file.call_args[0][1]
        # Assert there is a prefix stanza for each of the three subnets
        # (SLAAC, DHCPV6_STATELESS and DHCPV6_STATEFUL) on one interface
self.assertEqual(3, radvd_config_str.count("prefix"))
self.assertEqual(1, radvd_config_str.count("interface"))
self.assertEqual(2, radvd_config_str.count("AdvAutonomous on"))
self.assertEqual(1, radvd_config_str.count("AdvAutonomous off"))
def test_process_router_ipv6_subnets_added_to_existing_port(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = l3_test_common.prepare_router_data()
ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs)
agent.external_gateway_added = mock.Mock()
self._process_router_instance_for_agent(agent, ri, router)
# Add the first subnet on a new interface
l3_test_common.router_append_subnet(
router, count=1,
ip_version=6, ipv6_subnet_modes=[
{'ra_mode': n_const.IPV6_SLAAC,
'address_mode': n_const.IPV6_SLAAC}])
self._process_router_instance_for_agent(agent, ri, router)
self._assert_ri_process_enabled(ri)
radvd_config = self.utils_replace_file.call_args[0][1].split()
self.assertEqual(1, len(ri.internal_ports[1]['subnets']))
self.assertEqual(1, len(ri.internal_ports[1]['fixed_ips']))
self.assertEqual(1, radvd_config.count("prefix"))
self.assertEqual(1, radvd_config.count("interface"))
# Reset mocks to verify radvd enabled and configured correctly
# after second subnet added to interface
self.external_process.reset_mock()
self.utils_replace_file.reset_mock()
# Add the second subnet on the same interface
interface_id = router[l3_constants.INTERFACE_KEY][1]['id']
l3_test_common.router_append_subnet(
router, count=1,
ip_version=6,
ipv6_subnet_modes=[
{'ra_mode': n_const.IPV6_SLAAC,
'address_mode': n_const.IPV6_SLAAC}],
interface_id=interface_id)
self._process_router_instance_for_agent(agent, ri, router)
# radvd should have been enabled again and the interface
# should have two prefixes
self._assert_ri_process_enabled(ri)
radvd_config = self.utils_replace_file.call_args[0][1].split()
self.assertEqual(2, len(ri.internal_ports[1]['subnets']))
self.assertEqual(2, len(ri.internal_ports[1]['fixed_ips']))
self.assertEqual(2, radvd_config.count("prefix"))
self.assertEqual(1, radvd_config.count("interface"))
def test_process_router_ipv6v4_interface_added(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = l3_test_common.prepare_router_data()
ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs)
ri.external_gateway_added = mock.Mock()
# Process with NAT
ri.process(agent)
# Add an IPv4 and IPv6 interface and reprocess
l3_test_common.router_append_interface(router, count=1, ip_version=4)
l3_test_common.router_append_interface(router, count=1, ip_version=6)
# Reassign the router object to RouterInfo
self._process_router_instance_for_agent(agent, ri, router)
self._assert_ri_process_enabled(ri)
def test_process_router_interface_removed(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = l3_test_common.prepare_router_data(num_internal_ports=2)
ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs)
ri.external_gateway_added = mock.Mock()
# Process with NAT
ri.process(agent)
        # Remove an interface and reprocess
del router[l3_constants.INTERFACE_KEY][1]
# Reassign the router object to RouterInfo
ri.router = router
ri.process(agent)
# send_ip_addr_adv_notif is called both times process is called
self.assertEqual(2, self.send_adv_notif.call_count)
def test_process_router_ipv6_interface_removed(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = l3_test_common.prepare_router_data()
ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs)
ri.external_gateway_added = mock.Mock()
self._process_router_instance_for_agent(agent, ri, router)
# Add an IPv6 interface and reprocess
l3_test_common.router_append_interface(router, count=1, ip_version=6)
self._process_router_instance_for_agent(agent, ri, router)
self._assert_ri_process_enabled(ri)
# Reset the calls so we can check for disable radvd
self.external_process.reset_mock()
self.process_monitor.reset_mock()
# Remove the IPv6 interface and reprocess
del router[l3_constants.INTERFACE_KEY][1]
self._process_router_instance_for_agent(agent, ri, router)
self._assert_ri_process_disabled(ri)
def test_process_router_ipv6_subnet_removed(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = l3_test_common.prepare_router_data()
ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs)
agent.external_gateway_added = mock.Mock()
self._process_router_instance_for_agent(agent, ri, router)
# Add an IPv6 interface with two subnets and reprocess
l3_test_common.router_append_subnet(
router, count=2, ip_version=6,
ipv6_subnet_modes=([{'ra_mode': n_const.IPV6_SLAAC,
'address_mode': n_const.IPV6_SLAAC}]
* 2))
self._process_router_instance_for_agent(agent, ri, router)
self._assert_ri_process_enabled(ri)
# Reset mocks to check for modified radvd config
self.utils_replace_file.reset_mock()
self.external_process.reset_mock()
# Remove one subnet from the interface and reprocess
interfaces = copy.deepcopy(router[l3_constants.INTERFACE_KEY])
del interfaces[1]['subnets'][0]
del interfaces[1]['fixed_ips'][0]
router[l3_constants.INTERFACE_KEY] = interfaces
self._process_router_instance_for_agent(agent, ri, router)
# Assert radvd was enabled again and that we only have one
# prefix on the interface
self._assert_ri_process_enabled(ri)
radvd_config = self.utils_replace_file.call_args[0][1].split()
self.assertEqual(1, len(ri.internal_ports[1]['subnets']))
self.assertEqual(1, len(ri.internal_ports[1]['fixed_ips']))
self.assertEqual(1, radvd_config.count("interface"))
self.assertEqual(1, radvd_config.count("prefix"))
def test_process_router_internal_network_added_unexpected_error(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = l3_test_common.prepare_router_data()
ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs)
ri.external_gateway_added = mock.Mock()
with mock.patch.object(
ri,
'internal_network_added') as internal_network_added:
# raise RuntimeError to simulate that an unexpected exception
# occurs
internal_network_added.side_effect = RuntimeError
self.assertRaises(RuntimeError, ri.process, agent)
self.assertNotIn(
router[l3_constants.INTERFACE_KEY][0], ri.internal_ports)
            # Simulate that the unexpected exception has been resolved
            internal_network_added.side_effect = None
            # periodic_sync_routers_task finds out that _rpc_loop failed to
            # process the router last time; it will retry in the next run.
ri.process(agent)
# We were able to add the port to ri.internal_ports
self.assertIn(
router[l3_constants.INTERFACE_KEY][0], ri.internal_ports)
def test_process_router_internal_network_removed_unexpected_error(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = l3_test_common.prepare_router_data()
ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs)
ri.external_gateway_added = mock.Mock()
# add an internal port
ri.process(agent)
with mock.patch.object(
ri,
'internal_network_removed') as internal_net_removed:
# raise RuntimeError to simulate that an unexpected exception
# occurs
internal_net_removed.side_effect = RuntimeError
ri.internal_ports[0]['admin_state_up'] = False
            # The above port is set to the down state; it should be removed.
self.assertRaises(RuntimeError, ri.process, agent)
self.assertIn(
router[l3_constants.INTERFACE_KEY][0], ri.internal_ports)
            # Simulate that the unexpected exception has been resolved
            internal_net_removed.side_effect = None
            # periodic_sync_routers_task finds out that _rpc_loop failed to
            # process the router last time; it will retry in the next run.
ri.process(agent)
# We were able to remove the port from ri.internal_ports
self.assertNotIn(
router[l3_constants.INTERFACE_KEY][0], ri.internal_ports)
def test_process_router_floatingip_nochange(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = l3_test_common.prepare_router_data(num_internal_ports=1)
fip1 = {'id': _uuid(), 'floating_ip_address': '8.8.8.8',
'fixed_ip_address': '7.7.7.7', 'status': 'ACTIVE',
'port_id': router[l3_constants.INTERFACE_KEY][0]['id']}
fip2 = copy.copy(fip1)
fip2.update({'id': _uuid(), 'status': 'DOWN',
'floating_ip_address': '9.9.9.9'})
router[l3_constants.FLOATINGIP_KEY] = [fip1, fip2]
ri = legacy_router.LegacyRouter(router['id'], router,
**self.ri_kwargs)
ri.external_gateway_added = mock.Mock()
with mock.patch.object(
agent.plugin_rpc, 'update_floatingip_statuses'
) as mock_update_fip_status,\
mock.patch.object(ri, 'get_router_cidrs') as mock_get_cidrs:
mock_get_cidrs.return_value = set(
[fip1['floating_ip_address'] + '/32'])
ri.process(agent)
            # Make sure only the FIP that was not already in the existing
            # cidrs had its status reported
mock_update_fip_status.assert_called_once_with(
mock.ANY, ri.router_id, {fip2['id']: 'ACTIVE'})
def test_process_router_floatingip_status_update_if_processed(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = l3_test_common.prepare_router_data(num_internal_ports=1)
fip1 = {'id': _uuid(), 'floating_ip_address': '8.8.8.8',
'fixed_ip_address': '7.7.7.7', 'status': 'ACTIVE',
'port_id': router[l3_constants.INTERFACE_KEY][0]['id']}
fip2 = copy.copy(fip1)
fip2.update({'id': _uuid(), 'status': 'DOWN', })
router[l3_constants.FLOATINGIP_KEY] = [fip1, fip2]
ri = legacy_router.LegacyRouter(router['id'], router,
**self.ri_kwargs)
ri.external_gateway_added = mock.Mock()
with mock.patch.object(
agent.plugin_rpc, 'update_floatingip_statuses'
) as mock_update_fip_status,\
mock.patch.object(ri, 'get_router_cidrs') as mock_get_cidrs:
mock_get_cidrs.return_value = set()
ri.process(agent)
            # Make sure both were sent since neither existed in the
            # existing cidrs
mock_update_fip_status.assert_called_once_with(
mock.ANY, ri.router_id, {fip1['id']: 'ACTIVE',
fip2['id']: 'ACTIVE'})
def test_process_router_floatingip_disabled(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
with mock.patch.object(
agent.plugin_rpc,
'update_floatingip_statuses') as mock_update_fip_status:
fip_id = _uuid()
router = l3_test_common.prepare_router_data(num_internal_ports=1)
router[l3_constants.FLOATINGIP_KEY] = [
{'id': fip_id,
'floating_ip_address': '8.8.8.8',
'fixed_ip_address': '7.7.7.7',
'status': 'DOWN',
'port_id': router[l3_constants.INTERFACE_KEY][0]['id']}]
ri = legacy_router.LegacyRouter(router['id'],
router,
**self.ri_kwargs)
ri.external_gateway_added = mock.Mock()
ri.process(agent)
            # Assert the call for putting the floating IP up was performed
mock_update_fip_status.assert_called_once_with(
mock.ANY, ri.router_id,
{fip_id: l3_constants.FLOATINGIP_STATUS_ACTIVE})
mock_update_fip_status.reset_mock()
# Process the router again, this time without floating IPs
router[l3_constants.FLOATINGIP_KEY] = []
ri.router = router
ri.process(agent)
            # Assert the call for putting the floating IP down was performed
mock_update_fip_status.assert_called_once_with(
mock.ANY, ri.router_id,
{fip_id: l3_constants.FLOATINGIP_STATUS_DOWN})
def test_process_router_floatingip_exception(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
with mock.patch.object(
agent.plugin_rpc,
'update_floatingip_statuses') as mock_update_fip_status:
fip_id = _uuid()
router = l3_test_common.prepare_router_data(num_internal_ports=1)
router[l3_constants.FLOATINGIP_KEY] = [
{'id': fip_id,
'floating_ip_address': '8.8.8.8',
'fixed_ip_address': '7.7.7.7',
'port_id': router[l3_constants.INTERFACE_KEY][0]['id']}]
ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs)
ri.process_floating_ip_addresses = mock.Mock(
side_effect=RuntimeError)
ri.external_gateway_added = mock.Mock()
ri.process(agent)
            # Assert the call for putting the floating IP into ERROR
            # was performed
mock_update_fip_status.assert_called_once_with(
mock.ANY, ri.router_id,
{fip_id: l3_constants.FLOATINGIP_STATUS_ERROR})
def test_process_external_iptables_exception(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
with mock.patch.object(
agent.plugin_rpc,
'update_floatingip_statuses') as mock_update_fip_status:
fip_id = _uuid()
router = l3_test_common.prepare_router_data(num_internal_ports=1)
router[l3_constants.FLOATINGIP_KEY] = [
{'id': fip_id,
'floating_ip_address': '8.8.8.8',
'fixed_ip_address': '7.7.7.7',
'port_id': router[l3_constants.INTERFACE_KEY][0]['id']}]
ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs)
ri.iptables_manager._apply = mock.Mock(side_effect=Exception)
ri.process_external(agent)
            # Assert the call for putting the floating IP into ERROR
            # was performed
mock_update_fip_status.assert_called_once_with(
mock.ANY, ri.router_id,
{fip_id: l3_constants.FLOATINGIP_STATUS_ERROR})
self.assertEqual(1, ri.iptables_manager._apply.call_count)
def test_handle_router_snat_rules_distributed_without_snat_manager(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
ri = dvr_router.DvrEdgeRouter(
agent,
HOSTNAME,
'foo_router_id',
{},
**self.ri_kwargs)
ri.iptables_manager = mock.MagicMock()
ri._is_this_snat_host = mock.Mock(return_value=True)
ri.get_ex_gw_port = mock.Mock(return_value=None)
ri._handle_router_snat_rules(None, mock.ANY)
self.assertIsNone(ri.snat_iptables_manager)
self.assertFalse(ri.iptables_manager.called)
def test_handle_router_snat_rules_add_back_jump(self):
ri = l3router.RouterInfo(_uuid(), {}, **self.ri_kwargs)
ri.iptables_manager = mock.MagicMock()
port = {'fixed_ips': [{'ip_address': '192.168.1.4'}]}
ri._handle_router_snat_rules(port, "iface")
nat = ri.iptables_manager.ipv4['nat']
nat.empty_chain.assert_any_call('snat')
nat.add_rule.assert_any_call('snat', '-j $float-snat')
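        # The first rule added back to the emptied snat chain should be the
        # jump to the float-snat chain.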
for call in nat.mock_calls:
name, args, kwargs = call
if name == 'add_rule':
self.assertEqual(('snat', '-j $float-snat'), args)
self.assertEqual({}, kwargs)
break
def test_handle_router_snat_rules_add_rules(self):
ri = l3router.RouterInfo(_uuid(), {}, **self.ri_kwargs)
ex_gw_port = {'fixed_ips': [{'ip_address': '192.168.1.4'}]}
ri.router = {'distributed': False}
ri._handle_router_snat_rules(ex_gw_port, "iface")
nat_rules = list(map(str, ri.iptables_manager.ipv4['nat'].rules))
wrap_name = ri.iptables_manager.wrap_name
jump_float_rule = "-A %s-snat -j %s-float-snat" % (wrap_name,
wrap_name)
snat_rule1 = ("-A %s-snat -o iface -j SNAT --to-source %s") % (
wrap_name, ex_gw_port['fixed_ips'][0]['ip_address'])
snat_rule2 = ("-A %s-snat -m mark ! --mark 0x2/%s "
"-m conntrack --ctstate DNAT "
"-j SNAT --to-source %s") % (
wrap_name, n_const.ROUTER_MARK_MASK,
ex_gw_port['fixed_ips'][0]['ip_address'])
self.assertIn(jump_float_rule, nat_rules)
self.assertIn(snat_rule1, nat_rules)
self.assertIn(snat_rule2, nat_rules)
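        # The jump to float-snat must come before the generic SNAT rule so
        # that floating IP traffic is translated first.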
self.assertThat(nat_rules.index(jump_float_rule),
matchers.LessThan(nat_rules.index(snat_rule1)))
mangle_rules = list(map(str, ri.iptables_manager.ipv4['mangle'].rules))
mangle_rule = ("-A %s-mark -i iface "
"-j MARK --set-xmark 0x2/%s" %
(wrap_name, n_const.ROUTER_MARK_MASK))
self.assertIn(mangle_rule, mangle_rules)
def test_process_router_delete_stale_internal_devices(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
stale_devlist = [l3_test_common.FakeDev('qr-a1b2c3d4-e5'),
l3_test_common.FakeDev('qr-b2c3d4e5-f6')]
stale_devnames = [dev.name for dev in stale_devlist]
get_devices_return = []
get_devices_return.extend(stale_devlist)
self.mock_ip.get_devices.return_value = get_devices_return
router = l3_test_common.prepare_router_data(enable_snat=True,
num_internal_ports=1)
ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs)
internal_ports = ri.router.get(l3_constants.INTERFACE_KEY, [])
self.assertEqual(1, len(internal_ports))
internal_port = internal_ports[0]
with mock.patch.object(ri, 'internal_network_removed'
) as internal_network_removed,\
mock.patch.object(ri, 'internal_network_added'
) as internal_network_added,\
mock.patch.object(ri, 'external_gateway_removed'
) as external_gateway_removed,\
mock.patch.object(ri, 'external_gateway_added'
) as external_gateway_added:
ri.process(agent)
self.assertEqual(1, external_gateway_added.call_count)
self.assertFalse(external_gateway_removed.called)
self.assertFalse(internal_network_removed.called)
internal_network_added.assert_called_once_with(internal_port)
self.assertEqual(len(stale_devnames),
self.mock_driver.unplug.call_count)
calls = [mock.call(stale_devname,
namespace=ri.ns_name,
prefix=l3_agent.INTERNAL_DEV_PREFIX)
for stale_devname in stale_devnames]
self.mock_driver.unplug.assert_has_calls(calls, any_order=True)
def test_process_router_delete_stale_external_devices(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
stale_devlist = [l3_test_common.FakeDev('qg-a1b2c3d4-e5')]
stale_devnames = [dev.name for dev in stale_devlist]
router = l3_test_common.prepare_router_data(enable_snat=True,
num_internal_ports=1)
del router['gw_port']
ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs)
self.mock_ip.get_devices.return_value = stale_devlist
ri.process(agent)
self.mock_driver.unplug.assert_called_with(
stale_devnames[0],
bridge="br-ex",
namespace=ri.ns_name,
prefix=l3_agent.EXTERNAL_DEV_PREFIX)
def test_router_deleted(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent._queue = mock.Mock()
agent.router_deleted(None, FAKE_ID)
self.assertEqual(1, agent._queue.add.call_count)
def test_routers_updated(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent._queue = mock.Mock()
agent.routers_updated(None, [FAKE_ID])
self.assertEqual(1, agent._queue.add.call_count)
def test_removed_from_agent(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent._queue = mock.Mock()
agent.router_removed_from_agent(None, {'router_id': FAKE_ID})
self.assertEqual(1, agent._queue.add.call_count)
def test_added_to_agent(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent._queue = mock.Mock()
agent.router_added_to_agent(None, [FAKE_ID])
self.assertEqual(1, agent._queue.add.call_count)
def test_destroy_namespace(self):
namespace = 'qrouter-bar'
self.mock_ip.get_namespaces.return_value = [namespace]
self.mock_ip.get_devices.return_value = [
l3_test_common.FakeDev('qr-aaaa'),
l3_test_common.FakeDev('rfp-aaaa')]
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
ns = namespaces.RouterNamespace(
'bar', self.conf, agent.driver, agent.use_ipv6)
ns.create()
ns.delete()
self.mock_driver.unplug.assert_called_once_with('qr-aaaa',
prefix='qr-',
namespace='qrouter'
'-bar')
self.mock_ip.del_veth.assert_called_once_with('rfp-aaaa')
def test_destroy_router_namespace(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
ns = namespaces.Namespace(
'qrouter-bar', self.conf, agent.driver, agent.use_ipv6)
ns.create()
ns.delete()
self.mock_ip.netns.delete.assert_called_once_with("qrouter-bar")
def _configure_metadata_proxy(self, enableflag=True):
if not enableflag:
self.conf.set_override('enable_metadata_proxy', False)
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router_id = _uuid()
router = {'id': router_id,
'external_gateway_info': {},
'routes': [],
'distributed': False}
driver = metadata_driver.MetadataDriver
with mock.patch.object(
driver, 'destroy_monitored_metadata_proxy') as destroy_proxy:
with mock.patch.object(
driver, 'spawn_monitored_metadata_proxy') as spawn_proxy:
agent._process_added_router(router)
if enableflag:
spawn_proxy.assert_called_with(
mock.ANY,
mock.ANY,
self.conf.metadata_port,
mock.ANY,
router_id=router_id
)
else:
self.assertFalse(spawn_proxy.call_count)
agent._router_removed(router_id)
if enableflag:
destroy_proxy.assert_called_with(mock.ANY,
router_id,
mock.ANY)
else:
self.assertFalse(destroy_proxy.call_count)
def test_enable_metadata_proxy(self):
self._configure_metadata_proxy()
def test_disable_metadata_proxy_spawn(self):
self._configure_metadata_proxy(enableflag=False)
def test_router_id_specified_in_conf(self):
self.conf.set_override('router_id', '1234')
self._configure_metadata_proxy()
def _test_process_routers_update_rpc_timeout(self, ext_net_call=False,
ext_net_call_failed=False):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent.fullsync = False
agent._process_router_if_compatible = mock.Mock()
if ext_net_call_failed:
agent._process_router_if_compatible.side_effect = (
oslo_messaging.MessagingTimeout)
agent._queue = mock.Mock()
agent._resync_router = mock.Mock()
update = mock.Mock()
update.router = None
agent._queue.each_update_to_next_router.side_effect = [
[(None, update)]]
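        # The queued update carries no router payload, so the agent has to
        # fetch the router over RPC, which is where the timeout may occur.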
agent._process_router_update()
self.assertFalse(agent.fullsync)
self.assertEqual(ext_net_call,
agent._process_router_if_compatible.called)
agent._resync_router.assert_called_with(update)
def test_process_routers_update_rpc_timeout_on_get_routers(self):
self.plugin_api.get_routers.side_effect = (
oslo_messaging.MessagingTimeout)
self._test_process_routers_update_rpc_timeout()
def test_process_routers_update_resyncs_failed_router(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
# Attempting to configure the router will fail
agent._process_router_if_compatible = mock.MagicMock()
agent._process_router_if_compatible.side_effect = RuntimeError()
# Queue an update from a full sync
update = router_processing_queue.RouterUpdate(
42,
router_processing_queue.PRIORITY_SYNC_ROUTERS_TASK,
router=mock.Mock(),
timestamp=timeutils.utcnow())
agent._queue.add(update)
agent._process_router_update()
        # The update contained the router object, so get_routers won't
        # be called
self.assertFalse(agent.plugin_rpc.get_routers.called)
        # The update failed; assert that get_routers was called this time
agent._process_router_update()
self.assertTrue(agent.plugin_rpc.get_routers.called)
def test_process_routers_update_rpc_timeout_on_get_ext_net(self):
self._test_process_routers_update_rpc_timeout(ext_net_call=True,
ext_net_call_failed=True)
def _test_process_routers_update_router_deleted(self, error=False):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent._queue = mock.Mock()
update = mock.Mock()
update.router = None
update.action = 1 # ROUTER_DELETED
router_info = mock.MagicMock()
agent.router_info[update.id] = router_info
router_processor = mock.Mock()
agent._queue.each_update_to_next_router.side_effect = [
[(router_processor, update)]]
agent._resync_router = mock.Mock()
if error:
agent._safe_router_removed = mock.Mock()
agent._safe_router_removed.return_value = False
agent._process_router_update()
if error:
self.assertFalse(router_processor.fetched_and_processed.called)
agent._resync_router.assert_called_with(update)
else:
router_info.delete.assert_called_once_with(agent)
self.assertFalse(agent.router_info)
self.assertFalse(agent._resync_router.called)
router_processor.fetched_and_processed.assert_called_once_with(
update.timestamp)
def test_process_routers_update_router_deleted_success(self):
self._test_process_routers_update_router_deleted()
def test_process_routers_update_router_deleted_error(self):
self._test_process_routers_update_router_deleted(True)
def test_process_router_if_compatible_with_no_ext_net_in_conf(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.plugin_api.get_external_network_id.return_value = 'aaa'
router = {'id': _uuid(),
'routes': [],
'admin_state_up': True,
'external_gateway_info': {'network_id': 'aaa'}}
agent._process_router_if_compatible(router)
self.assertIn(router['id'], agent.router_info)
self.plugin_api.get_external_network_id.assert_called_with(
agent.context)
def test_process_router_if_compatible_with_cached_ext_net(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.plugin_api.get_external_network_id.return_value = 'aaa'
agent.target_ex_net_id = 'aaa'
router = {'id': _uuid(),
'routes': [],
'admin_state_up': True,
'external_gateway_info': {'network_id': 'aaa'}}
agent._process_router_if_compatible(router)
self.assertIn(router['id'], agent.router_info)
self.assertFalse(self.plugin_api.get_external_network_id.called)
def test_process_router_if_compatible_with_stale_cached_ext_net(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.plugin_api.get_external_network_id.return_value = 'aaa'
agent.target_ex_net_id = 'bbb'
router = {'id': _uuid(),
'routes': [],
'admin_state_up': True,
'external_gateway_info': {'network_id': 'aaa'}}
agent._process_router_if_compatible(router)
self.assertIn(router['id'], agent.router_info)
self.plugin_api.get_external_network_id.assert_called_with(
agent.context)
def test_process_router_if_compatible_w_no_ext_net_and_2_net_plugin(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = {'id': _uuid(),
'routes': [],
'admin_state_up': True,
'external_gateway_info': {'network_id': 'aaa'}}
agent.router_info = {}
self.plugin_api.get_external_network_id.side_effect = (
exc.TooManyExternalNetworks())
self.assertRaises(exc.TooManyExternalNetworks,
agent._process_router_if_compatible,
router)
self.assertNotIn(router['id'], agent.router_info)
def test_process_router_if_compatible_with_ext_net_in_conf(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.plugin_api.get_external_network_id.return_value = 'aaa'
router = {'id': _uuid(),
'routes': [],
'admin_state_up': True,
'external_gateway_info': {'network_id': 'bbb'}}
agent.router_info = {}
self.conf.set_override('gateway_external_network_id', 'aaa')
self.assertRaises(n_exc.RouterNotCompatibleWithAgent,
agent._process_router_if_compatible,
router)
self.assertNotIn(router['id'], agent.router_info)
def test_process_router_if_compatible_with_no_bridge_no_ext_net(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.plugin_api.get_external_network_id.return_value = 'aaa'
router = {'id': _uuid(),
'routes': [],
'admin_state_up': True,
'external_gateway_info': {'network_id': 'aaa'}}
agent.router_info = {}
self.conf.set_override('external_network_bridge', '')
agent._process_router_if_compatible(router)
self.assertIn(router['id'], agent.router_info)
def test_nonexistent_interface_driver(self):
self.conf.set_override('interface_driver', None)
self.assertRaises(SystemExit, l3_agent.L3NATAgent,
HOSTNAME, self.conf)
self.conf.set_override('interface_driver', 'wrong.driver')
self.assertRaises(SystemExit, l3_agent.L3NATAgent,
HOSTNAME, self.conf)
@mock.patch.object(namespaces.RouterNamespace, 'delete')
@mock.patch.object(dvr_snat_ns.SnatNamespace, 'delete')
def _cleanup_namespace_test(self,
stale_namespace_list,
router_list,
other_namespaces,
mock_snat_ns,
mock_router_ns):
good_namespace_list = [namespaces.NS_PREFIX + r['id']
for r in router_list]
good_namespace_list += [dvr_snat_ns.SNAT_NS_PREFIX + r['id']
for r in router_list]
self.mock_ip.get_namespaces.return_value = (stale_namespace_list +
good_namespace_list +
other_namespaces)
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.assertTrue(agent.namespaces_manager._clean_stale)
pm = self.external_process.return_value
pm.reset_mock()
with agent.namespaces_manager as ns_manager:
for r in router_list:
ns_manager.keep_router(r['id'])
qrouters = [n for n in stale_namespace_list
if n.startswith(namespaces.NS_PREFIX)]
self.assertEqual(len(qrouters), mock_router_ns.call_count)
self.assertEqual(
len(stale_namespace_list) - len(qrouters),
mock_snat_ns.call_count)
self.assertFalse(agent.namespaces_manager._clean_stale)
def test_cleanup_namespace(self):
self.conf.set_override('router_id', None)
stale_namespaces = [namespaces.NS_PREFIX + 'foo',
namespaces.NS_PREFIX + 'bar',
dvr_snat_ns.SNAT_NS_PREFIX + 'foo']
other_namespaces = ['unknown']
self._cleanup_namespace_test(stale_namespaces,
[],
other_namespaces)
def test_cleanup_namespace_with_registered_router_ids(self):
self.conf.set_override('router_id', None)
stale_namespaces = [namespaces.NS_PREFIX + 'cccc',
namespaces.NS_PREFIX + 'eeeee',
dvr_snat_ns.SNAT_NS_PREFIX + 'fffff']
router_list = [{'id': 'foo', 'distributed': False},
{'id': 'aaaa', 'distributed': False}]
other_namespaces = ['qdhcp-aabbcc', 'unknown']
self._cleanup_namespace_test(stale_namespaces,
router_list,
other_namespaces)
def test_cleanup_namespace_with_conf_router_id(self):
self.conf.set_override('router_id', 'bbbbb')
stale_namespaces = [namespaces.NS_PREFIX + 'cccc',
namespaces.NS_PREFIX + 'eeeee',
namespaces.NS_PREFIX + self.conf.router_id]
router_list = [{'id': 'foo', 'distributed': False},
{'id': 'aaaa', 'distributed': False}]
other_namespaces = ['qdhcp-aabbcc', 'unknown']
self._cleanup_namespace_test(stale_namespaces,
router_list,
other_namespaces)
def test_create_dvr_gateway(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = l3_test_common.prepare_router_data()
ri = dvr_router.DvrEdgeRouter(agent,
HOSTNAME,
router['id'],
router,
**self.ri_kwargs)
port_id = _uuid()
subnet_id = _uuid()
dvr_gw_port = {'fixed_ips': [{'ip_address': '20.0.0.30',
'prefixlen': 24,
'subnet_id': subnet_id}],
'subnets': [{'id': subnet_id,
'cidr': '20.0.0.0/24',
'gateway_ip': '20.0.0.1'}],
'id': port_id,
'network_id': _uuid(),
'mac_address': 'ca:fe:de:ad:be:ef'}
interface_name = ri._get_snat_int_device_name(port_id)
self.device_exists.return_value = False
with mock.patch.object(ri, 'get_snat_interfaces') as get_interfaces:
get_interfaces.return_value = self.snat_ports
ri._create_dvr_gateway(dvr_gw_port, interface_name)
# check 2 internal ports are plugged
# check 1 ext-gw-port is plugged
self.assertEqual(3, self.mock_driver.plug.call_count)
self.assertEqual(3, self.mock_driver.init_router_port.call_count)
def test_process_address_scope(self):
router = l3_test_common.prepare_router_data()
router['distributed'] = True
router['gw_port_host'] = HOSTNAME
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
ri = dvr_router.DvrEdgeRouter(agent,
HOSTNAME,
router['id'],
router,
**self.ri_kwargs)
ri.get_ex_gw_port = mock.Mock(return_value=None)
# Make sure the code doesn't crash if ri.snat_iptables_manager is None.
ri.process_address_scope()
with mock.patch.object(ri, '_add_address_scope_mark') as mocked_func:
ri.snat_iptables_manager = iptables_manager.IptablesManager(
namespace=mock.ANY, use_ipv6=False)
ri.snat_iptables_manager.defer_apply_off = mock.Mock()
ri.process_address_scope()
self.assertEqual(2, mocked_func.call_count)
def test_get_service_plugin_list(self):
service_plugins = [p_const.L3_ROUTER_NAT]
self.plugin_api.get_service_plugin_list.return_value = service_plugins
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.assertEqual(service_plugins, agent.neutron_service_plugins)
self.assertTrue(self.plugin_api.get_service_plugin_list.called)
def test_get_service_plugin_list_failed(self):
raise_rpc = oslo_messaging.RemoteError()
self.plugin_api.get_service_plugin_list.side_effect = raise_rpc
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.assertIsNone(agent.neutron_service_plugins)
self.assertTrue(self.plugin_api.get_service_plugin_list.called)
def test_get_service_plugin_list_retried(self):
raise_timeout = oslo_messaging.MessagingTimeout()
# Raise a timeout the first 2 times it calls
        # get_service_plugin_list then return an empty tuple
self.plugin_api.get_service_plugin_list.side_effect = (
raise_timeout, tuple()
)
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.assertEqual(tuple(), agent.neutron_service_plugins)
def test_external_gateway_removed_ext_gw_port_no_fip_ns(self):
self.conf.set_override('state_path', '/tmp')
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent.conf.agent_mode = 'dvr_snat'
router = l3_test_common.prepare_router_data(num_internal_ports=2)
router['gw_port_host'] = HOSTNAME
self.mock_driver.unplug.reset_mock()
external_net_id = router['gw_port']['network_id']
ri = dvr_router.DvrEdgeRouter(
agent, HOSTNAME, router['id'], router, **self.ri_kwargs)
ri.remove_floating_ip = mock.Mock()
agent._fetch_external_net_id = mock.Mock(return_value=external_net_id)
ri.ex_gw_port = ri.router['gw_port']
del ri.router['gw_port']
ri.fip_ns = None
nat = ri.iptables_manager.ipv4['nat']
nat.clear_rules_by_tag = mock.Mock()
nat.add_rule = mock.Mock()
ri.snat_namespace = mock.Mock()
ri.external_gateway_removed(
ri.ex_gw_port,
ri.get_external_device_name(ri.ex_gw_port['id']))
self.assertFalse(ri.remove_floating_ip.called)
def test_spawn_radvd(self):
router = l3_test_common.prepare_router_data(ip_version=6)
conffile = '/fake/radvd.conf'
pidfile = '/fake/radvd.pid'
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
# we don't want the whole process manager to be mocked to be
# able to catch execute() calls
self.external_process_p.stop()
self.ip_cls_p.stop()
get_conf_file_name = 'neutron.agent.linux.utils.get_conf_file_name'
get_pid_file_name = ('neutron.agent.linux.external_process.'
'ProcessManager.get_pid_file_name')
utils_execute = 'neutron.agent.common.utils.execute'
mock.patch(get_conf_file_name).start().return_value = conffile
mock.patch(get_pid_file_name).start().return_value = pidfile
execute = mock.patch(utils_execute).start()
radvd = ra.DaemonMonitor(
router['id'],
namespaces.RouterNamespace._get_ns_name(router['id']),
agent.process_monitor,
l3_test_common.FakeDev,
self.conf)
radvd.enable(router['_interfaces'])
cmd = execute.call_args[0][0]
self.assertIn('radvd', cmd)
_join = lambda *args: ' '.join(args)
cmd = _join(*cmd)
self.assertIn(_join('-C', conffile), cmd)
self.assertIn(_join('-p', pidfile), cmd)
self.assertIn(_join('-m', 'syslog'), cmd)
def test_generate_radvd_mtu_conf(self):
router = l3_test_common.prepare_router_data()
ipv6_subnet_modes = [{'ra_mode': n_const.IPV6_SLAAC,
'address_mode': n_const.IPV6_SLAAC}]
network_mtu = '1446'
ri = self._process_router_ipv6_subnet_added(router,
ipv6_subnet_modes,
None,
network_mtu)
expected = "AdvLinkMTU 1446"
ri.agent_conf.set_override('advertise_mtu', False)
ri.radvd._generate_radvd_conf(router[l3_constants.INTERFACE_KEY])
self.assertNotIn(expected, self.utils_replace_file.call_args[0][1])
# Verify that MTU is advertised when advertise_mtu is True
ri.agent_conf.set_override('advertise_mtu', True)
ri.radvd._generate_radvd_conf(router[l3_constants.INTERFACE_KEY])
self.assertIn(expected, self.utils_replace_file.call_args[0][1])
def test_generate_radvd_conf_other_and_managed_flag(self):
# expected = {ra_mode: (AdvOtherConfigFlag, AdvManagedFlag), ...}
expected = {n_const.IPV6_SLAAC: (False, False),
n_const.DHCPV6_STATELESS: (True, False),
n_const.DHCPV6_STATEFUL: (False, True)}
modes = [n_const.IPV6_SLAAC, n_const.DHCPV6_STATELESS,
n_const.DHCPV6_STATEFUL]
mode_combos = list(iter_chain(*[[list(combo) for combo in
iter_combinations(modes, i)] for i in range(1, len(modes) + 1)]))
for mode_list in mode_combos:
ipv6_subnet_modes = [{'ra_mode': mode, 'address_mode': mode}
for mode in mode_list]
router = l3_test_common.prepare_router_data()
ri = self._process_router_ipv6_subnet_added(router,
ipv6_subnet_modes)
ri.radvd._generate_radvd_conf(router[l3_constants.INTERFACE_KEY])
def assertFlag(flag):
return (self.assertIn if flag else self.assertNotIn)
other_flag, managed_flag = (
any(expected[mode][0] for mode in mode_list),
any(expected[mode][1] for mode in mode_list))
assertFlag(other_flag)('AdvOtherConfigFlag on;',
self.utils_replace_file.call_args[0][1])
assertFlag(managed_flag)('AdvManagedFlag on;',
self.utils_replace_file.call_args[0][1])
def test_generate_radvd_intervals(self):
self.conf.set_override('min_rtr_adv_interval', 22)
self.conf.set_override('max_rtr_adv_interval', 66)
router = l3_test_common.prepare_router_data()
ipv6_subnet_modes = [{'ra_mode': n_const.IPV6_SLAAC,
'address_mode': n_const.IPV6_SLAAC}]
ri = self._process_router_ipv6_subnet_added(router,
ipv6_subnet_modes)
ri.radvd._generate_radvd_conf(router[l3_constants.INTERFACE_KEY])
self.assertIn("MinRtrAdvInterval 22",
self.utils_replace_file.call_args[0][1])
self.assertIn("MaxRtrAdvInterval 66",
self.utils_replace_file.call_args[0][1])
def test_generate_radvd_rdnss_conf(self):
router = l3_test_common.prepare_router_data()
ipv6_subnet_modes = [{'ra_mode': n_const.IPV6_SLAAC,
'address_mode': n_const.IPV6_SLAAC}]
dns_list = ['fd01:1::100', 'fd01:1::200', 'fd01::300', 'fd01::400']
ri = self._process_router_ipv6_subnet_added(router,
ipv6_subnet_modes,
dns_nameservers=dns_list)
ri.radvd._generate_radvd_conf(router[l3_constants.INTERFACE_KEY])
# Verify that radvd configuration file includes RDNSS entries
expected = "RDNSS "
for dns in dns_list[0:ra.MAX_RDNSS_ENTRIES]:
expected += "%s " % dns
self.assertIn(expected, self.utils_replace_file.call_args[0][1])
def _pd_expected_call_external_process(self, requestor, ri, enable=True):
expected_calls = []
if enable:
expected_calls.append(mock.call(uuid=requestor,
service='dibbler',
default_cmd_callback=mock.ANY,
namespace=ri.ns_name,
conf=mock.ANY,
pid_file=mock.ANY))
expected_calls.append(mock.call().enable(reload_cfg=False))
else:
expected_calls.append(mock.call(uuid=requestor,
service='dibbler',
namespace=ri.ns_name,
conf=mock.ANY,
pid_file=mock.ANY))
expected_calls.append(mock.call().disable(
get_stop_command=mock.ANY))
return expected_calls
def _pd_setup_agent_router(self):
router = l3_test_common.prepare_router_data()
ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs)
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent.external_gateway_added = mock.Mock()
ri.process(agent)
agent._router_added(router['id'], router)
# Make sure radvd monitor is created
if not ri.radvd:
ri.radvd = ra.DaemonMonitor(router['id'],
ri.ns_name,
agent.process_monitor,
ri.get_internal_device_name,
self.conf)
return agent, router, ri
def _pd_remove_gw_interface(self, intfs, agent, router, ri):
expected_pd_update = {}
expected_calls = []
for intf in intfs:
requestor_id = self._pd_get_requestor_id(intf, router, ri)
expected_calls += (self._pd_expected_call_external_process(
requestor_id, ri, False))
for subnet in intf['subnets']:
expected_pd_update[subnet['id']] = (
n_const.PROVISIONAL_IPV6_PD_PREFIX)
# Implement the prefix update notifier
# Keep track of the updated prefix
self.pd_update = {}
def pd_notifier(context, prefix_update):
self.pd_update = prefix_update
for subnet_id, prefix in six.iteritems(prefix_update):
for intf in intfs:
for subnet in intf['subnets']:
if subnet['id'] == subnet_id:
# Update the prefix
subnet['cidr'] = prefix
break
# Remove the gateway interface
agent.pd.notifier = pd_notifier
agent.pd.remove_gw_interface(router['id'])
self._pd_assert_dibbler_calls(expected_calls,
self.external_process.mock_calls[-len(expected_calls):])
self.assertEqual(expected_pd_update, self.pd_update)
def _pd_remove_interfaces(self, intfs, agent, router, ri):
expected_pd_update = []
expected_calls = []
for intf in intfs:
# Remove the router interface
router[l3_constants.INTERFACE_KEY].remove(intf)
requestor_id = self._pd_get_requestor_id(intf, router, ri)
expected_calls += (self._pd_expected_call_external_process(
requestor_id, ri, False))
for subnet in intf['subnets']:
expected_pd_update += [{subnet['id']:
n_const.PROVISIONAL_IPV6_PD_PREFIX}]
# Implement the prefix update notifier
# Keep track of the updated prefix
self.pd_update = []
def pd_notifier(context, prefix_update):
self.pd_update.append(prefix_update)
for intf in intfs:
for subnet in intf['subnets']:
if subnet['id'] in prefix_update:
# Update the prefix
subnet['cidr'] = prefix_update[subnet['id']]
# Process the router for removed interfaces
agent.pd.notifier = pd_notifier
ri.process(agent)
# The number of external process calls takes radvd into account.
# This is because there is no ipv6 interface any more after removing
# the interfaces, and radvd will be killed because of that
self._pd_assert_dibbler_calls(expected_calls,
self.external_process.mock_calls[-len(expected_calls) - 2:])
self._pd_assert_radvd_calls(ri, False)
self.assertEqual(expected_pd_update, self.pd_update)
def _pd_get_requestor_id(self, intf, router, ri):
ifname = ri.get_internal_device_name(intf['id'])
for subnet in intf['subnets']:
return dibbler.PDDibbler(router['id'],
subnet['id'], ifname).requestor_id
def _pd_assert_dibbler_calls(self, expected, actual):
        '''Check that the external process calls for dibbler are as expected.
        In the case of multiple pd-enabled router ports, the exact sequence
        of these calls is not deterministic. It is known, though, that each
        external_process call is followed by either an enable() or disable().
'''
num_ext_calls = len(expected) // 2
expected_ext_calls = []
actual_ext_calls = []
expected_action_calls = []
actual_action_calls = []
for c in range(num_ext_calls):
expected_ext_calls.append(expected[c * 2])
actual_ext_calls.append(actual[c * 2])
expected_action_calls.append(expected[c * 2 + 1])
actual_action_calls.append(actual[c * 2 + 1])
self.assertEqual(expected_action_calls, actual_action_calls)
for exp in expected_ext_calls:
for act in actual_ext_calls:
if exp == act:
break
else:
msg = "Unexpected dibbler external process call."
self.fail(msg)
def _pd_assert_radvd_calls(self, ri, enable=True):
exp_calls = self._radvd_expected_call_external_process(ri, enable)
self.assertEqual(exp_calls,
self.external_process.mock_calls[-len(exp_calls):])
def _pd_get_prefixes(self, agent, router, ri,
existing_intfs, new_intfs, mock_get_prefix):
# First generate the prefixes that will be used for each interface
prefixes = {}
expected_pd_update = {}
expected_calls = []
for ifno, intf in enumerate(existing_intfs + new_intfs):
requestor_id = self._pd_get_requestor_id(intf, router, ri)
prefixes[requestor_id] = "2001:cafe:cafe:%d::/64" % ifno
if intf in new_intfs:
subnet_id = (intf['subnets'][0]['id'] if intf['subnets']
else None)
expected_pd_update[subnet_id] = prefixes[requestor_id]
expected_calls += (
self._pd_expected_call_external_process(requestor_id, ri))
# Implement the prefix update notifier
# Keep track of the updated prefix
self.pd_update = {}
def pd_notifier(context, prefix_update):
self.pd_update = prefix_update
for subnet_id, prefix in six.iteritems(prefix_update):
for intf in new_intfs:
for subnet in intf['subnets']:
if subnet['id'] == subnet_id:
# Update the prefix
subnet['cidr'] = prefix
break
# Start the dibbler client
agent.pd.notifier = pd_notifier
agent.pd.process_prefix_update()
# Get the prefix and check that the neutron server is notified
def get_prefix(pdo):
key = '%s:%s:%s' % (pdo.router_id, pdo.subnet_id, pdo.ri_ifname)
return prefixes[key]
mock_get_prefix.side_effect = get_prefix
agent.pd.process_prefix_update()
# Make sure that the updated prefixes are expected
self._pd_assert_dibbler_calls(expected_calls,
self.external_process.mock_calls[-len(expected_calls):])
self.assertEqual(expected_pd_update, self.pd_update)
def _pd_add_gw_interface(self, agent, router, ri):
gw_ifname = ri.get_external_device_name(router['gw_port']['id'])
agent.pd.add_gw_interface(router['id'], gw_ifname)
@mock.patch.object(dibbler.PDDibbler, 'get_prefix', autospec=True)
@mock.patch.object(dibbler.os, 'getpid', return_value=1234)
@mock.patch.object(pd.PrefixDelegation, '_is_lla_active',
return_value=True)
@mock.patch.object(dibbler.os, 'chmod')
@mock.patch.object(dibbler.shutil, 'rmtree')
@mock.patch.object(pd.PrefixDelegation, '_get_sync_data')
def test_pd_add_remove_subnet(self, mock1, mock2, mock3, mock4,
mock_getpid, mock_get_prefix):
'''Add and remove one pd-enabled subnet
Remove the interface by deleting it from the router
'''
# Initial setup
agent, router, ri = self._pd_setup_agent_router()
# Create one pd-enabled subnet and add router interface
intfs = l3_test_common.router_append_pd_enabled_subnet(router)
ri.process(agent)
# No client should be started since there is no gateway port
self.assertFalse(self.external_process.call_count)
self.assertFalse(mock_get_prefix.call_count)
# Add the gateway interface
self._pd_add_gw_interface(agent, router, ri)
# Get one prefix
self._pd_get_prefixes(agent, router, ri, [], intfs, mock_get_prefix)
# Update the router with the new prefix
ri.process(agent)
# Check that radvd is started and the router port is configured
# with the new prefix
self._pd_assert_radvd_calls(ri)
# Now remove the interface
self._pd_remove_interfaces(intfs, agent, router, ri)
@mock.patch.object(dibbler.PDDibbler, 'get_prefix', autospec=True)
@mock.patch.object(dibbler.os, 'getpid', return_value=1234)
@mock.patch.object(pd.PrefixDelegation, '_is_lla_active',
return_value=True)
@mock.patch.object(dibbler.os, 'chmod')
@mock.patch.object(dibbler.shutil, 'rmtree')
@mock.patch.object(pd.PrefixDelegation, '_get_sync_data')
def test_pd_remove_gateway(self, mock1, mock2, mock3, mock4,
mock_getpid, mock_get_prefix):
'''Add one pd-enabled subnet and remove the gateway port
Remove the gateway port and check the prefix is removed
'''
# Initial setup
agent, router, ri = self._pd_setup_agent_router()
# Create one pd-enabled subnet and add router interface
intfs = l3_test_common.router_append_pd_enabled_subnet(router)
ri.process(agent)
# Add the gateway interface
self._pd_add_gw_interface(agent, router, ri)
# Get one prefix
self._pd_get_prefixes(agent, router, ri, [], intfs, mock_get_prefix)
# Update the router with the new prefix
ri.process(agent)
# Check that radvd is started
self._pd_assert_radvd_calls(ri)
# Now remove the gw interface
self._pd_remove_gw_interface(intfs, agent, router, ri)
# There will be a router update
ri.process(agent)
@mock.patch.object(dibbler.PDDibbler, 'get_prefix', autospec=True)
@mock.patch.object(dibbler.os, 'getpid', return_value=1234)
@mock.patch.object(pd.PrefixDelegation, '_is_lla_active',
return_value=True)
@mock.patch.object(dibbler.os, 'chmod')
@mock.patch.object(dibbler.shutil, 'rmtree')
@mock.patch.object(pd.PrefixDelegation, '_get_sync_data')
def test_pd_add_remove_2_subnets(self, mock1, mock2, mock3, mock4,
mock_getpid, mock_get_prefix):
'''Add and remove two pd-enabled subnets
Remove the interfaces by deleting them from the router
'''
# Initial setup
agent, router, ri = self._pd_setup_agent_router()
# Create 2 pd-enabled subnets and add router interfaces
intfs = l3_test_common.router_append_pd_enabled_subnet(router, count=2)
ri.process(agent)
# No client should be started
self.assertFalse(self.external_process.call_count)
self.assertFalse(mock_get_prefix.call_count)
# Add the gateway interface
self._pd_add_gw_interface(agent, router, ri)
# Get prefixes
self._pd_get_prefixes(agent, router, ri, [], intfs, mock_get_prefix)
# Update the router with the new prefix
ri.process(agent)
# Check that radvd is started and the router port is configured
# with the new prefix
self._pd_assert_radvd_calls(ri)
# Now remove the interface
self._pd_remove_interfaces(intfs, agent, router, ri)
@mock.patch.object(dibbler.PDDibbler, 'get_prefix', autospec=True)
@mock.patch.object(dibbler.os, 'getpid', return_value=1234)
@mock.patch.object(pd.PrefixDelegation, '_is_lla_active',
return_value=True)
@mock.patch.object(dibbler.os, 'chmod')
@mock.patch.object(dibbler.shutil, 'rmtree')
@mock.patch.object(pd.PrefixDelegation, '_get_sync_data')
def test_pd_remove_gateway_2_subnets(self, mock1, mock2, mock3, mock4,
mock_getpid, mock_get_prefix):
'''Add one pd-enabled subnet, followed by adding another one
Remove the gateway port and check the prefix is removed
'''
# Initial setup
agent, router, ri = self._pd_setup_agent_router()
# Add the gateway interface
self._pd_add_gw_interface(agent, router, ri)
# Create 1 pd-enabled subnet and add router interface
intfs = l3_test_common.router_append_pd_enabled_subnet(router, count=1)
ri.process(agent)
# Get prefixes
self._pd_get_prefixes(agent, router, ri, [], intfs, mock_get_prefix)
# Update the router with the new prefix
ri.process(agent)
# Check that radvd is started
self._pd_assert_radvd_calls(ri)
# Now add another interface
# Create one pd-enabled subnet and add router interface
intfs1 = l3_test_common.router_append_pd_enabled_subnet(router,
count=1)
ri.process(agent)
# Get prefixes
self._pd_get_prefixes(agent, router, ri, intfs,
intfs1, mock_get_prefix)
# Update the router with the new prefix
ri.process(agent)
# Check that radvd is notified for the new prefix
self._pd_assert_radvd_calls(ri)
# Now remove the gw interface
self._pd_remove_gw_interface(intfs + intfs1, agent, router, ri)
ri.process(agent)
def _verify_address_scopes_iptables_rule(self, mock_iptables_manager):
filter_calls = [mock.call.add_chain('scope'),
mock.call.add_rule('FORWARD', '-j $scope')]
v6_mangle_calls = [mock.call.add_chain('scope'),
mock.call.add_rule('PREROUTING', '-j $scope'),
mock.call.add_rule(
'PREROUTING',
'-m connmark ! --mark 0x0/0xffff0000 '
'-j CONNMARK --restore-mark '
'--nfmask 0xffff0000 --ctmask 0xffff0000')]
v4_mangle_calls = (v6_mangle_calls +
[mock.call.add_chain('floatingip'),
mock.call.add_chain('float-snat'),
mock.call.add_rule('PREROUTING', '-j $floatingip'),
mock.call.add_rule(
'float-snat',
'-m connmark --mark 0x0/0xffff0000 '
'-j CONNMARK --save-mark '
'--nfmask 0xffff0000 --ctmask 0xffff0000')])
mock_iptables_manager.ipv4['filter'].assert_has_calls(filter_calls)
mock_iptables_manager.ipv6['filter'].assert_has_calls(filter_calls)
mock_iptables_manager.ipv4['mangle'].assert_has_calls(v4_mangle_calls,
any_order=True)
mock_iptables_manager.ipv6['mangle'].assert_has_calls(v6_mangle_calls,
any_order=True)
def test_initialize_address_scope_iptables_rules(self):
id = _uuid()
with mock.patch('neutron.agent.linux.iptables_manager.'
'IptablesManager'):
ri = l3router.RouterInfo(id, {}, **self.ri_kwargs)
self._verify_address_scopes_iptables_rule(ri.iptables_manager)
def test_initialize_address_scope_iptables_rules_dvr(self):
router = l3_test_common.prepare_router_data()
with mock.patch('neutron.agent.linux.iptables_manager.'
'IptablesManager'):
ri = dvr_router.DvrEdgeRouter(mock.Mock(),
HOSTNAME,
router['id'],
router,
**self.ri_kwargs)
self._verify_address_scopes_iptables_rule(ri.iptables_manager)
interface_name, ex_gw_port = l3_test_common.prepare_ext_gw_test(
self, ri)
router['gw_port_host'] = ri.host
ri._external_gateway_added = mock.Mock()
ri._create_dvr_gateway(ex_gw_port, interface_name)
self._verify_address_scopes_iptables_rule(
ri.snat_iptables_manager)
|
bigswitch/neutron
|
neutron/tests/unit/agent/l3/test_agent.py
|
Python
|
apache-2.0
| 133,544
|
#!/usr/bin/env python
# REF [site] >> https://github.com/baidu-research/warp-ctc
import numpy as np
import tensorflow as tf
import warpctc_tensorflow
# REF [site] >> https://github.com/baidu-research/warp-ctc/blob/master/tests/test_cpu.cpp
def simple_toy_example_1():
#num_time_steps = 5
#num_batches = 2
#alphabet_size = 6 # Feature size.
blank_label = 5
activations = np.array([
[[0.633766, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
[0.30176, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508]],
[[0.111121, 0.588392, 0.278779, 0.0055756, 0.00569609, 0.010436],
[0.24082, 0.397533, 0.0557226, 0.0546814, 0.0557528, 0.19549]],
[[0.0357786, 0.633813, 0.321418, 0.00249248, 0.00272882, 0.0037688],
[0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, 0.202456]],
[[0.0663296, 0.643849, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[0.280884, 0.429522, 0.0326593, 0.0339046, 0.0326856, 0.190345]],
[[0.458235, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107],
[0.423286, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]]
])
activations = np.log(activations) # ???
activation_lens = np.array([5, 5])
labels = np.array([
0, 1, 2, 1, 0,
0, 1, 1, 0
])
label_lens = np.array([5, 4])
# Expected CTC = [3.3421143650988143, 5.42262].
ctc_costs = warpctc_tensorflow.ctc(activations, labels, label_lens, activation_lens, blank_label=blank_label)
with tf.Session() as sess:
costs = sess.run(ctc_costs)
print('CTC costs =', costs)
def simple_toy_example_2():
#num_time_steps = 2
#num_batches = 3
#alphabet_size = 5 # Feature size.
blank_label = 0
probs = np.array([
[[0.1, 0.6, 0.1, 0.1, 0.1], [0.1, 0.1, 0.6, 0.1, 0.1]],
[[0.1, 0.1, 0.1, 0.6, 0.1], [0.1, 0.1, 0.1, 0.1, 0.6]],
[[0.1, 0.6, 0.1, 0.1, 0.1], [0.1, 0.1, 0.1, 0.6, 0.1]]
])
probs = np.transpose(probs, (1, 0, 2)) # (batches, time-steps, features) -> (time-steps, batches, features).
#probs = np.log(probs) # ???
prob_lens = np.array([2, 2, 2])
#labels = np.array([[1, 2], [3, 4], [1, 3]]) # InvalidArgumentError (see above for traceback): flat_labels is not a vector.
labels = np.array([
1, 2,
3, 4,
1, 3
])
label_lens = np.array([2, 2, 2])
ctc_costs = warpctc_tensorflow.ctc(probs, labels, label_lens, prob_lens, blank_label=blank_label)
with tf.Session() as sess:
costs = sess.run(ctc_costs)
print('CTC costs =', costs)
"""
ctc(activations, flat_labels, label_lengths, input_lengths, blank_label=0)
Computes the CTC loss between a sequence of activations and a ground truth labeling.
Inputs:
activations: A 3-D Tensor of floats.
The dimensions should be (t, n, a), where t is the time index, n is the minibatch index, and a indexes over activations for each symbol in the alphabet.
flat_labels: A 1-D Tensor of ints, a concatenation of all the labels for the minibatch.
label_lengths: A 1-D Tensor of ints, the length of each label for each example in the minibatch.
input_lengths: A 1-D Tensor of ints, the number of time steps for each sequence in the minibatch.
blank_label: int, the label value/index that the CTC calculation should use as the blank label.
Returns:
1-D float Tensor, the cost of each example in the minibatch (as negative log probabilities).
This class performs the softmax operation internally.
The label reserved for the blank symbol should be label 0.
"""
def main():
simple_toy_example_1()
simple_toy_example_2()
#--------------------------------------------------------------------
if '__main__' == __name__:
main()
|
sangwook236/general-development-and-testing
|
sw_dev/python/rnd/test/machine_learning/warp_ctc_test.py
|
Python
|
gpl-2.0
| 3,530
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python wrapper for input_pipeline_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
from tensorflow.contrib.input_pipeline.ops import gen_input_pipeline_ops
from tensorflow.contrib.util import loader
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import resource_loader
_input_pipeline_ops = loader.load_op_library(
resource_loader.get_path_to_datafile("_input_pipeline_ops.so"))
def obtain_next(string_list_tensor, counter):
"""Basic wrapper for the ObtainNextOp.
Args:
string_list_tensor: A tensor that is a list of strings
counter: an int64 ref tensor to keep track of which element is returned.
Returns:
An op that produces the element at counter + 1 in the list, round
robin style.
"""
return gen_input_pipeline_ops.obtain_next(string_list_tensor, counter)
def _maybe_randomize_list(string_list, shuffle):
if shuffle:
random.shuffle(string_list)
return string_list
def _create_list(string_list, shuffle, seed, num_epochs):
if shuffle and seed:
random.seed(seed)
expanded_list = _maybe_randomize_list(string_list, shuffle)
if num_epochs:
for _ in range(num_epochs - 1):
expanded_list.extend(_maybe_randomize_list(string_list, shuffle))
return expanded_list
def seek_next(string_list, shuffle=False, seed=None, num_epochs=None):
"""Returns an op that seeks the next element in a list of strings.
Seeking happens in a round robin fashion. This op creates a variable called
obtain_next_counter that is initialized to -1 and is used to keep track of
which element in the list was returned, and a variable
obtain_next_expanded_list to hold the list. If num_epochs is not None, then we
limit the number of times we go around the string_list before OutOfRangeError
is thrown. It creates a variable to keep track of this.
Args:
string_list: A list of strings.
shuffle: If true, we shuffle the string_list differently for each epoch.
seed: Seed used for shuffling.
num_epochs: Returns OutOfRangeError once string_list has been repeated
      num_epochs times. If unspecified then keeps on looping.
Returns:
An op that produces the next element in the provided list.
"""
expanded_list = _create_list(string_list, shuffle, seed, num_epochs)
with variable_scope.variable_scope("obtain_next"):
counter = variable_scope.get_variable(
name="obtain_next_counter",
initializer=constant_op.constant(
-1, dtype=dtypes.int64),
dtype=dtypes.int64)
with ops.colocate_with(counter):
string_tensor = variable_scope.get_variable(
name="obtain_next_expanded_list",
initializer=constant_op.constant(expanded_list),
dtype=dtypes.string)
if num_epochs:
filename_counter = variable_scope.get_variable(
name="obtain_next_filename_counter",
initializer=constant_op.constant(
0, dtype=dtypes.int64),
dtype=dtypes.int64)
c = filename_counter.count_up_to(len(expanded_list))
with ops.control_dependencies([c]):
return obtain_next(string_tensor, counter)
else:
return obtain_next(string_tensor, counter)
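# --- Usage sketch (added for illustration; not part of the original library) ---
# A minimal TF1-style example of driving seek_next(); the file names below are
# hypothetical placeholders. With num_epochs=1 the internal epoch counter's
# count_up_to() raises OutOfRangeError after one pass over the list.
def _seek_next_usage_example():  # pragma: no cover - illustrative only
  import tensorflow as tf
  next_element = seek_next(["file_a.txt", "file_b.txt"], num_epochs=1)
  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    while True:
      try:
        print(sess.run(next_element))
      except tf.errors.OutOfRangeError:
        break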
|
unnikrishnankgs/va
|
venv/lib/python3.5/site-packages/tensorflow/contrib/input_pipeline/python/ops/input_pipeline_ops.py
|
Python
|
bsd-2-clause
| 4,117
|
import re
from collections import defaultdict
from django.contrib.auth.mixins import UserPassesTestMixin
from django.core.exceptions import PermissionDenied
from django.forms import ModelForm
from django.shortcuts import redirect
from django.views.generic import CreateView, DetailView, RedirectView
from recipes.models import Menu, Recipe, MenuRecipe
from recipes.auth import check_share_key, get_share_key
class MenuForm(ModelForm):
class Meta:
model = Menu
fields = ['name']
class MenuDeleteView(RedirectView):
permanent = False
def get_redirect_url(self, pk):
m = Menu.objects.get(pk=pk)
if not (self.request.user.is_superuser or self.request.user == m.user):
raise PermissionDenied()
m.delete()
return "/"
class MenuRecipeOrderView(RedirectView):
permanent = False
def get_redirect_url(self, menu, recipe, direction):
menu = Menu.objects.get(pk=int(menu))
items = list(MenuRecipe.objects.filter(menu=menu))
for i in items:
print("-", i.recipe.pk, i.order, i.recipe.title)
print([mr.recipe.pk for mr in items])
n = [mr.recipe.pk for mr in items].index(int(recipe))
swap_with = (n - 1) if direction == 'up' else (n + 1)
items[n], items[swap_with] = items[swap_with], items[n]
for i, item in enumerate(items):
item.order = i
item.save()
print(i, item.recipe.title)
return menu.get_absolute_url()
class MenuRecipeRemoveView(RedirectView):
permanent = False
def get_redirect_url(self, menu, recipe):
menu = Menu.objects.get(pk=int(menu))
recipe = Recipe.objects.get(pk=int(recipe))
MenuRecipe.objects.get(menu=menu, recipe=recipe).delete()
return menu.get_absolute_url()
class MenuAddRecipeView(RedirectView):
def get_redirect_url(self):
        if 'menu' not in self.request.GET or 'recipe' not in self.request.GET:
raise Exception("Need a recipe and menu to be able to add one to the other...")
menu = self.request.GET['menu']
        if menu == '-1':
m = Menu.objects.create(name="<nieuw menu>", user=self.request.user)
else:
m = Menu.objects.get(pk=menu)
r = Recipe.objects.get(pk=self.request.GET['recipe'])
if MenuRecipe.objects.filter(menu=m, recipe=r).exists():
self.request.session['add_warning'] = 'Noot: Dit recept zat al in dit menu'
else:
self.request.session['add_warning'] = ''
MenuRecipe.objects.create(menu=m, recipe=r, order=MenuRecipe.objects.filter(menu=m).count()+1)
return m.get_absolute_url()
class MenuAddView(CreateView):
model = Menu
form_class = MenuForm
def form_valid(self, form):
obj = form.save(commit=False)
obj.user = self.request.user
return super().form_valid(form)
def form_invalid(self, form):
raise Exception("!!!!")
_PARTS = ("snufje", "teentje", "gram", "eetlepel", "eetl", "dl", "¼", "½", "takje", "stengel")
def parse_ingredient(ing):
quant = []
parts = ing.split()
while parts:
part = parts.pop(0)
if re.match(r"^\d+$", part) or part in _PARTS or re.sub("s$", "", part) in _PARTS:
quant.append(part)
else:
return " ".join(quant), " ".join([part] + parts)
class MenuDetailView(UserPassesTestMixin, DetailView):
model = Menu
def get(self, *args, **kwargs):
if 'share' not in self.request.GET and self.request.user.is_authenticated:
params = self.request.GET.copy()
params['share'] = get_share_key(self.request.user.id, int(self.kwargs['pk']), is_menu=True)
url = f"{self.request.path}?{params.urlencode()}"
return redirect(url)
return super().get(*args, **kwargs)
def get_context_data(self, **kwargs):
ingredients = defaultdict(list)
context = super().get_context_data(**kwargs)
self.request.session['current_menu'] = self.object.pk
warning = self.request.session.get('add_warning')
self.request.session['add_warning'] = None
recipes = MenuRecipe.objects.filter(menu=self.object)
for recipe in recipes:
recipe.ingredient_rows = list(row.split("|") for row in recipe.recipe.ingredients.splitlines())
for ingrow in recipe.ingredient_rows:
for ing in ingrow:
if ing.strip():
quant, ingredient = parse_ingredient(ing)
ingredients[ingredient].append(quant)
# get share key for recipes
share_user = self.share_user or self.request.user
if not share_user.is_authenticated:
raise Exception("User not authenticated (so why did they get this far?)")
recipe.share_key = get_share_key(share_user.id, recipe.recipe_id)
ingredients = sorted((" + ".join(q), i) for (i, q) in ingredients.items())
context.update(**locals())
return context
def test_func(self):
share = self.request.GET.get('share')
if share:
self.share_user = check_share_key(share)
if not self.share_user:
return False
return self.get_object().can_view(self.request.user) or self.get_object().can_view(self.share_user)
|
vanatteveldt/luctor
|
recipes/menu.py
|
Python
|
mit
| 5,407
|
import logging
import argparse
import json
import sys
import traceback
import threading
from doc2pdf import worker
EXAMPLE_CONFIG = """
{
"log_file": "C:\\\\pathtolog.txt",
"autodelete": false,
"converter_timeout": 60,
"converter_retries": 3,
"converter_delay": 5,
"queue_capacity": 100,
"observer_buffer_size": 65536,
"temporary_directory": "C:\\\\pathtotmp",
"include_paths": ["C:\\\\incpath1", "C:\\\\incpath2"],
"exclude_paths": ["C:\\\\incpath1\\\\excpath1"]
}
""".strip().encode("utf-8")
def catchexcept(etype, value, tb):
logging.error("uncatched exception...")
logging.error("type: %s, value: %s, value type: %s, traceback: %s" % (etype.__name__, value, type(value), "".join(traceback.format_tb(tb))))
def hookexcept():
"""
Workaround for `sys.excepthook` thread bug from:
http://bugs.python.org/issue1230540
Call once from the main thread before creating any threads.
"""
sys.excepthook = catchexcept
init_original = threading.Thread.__init__
def init(self, *args, **kwargs):
init_original(self, *args, **kwargs)
run_original = self.run
def run_with_except_hook(*args2, **kwargs2):
try:
run_original(*args2, **kwargs2)
except:
sys.excepthook(*sys.exc_info())
self.run = run_with_except_hook
threading.Thread.__init__ = init
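# --- Illustrative note (added, not part of the original module) ---
# Once hookexcept() has been called, an exception raised inside the run() of
# any subsequently created thread is routed through catchexcept() and logged,
# rather than being printed by threading's default handler, e.g.:
#
#   hookexcept()
#   threading.Thread(target=lambda: 1 / 0).start()  # error ends up in the log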
def main():
parser = argparse.ArgumentParser(description="automatic ms office to pdf converter")
parser.add_argument("config", help="path to the config file")
parser.add_argument("-c", dest="create", action="store_const", const=True, help="create sample config")
args = parser.parse_args()
if args.create:
config_file = open(args.config, "wb")
config_file.write(EXAMPLE_CONFIG)
config_file.close()
return
rootLogger = logging.getLogger()
rootLogger.setLevel(logging.DEBUG)
logFormatter = logging.Formatter("%(asctime)s %(message)s")
consoleHandler = logging.StreamHandler(sys.stdout)
consoleHandler.setFormatter(logFormatter)
rootLogger.addHandler(consoleHandler)
logging.info("starting doc2pdf...")
logging.info("hook exceptions...")
hookexcept()
config_file = open(args.config)
config = json.load(config_file)
logging.info("config loaded.")
fileHandler = logging.FileHandler(config["log_file"])
fileHandler.setFormatter(logFormatter)
rootLogger.addHandler(fileHandler)
w = worker.Worker(config)
w.start()
if __name__ == "__main__":
main()
|
andiwand/doc2pdf
|
src/doc2pdf/cli.py
|
Python
|
lgpl-3.0
| 2,717
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-03-01 17:17
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('bearing', '0043_auto_20180227_1914'),
]
operations = [
migrations.CreateModel(
name='BearingSearch',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('bearing', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='bearing_search_list', to='bearing.Bearing', verbose_name='Bearing Search Item')),
],
options={
'db_table': 'bearing_search',
'verbose_name_plural': 'Bearing Searchers',
'verbose_name': 'Bearing Search',
},
),
]
|
manti-by/POD
|
app/bearing/migrations/0044_bearingsearch.py
|
Python
|
bsd-3-clause
| 939
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
import socket
import six
from django.core.exceptions import ValidationError
from django.core.validators import validate_ipv46_address
from django.utils.translation import ugettext_lazy as _
RE_PATH = re.compile(r'([\w/\[\] .-]+)$', re.UNICODE)
def path_validator(value):
if not isinstance(value, six.string_types):
        message = _('%(show_value)s is not a valid path')
raise ValidationError(message, code='invalid_path', params={'show_value': value})
if value.endswith('/'):
        message = _('%(show_value)s is not a valid path')
raise ValidationError(message, code='invalid_path', params={'show_value': value})
valid_path = value.replace('//', '/')
str_len = len(value)
if str_len != len(valid_path):
        message = _('%(show_value)s is not a valid path')
raise ValidationError(message, code='invalid_path', params={'show_value': value})
if not RE_PATH.match(value):
        message = _('%(show_value)s is not a valid path')
raise ValidationError(message, code='invalid_path', params={'show_value': value})
def port_validator(value):
try:
if not (0 < value < 65535) or not isinstance(value, int):
            message = _('%(show_value)s is not a valid port')
raise ValidationError(message, code='invalid_port', params={'show_value': value})
except TypeError: # pragma: no cover
# Python3
        message = _('%(show_value)s is not a valid port')
raise ValidationError(message, code='invalid_port', params={'show_value': value})
def bind_port_validator(value):
port_validator(value)
if value > 49152:
        message = _('%(show_value)s is not a valid bind port')
raise ValidationError(message, code='invalid_port', params={'show_value': value})
def host_validator(value):
if not isinstance(value, six.string_types):
value = "{}".format(value)
try:
validate_ipv46_address(value)
except ValidationError:
try:
socket.gethostbyname(value)
except socket.error:
            message = _('%(show_value)s is not a valid host name, cannot be resolved')
raise ValidationError(message, code='invalid_host', params={'show_value': value})
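# --- Usage sketch (added for illustration; not part of the original module) ---
# These validators are meant to be attached to Django form or model fields;
# the field names below are hypothetical placeholders.
#
#   host = models.CharField(max_length=255, validators=[host_validator])
#   port = models.IntegerField(validators=[port_validator])
#   path = models.CharField(max_length=255, validators=[path_validator])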
|
ehooo/email_backup
|
email_backup/core/validators.py
|
Python
|
gpl-3.0
| 2,289
|
# coding: utf-8
from __future__ import unicode_literals
from boxsdk.exception import BoxAPIException
import csv
import os
from box_manage_users.scripts.script import Script
class MassProvisionScript(Script):
"""
Script to create many users in an enterprise at once.
"""
_title = 'Provision Users & Create Personal Folders'
_message = 'Ensure that the csv file is in the inputs folder with the name input_users.csv'
def run(self):
"""
Base class override.
Open the input_users.csv and create a user for each name/email in the file.
"""
with open(os.path.join('inputs', 'input_users.csv'), 'rb') as f:
reader = csv.reader(f)
for row in reader:
iterrow = iter(row)
for item in iterrow:
name = item
email = next(iterrow)
self.create_user_and_folder(email, name, 'co-owner')
super(MassProvisionScript, self).run()
def create_user_and_folder(self, email, name, access='editor'):
"""
Creates a new user and their own personal folder.
"""
        # Log which user the script is provisioning:
self._overview_logger.info('\n\nEmail: %s - Name: %s', email, name)
#Create new enterprise user
try:
new_person = self._client.create_new_user(email, name)
except BoxAPIException as ex:
self._fail_logger.warning('Could not create user {} ({}) - {}'.format(name, email, ex))
return
new_person_id = new_person.id
#Create own personal folder
new_person_folder = self._client.create_new_folder(name, '0')
new_folder_id = new_person_folder.id
#Add new user as collaborator
collab = self._client.add_collab(new_folder_id, new_person_id, access)
new_collab_id = collab.id
#Update new collab to Owner
self._client.update_collab(new_collab_id, "owner")
#Grab new collab_id for the admin
my_collab = self._client.get_all_collabs(new_folder_id)
assert len(my_collab) == 1
my_collab_id = my_collab[0].id
#Removed admin collab_id
self._client.delete_collab(my_collab_id)
self._logger.info('Success!\n')
def main():
MassProvisionScript().run()
if __name__ == '__main__':
main()
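# --- Illustrative note (added, not part of the original script) ---
# run() consumes inputs/input_users.csv cell by cell as alternating name/email
# values, so a (hypothetical) row such as
#   Jane Doe,jane.doe@example.com,John Roe,john.roe@example.com
# provisions two users, each ending up as owner of their own personal folder.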
|
box-samples/user-management
|
box_manage_users/scripts/provision.py
|
Python
|
apache-2.0
| 2,384
|
script = b'info = \'Set by the %s script\';'
def main(request, response):
type = request.GET[b'type']
if request.GET[b'type'] == b'fallingback':
return 404, [(b'Content-Type', b'text/plain')], u"Page not found"
return [(b'Content-Type', b'text/javascript')], script % type
|
scheib/chromium
|
third_party/blink/web_tests/external/wpt/html/browsers/offline/appcache/workers/resources/appcache-worker-import.py
|
Python
|
bsd-3-clause
| 294
|
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
import re
import uuid
import logging
from django.db import models
from django.db.models import signals
from django.contrib.contenttypes.models import ContentType
from django.conf import settings
from django.utils.timezone import now
from django.utils.translation import ugettext_lazy as _
from django.urls import reverse
from django.core.files.storage import FileSystemStorage
from pinax.ratings.models import OverallRating
from tinymce.models import HTMLField
from geonode.base.models import ResourceBase, ResourceBaseManager, resourcebase_post_save
from geonode.people.utils import get_valid_user
from geonode.utils import check_shp_columnnames
from geonode.security.models import PermissionLevelMixin
from geonode.security.utils import remove_object_permissions
from geonode.notifications_helper import (
send_notification,
get_notification_recipients)
from ..services.enumerations import CASCADED
from ..services.enumerations import INDEXED
logger = logging.getLogger("geonode.layers.models")
shp_exts = ['.shp', ]
csv_exts = ['.csv']
kml_exts = ['.kml']
vec_exts = shp_exts + csv_exts + kml_exts
cov_exts = ['.tif', '.tiff', '.geotiff', '.geotif', '.asc']
TIME_REGEX = (
('[0-9]{8}', _('YYYYMMDD')),
('[0-9]{8}T[0-9]{6}', _("YYYYMMDD'T'hhmmss")),
('[0-9]{8}T[0-9]{6}Z', _("YYYYMMDD'T'hhmmss'Z'")),
)
TIME_REGEX_FORMAT = {
'[0-9]{8}': '%Y%m%d',
'[0-9]{8}T[0-9]{6}': '%Y%m%dT%H%M%S',
'[0-9]{8}T[0-9]{6}Z': '%Y%m%dT%H%M%SZ'
}
# these are only used if there is no user-configured value in the settings
_DEFAULT_CASCADE_WORKSPACE = "cascaded-services"
_DEFAULT_WORKSPACE = "cascaded-services"
class Style(models.Model, PermissionLevelMixin):
"""Model for storing styles.
"""
name = models.CharField(_('style name'), max_length=255, unique=True)
sld_title = models.CharField(max_length=255, null=True, blank=True)
sld_body = models.TextField(_('sld text'), null=True, blank=True)
sld_version = models.CharField(
_('sld version'),
max_length=12,
null=True,
blank=True)
sld_url = models.CharField(_('sld url'), null=True, max_length=1000)
workspace = models.CharField(max_length=255, null=True, blank=True)
def __str__(self):
return "{0}".format(self.name)
def absolute_url(self):
if self.sld_url:
if self.sld_url.startswith(
settings.OGC_SERVER['default']['LOCATION']):
return self.sld_url.split(
settings.OGC_SERVER['default']['LOCATION'], 1)[1]
elif self.sld_url.startswith(settings.OGC_SERVER['default']['PUBLIC_LOCATION']):
return self.sld_url.split(
settings.OGC_SERVER['default']['PUBLIC_LOCATION'], 1)[1]
return self.sld_url
else:
logger.error(
"SLD URL is empty for Style %s" %
self.name)
return None
def get_self_resource(self):
"""Get associated resource base."""
# Associate this model with resource
try:
layer = self.layer_styles.first()
""":type: Layer"""
return layer.get_self_resource()
except Exception:
return None
class LayerManager(ResourceBaseManager):
def __init__(self):
models.Manager.__init__(self)
class UploadSession(models.Model):
"""Helper class to keep track of uploads.
"""
resource = models.ForeignKey(ResourceBase, blank=True, null=True, on_delete=models.CASCADE)
date = models.DateTimeField(auto_now=True)
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
processed = models.BooleanField(default=False)
error = models.TextField(blank=True, null=True)
traceback = models.TextField(blank=True, null=True)
context = models.TextField(blank=True, null=True)
def successful(self):
        return self.processed and self.error is None
def __str__(self):
_s = "[Upload session-id: {}]".format(self.id)
try:
_s += " - {}".format(self.resource.title)
except Exception:
pass
return "{0}".format(_s)
def __unicode__(self):
return "{0}".format(self.__str__())
class Layer(ResourceBase):
"""
Layer (inherits ResourceBase fields)
"""
PERMISSIONS = {
'write': [
'change_layer_data',
'change_layer_style',
]
}
# internal fields
objects = LayerManager()
workspace = models.CharField(_('Workspace'), max_length=128)
store = models.CharField(_('Store'), max_length=128)
storeType = models.CharField(_('Storetype'), max_length=128)
name = models.CharField(_('Name'), max_length=128)
typename = models.CharField(_('Typename'), max_length=128, null=True, blank=True)
is_mosaic = models.BooleanField(_('Is mosaic?'), default=False)
has_time = models.BooleanField(_('Has time?'), default=False)
has_elevation = models.BooleanField(_('Has elevation?'), default=False)
time_regex = models.CharField(
_('Time regex'),
max_length=128,
null=True,
blank=True,
choices=TIME_REGEX)
elevation_regex = models.CharField(_('Elevation regex'), max_length=128, null=True, blank=True)
default_style = models.ForeignKey(
Style,
on_delete=models.SET_NULL,
related_name='layer_default_style',
null=True,
blank=True)
styles = models.ManyToManyField(Style, related_name='layer_styles')
remote_service = models.ForeignKey("services.Service", null=True, blank=True, on_delete=models.CASCADE)
charset = models.CharField(max_length=255, default='UTF-8')
upload_session = models.ForeignKey(UploadSession, blank=True, null=True, on_delete=models.CASCADE)
use_featureinfo_custom_template = models.BooleanField(
_('use featureinfo custom template?'),
        help_text=_('specifies whether or not to use a custom GetFeatureInfo template.'),
default=False
)
featureinfo_custom_template = HTMLField(
_('featureinfo custom template'),
help_text=_('the custom GetFeatureInfo template HTML contents.'),
unique=False,
blank=True,
null=True)
def is_vector(self):
return self.storeType == 'dataStore'
def get_upload_session(self):
return self.upload_session
@property
def processed(self):
self.upload_session = UploadSession.objects.filter(resource=self).first()
if self.upload_session:
return self.upload_session.processed
else:
return True
@property
def display_type(self):
if self.storeType == "dataStore":
return "Vector Data"
elif self.storeType == "coverageStore":
return "Raster Data"
else:
return "Data"
@property
def data_model(self):
if hasattr(self, 'modeldescription_set'):
lmd = self.modeldescription_set.all()
if lmd.exists():
return lmd.get().get_django_model()
return None
@property
def data_objects(self):
if self.data_model is not None:
return self.data_model.objects.using('datastore')
return None
@property
def ows_url(self):
if self.remote_service is not None and self.remote_service.method == INDEXED:
result = self.remote_service.service_url
else:
result = "{base}ows".format(
base=settings.OGC_SERVER['default']['PUBLIC_LOCATION'],
)
return result
@property
def ptype(self):
return self.remote_service.ptype if self.remote_service else "gxp_wmscsource"
@property
def service_typename(self):
if self.remote_service is not None:
return "%s:%s" % (self.remote_service.name, self.alternate)
else:
return self.alternate
@property
def attributes(self):
if self.attribute_set and self.attribute_set.count():
_attrs = self.attribute_set
else:
_attrs = Attribute.objects.filter(layer=self)
return _attrs.exclude(attribute='the_geom').order_by('display_order')
# layer geometry type.
@property
def gtype(self):
# return attribute type without 'gml:' and 'PropertyType'
if self.attribute_set and self.attribute_set.count():
_attrs = self.attribute_set
else:
_attrs = Attribute.objects.filter(layer=self)
if _attrs.filter(attribute='the_geom').exists():
_att_type = _attrs.filter(attribute='the_geom').first().attribute_type
_gtype = re.match(r'\(\'gml:(.*?)\',', _att_type)
return _gtype.group(1) if _gtype else None
return None
def get_base_file(self):
"""Get the shp or geotiff file for this layer.
"""
# If there was no upload_session return None
try:
if self.upload_session is None:
return None, None
except Exception:
return None, None
base_exts = [x.replace('.', '') for x in cov_exts + vec_exts]
base_files = self.upload_session.layerfile_set.filter(
name__in=base_exts)
base_files_count = base_files.count()
# If there are no files in the upload_session return None
if base_files_count == 0:
return None, None
msg = 'There should only be one main file (.shp or .geotiff or .asc), found %s' % base_files_count
assert base_files_count == 1, msg
# we need to check, for shapefile, if column names are valid
list_col = None
if self.storeType == 'dataStore':
valid_shp, wrong_column_name, list_col = check_shp_columnnames(
self)
if wrong_column_name:
msg = 'Shapefile has an invalid column name: %s' % wrong_column_name
else:
msg = _('File cannot be opened, maybe check the encoding')
        # AF: Removing assertion since if the original file does not exist anymore
# it won't be possible to update Metadata anymore
# assert valid_shp, msg
# no error, let's return the base files
return base_files.get(), list_col
def get_absolute_url(self):
return reverse(
'layer_detail',
args=("%s:%s" % (self.store, self.alternate),)
)
@property
def embed_url(self):
return reverse('layer_embed', kwargs={'layername': self.service_typename})
def attribute_config(self):
# Get custom attribute sort order and labels if any
cfg = {}
visible_attributes = self.attribute_set.visible()
if (visible_attributes.count() > 0):
cfg["getFeatureInfo"] = {
"fields": [lyr.attribute for lyr in visible_attributes],
"propertyNames": {lyr.attribute: lyr.attribute_label for lyr in visible_attributes},
"displayTypes": {lyr.attribute: lyr.featureinfo_type for lyr in visible_attributes}
}
if self.use_featureinfo_custom_template:
cfg["ftInfoTemplate"] = self.featureinfo_custom_template
return cfg
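    # Editor's note (illustrative, based only on attribute_config above): for a
    # hypothetical layer with two visible attributes "name" and "pop", the
    # returned dict would look roughly like:
    #     {"getFeatureInfo": {
    #         "fields": ["name", "pop"],
    #         "propertyNames": {"name": "Name", "pop": "Population"},
    #         "displayTypes": {"name": "type_property", "pop": "type_property"}},
    #      "ftInfoTemplate": "<p>...</p>"}  # only if use_featureinfo_custom_template
    # The attribute names and labels here are hypothetical placeholders.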
def __str__(self):
return "{0}".format(self.alternate)
class Meta:
# custom permissions,
# change and delete are standard in django-guardian
permissions = (
('change_layer_data', 'Can edit layer data'),
('change_layer_style', 'Can change layer style'),
)
# Permission Level Constants
# LEVEL_NONE inherited
LEVEL_READ = 'layer_readonly'
LEVEL_WRITE = 'layer_readwrite'
LEVEL_ADMIN = 'layer_admin'
def maps(self):
from geonode.maps.models import MapLayer
return MapLayer.objects.filter(name=self.alternate)
@property
def class_name(self):
return self.__class__.__name__
def view_count_up(self, user, do_local=False):
""" increase view counter, if user is not owner and not super
@param user which views layer
@type User model
@param do_local - do local counter update even if pubsub is enabled
@type bool
"""
if user == self.owner or user.is_superuser:
return
if not do_local:
from geonode.messaging import producer
producer.viewing_layer(str(user), str(self.owner), self.id)
else:
Layer.objects.filter(id=self.id)\
.update(popular_count=models.F('popular_count') + 1)
class LayerFile(models.Model):
"""Helper class to store original files.
"""
upload_session = models.ForeignKey(UploadSession, on_delete=models.CASCADE)
name = models.CharField(max_length=255)
base = models.BooleanField(default=False)
file = models.FileField(
upload_to='layers/%Y/%m/%d',
storage=FileSystemStorage(
base_url=settings.LOCAL_MEDIA_URL),
max_length=255)
class AttributeManager(models.Manager):
"""Helper class to access filtered attributes
"""
def visible(self):
return self.get_queryset().filter(
visible=True).order_by('display_order')
class Attribute(models.Model):
"""
Auxiliary model for storing layer attributes.
This helps reduce the need for runtime lookups
to other servers, and lets users customize attribute titles,
sort order, and visibility.
"""
layer = models.ForeignKey(
Layer,
blank=False,
null=False,
unique=False,
on_delete=models.CASCADE,
related_name='attribute_set')
attribute = models.CharField(
_('attribute name'),
help_text=_('name of attribute as stored in shapefile/spatial database'),
max_length=255,
blank=False,
null=True,
unique=False)
description = models.CharField(
_('attribute description'),
help_text=_('description of attribute to be used in metadata'),
max_length=255,
blank=True,
null=True)
attribute_label = models.CharField(
_('attribute label'),
help_text=_('title of attribute as displayed in GeoNode'),
max_length=255,
blank=True,
null=True,
unique=False)
attribute_type = models.CharField(
_('attribute type'),
help_text=_('the data type of the attribute (integer, string, geometry, etc)'),
max_length=50,
blank=False,
null=False,
default='xsd:string',
unique=False)
visible = models.BooleanField(
_('visible?'),
help_text=_('specifies if the attribute should be displayed in identify results'),
default=True)
display_order = models.IntegerField(
_('display order'),
help_text=_('specifies the order in which attribute should be displayed in identify results'),
default=1)
"""
Attribute FeatureInfo-Type list
"""
TYPE_PROPERTY = 'type_property'
TYPE_HREF = 'type_href'
TYPE_IMAGE = 'type_image'
TYPE_VIDEO_MP4 = 'type_video_mp4'
TYPE_VIDEO_OGG = 'type_video_ogg'
TYPE_VIDEO_WEBM = 'type_video_webm'
TYPE_VIDEO_3GP = 'type_video_3gp'
TYPE_VIDEO_FLV = 'type_video_flv'
TYPE_VIDEO_YOUTUBE = 'type_video_youtube'
TYPE_AUDIO = 'type_audio'
TYPE_IFRAME = 'type_iframe'
TYPES = ((TYPE_PROPERTY, _("Label"),),
(TYPE_HREF, _("URL"),),
(TYPE_IMAGE, _("Image",),),
(TYPE_VIDEO_MP4, _("Video (mp4)",),),
(TYPE_VIDEO_OGG, _("Video (ogg)",),),
(TYPE_VIDEO_WEBM, _("Video (webm)",),),
(TYPE_VIDEO_3GP, _("Video (3gp)",),),
(TYPE_VIDEO_FLV, _("Video (flv)",),),
(TYPE_VIDEO_YOUTUBE, _("Video (YouTube/VIMEO - embedded)",),),
(TYPE_AUDIO, _("Audio",),),
(TYPE_IFRAME, _("IFRAME",),),
)
featureinfo_type = models.CharField(
_('featureinfo type'),
help_text=_('specifies if the attribute should be rendered with an HTML widget on GetFeatureInfo template.'),
max_length=255,
unique=False,
blank=False,
null=False,
default=TYPE_PROPERTY,
choices=TYPES)
# statistical derivations
count = models.IntegerField(
_('count'),
help_text=_('count value for this field'),
default=1)
min = models.CharField(
_('min'),
help_text=_('minimum value for this field'),
max_length=255,
blank=False,
null=True,
unique=False,
default='NA')
max = models.CharField(
_('max'),
help_text=_('maximum value for this field'),
max_length=255,
blank=False,
null=True,
unique=False,
default='NA')
average = models.CharField(
_('average'),
help_text=_('average value for this field'),
max_length=255,
blank=False,
null=True,
unique=False,
default='NA')
median = models.CharField(
_('median'),
help_text=_('median value for this field'),
max_length=255,
blank=False,
null=True,
unique=False,
default='NA')
stddev = models.CharField(
_('standard deviation'),
help_text=_('standard deviation for this field'),
max_length=255,
blank=False,
null=True,
unique=False,
default='NA')
sum = models.CharField(
_('sum'),
help_text=_('sum value for this field'),
max_length=255,
blank=False,
null=True,
unique=False,
default='NA')
unique_values = models.TextField(
_('unique values for this field'),
null=True,
blank=True,
default='NA')
last_stats_updated = models.DateTimeField(_('last modified'), default=now, help_text=_(
        'date when attribute statistics were last updated')) # passing the method itself, not the result
objects = AttributeManager()
def __str__(self):
return "{0}".format(
self.attribute_label if self.attribute_label else self.attribute)
def unique_values_as_list(self):
return self.unique_values.split(',')
def _get_alternate_name(instance):
if instance.remote_service is not None and instance.remote_service.method == INDEXED:
result = instance.name
elif instance.remote_service is not None and instance.remote_service.method == CASCADED:
result = "{}:{}".format(
getattr(settings, "CASCADE_WORKSPACE", _DEFAULT_CASCADE_WORKSPACE),
instance.name
)
else: # we are not dealing with a service-related instance
result = "{}:{}".format(
getattr(settings, "DEFAULT_WORKSPACE", _DEFAULT_WORKSPACE),
instance.name
)
return result
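# Editor's note (illustrative, derived from _get_alternate_name above): for a
# hypothetical layer named "roads" the alternate resolves to
#   - "roads"                      for an INDEXED remote service,
#   - "<CASCADE_WORKSPACE>:roads"  for a CASCADED remote service,
#   - "<DEFAULT_WORKSPACE>:roads"  otherwise,
# with the workspace read from settings and falling back to the module-level
# _DEFAULT_* constants when the setting is not defined.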
def pre_save_layer(instance, sender, **kwargs):
if kwargs.get('raw', False):
try:
_resourcebase_ptr = instance.resourcebase_ptr
instance.owner = _resourcebase_ptr.owner
instance.uuid = _resourcebase_ptr.uuid
instance.bbox_polygon = _resourcebase_ptr.bbox_polygon
instance.srid = _resourcebase_ptr.srid
except Exception as e:
logger.exception(e)
if instance.abstract == '' or instance.abstract is None:
instance.abstract = 'No abstract provided'
if instance.title == '' or instance.title is None:
instance.title = instance.name
# Set a default user for accountstream to work correctly.
if instance.owner is None:
instance.owner = get_valid_user()
logger.debug("handling UUID In pre_save_layer")
if hasattr(settings, 'LAYER_UUID_HANDLER') and settings.LAYER_UUID_HANDLER != '':
logger.debug("using custom uuid handler In pre_save_layer")
from geonode.layers.utils import get_uuid_handler
instance.uuid = get_uuid_handler()(instance).create_uuid()
else:
if instance.uuid == '':
instance.uuid = str(uuid.uuid1())
logger.debug("In pre_save_layer")
if instance.alternate is None:
instance.alternate = _get_alternate_name(instance)
logger.debug("instance.alternate is: {}".format(instance.alternate))
base_file, info = instance.get_base_file()
if info:
instance.info = info
if base_file is not None:
extension = '.%s' % base_file.name
if extension in vec_exts:
instance.storeType = 'dataStore'
elif extension in cov_exts:
instance.storeType = 'coverageStore'
if instance.bbox_polygon is None:
instance.set_bbox_polygon((-180, -90, 180, 90), 'EPSG:4326')
instance.set_bounds_from_bbox(
instance.bbox_polygon,
instance.bbox_polygon.srid
)
# Send a notification when a layer is created
if instance.pk is None and instance.title:
# Resource Created
notice_type_label = '%s_created' % instance.class_name.lower()
recipients = get_notification_recipients(notice_type_label, resource=instance)
send_notification(recipients, notice_type_label, {'resource': instance})
def pre_delete_layer(instance, sender, **kwargs):
"""
    Remove any style associated with the layer, if it is not used by other layers.
    The default style will be deleted in post_delete_layer.
"""
if instance.remote_service is not None and instance.remote_service.method == INDEXED:
        # we need to delete the maplayers here because in post_save layer.remote_service is no longer available
# REFACTOR
from geonode.maps.models import MapLayer
logger.debug(
"Going to delete associated maplayers for [%s]",
instance.alternate)
MapLayer.objects.filter(
name=instance.alternate,
ows_url=instance.ows_url).delete()
return
logger.debug(
"Going to delete the styles associated for [%s]",
instance.alternate)
ct = ContentType.objects.get_for_model(instance)
OverallRating.objects.filter(
content_type=ct,
object_id=instance.id).delete()
default_style = instance.default_style
for style in instance.styles.all():
if style.layer_styles.all().count() == 1:
if style != default_style:
style.delete()
# Delete object permissions
remove_object_permissions(instance)
def post_delete_layer(instance, sender, **kwargs):
"""
    Remove the layer from any associated maps.
    Remove the layer's default style.
"""
if instance.remote_service is not None and instance.remote_service.method == INDEXED:
return
from geonode.maps.models import MapLayer
logger.debug(
"Going to delete associated maplayers for [%s]", instance.name)
MapLayer.objects.filter(
name=instance.alternate,
ows_url=instance.ows_url).delete()
logger.debug(
"Going to delete the default style for [%s]", instance.name)
if instance.default_style and Layer.objects.filter(
default_style__id=instance.default_style.id).count() == 0:
instance.default_style.delete()
try:
if instance.upload_session:
for lf in instance.upload_session.layerfile_set.all():
lf.file.delete()
instance.upload_session.delete()
except UploadSession.DoesNotExist:
pass
def post_delete_layer_file(instance, sender, **kwargs):
"""Delete associated file.
:param instance: LayerFile instance
:type instance: LayerFile
"""
instance.file.delete(save=False)
signals.pre_save.connect(pre_save_layer, sender=Layer)
signals.post_save.connect(resourcebase_post_save, sender=Layer)
signals.pre_delete.connect(pre_delete_layer, sender=Layer)
signals.post_delete.connect(post_delete_layer, sender=Layer)
signals.post_delete.connect(post_delete_layer_file, sender=LayerFile)
|
francbartoli/geonode
|
geonode/layers/models.py
|
Python
|
gpl-3.0
| 24,953
|
import urllib.request
import time
preço = 99.99  # some larger value
while preço >= 4.74:
pagina = urllib.request.urlopen(
'http://beans.itcarlow.ie/prices-loyalty.html')
texto = pagina.read().decode('utf8')
onde = texto.find('>$')
início = onde + 2
fim = início + 4
preço = float(texto[início:fim])
if preço >= 4.74:
        print('Wait...')
time.sleep(600)
print('Buy! Price: %5.2f' % preço)
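# Editor's note (illustrative, not part of the original script): the price is
# parsed by finding the first ">$" on the page and reading the next four
# characters, e.g. on a hypothetical snippet:
#     >>> texto = '<span id="price">$4.99</span>'
#     >>> onde = texto.find('>$')
#     >>> float(texto[onde + 2:onde + 6])
#     4.99
# so the price is assumed to always occupy exactly four characters after the
# dollar sign.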
|
wsricardo/mcestudos
|
treinamento-webScraping/Abraji/p11.py
|
Python
|
gpl-3.0
| 468
|
# Define plugins for py.test
import pytest
def pytest_addoption(parser):
parser.addoption("--runslow", action="store_true",
help="run slow tests")
|
aswolf/xmeos
|
xmeos/conftest.py
|
Python
|
mit
| 160
|
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.data_processing import base as dp_base
from tempest.common.utils import data_utils
from tempest import test
class NodeGroupTemplateTest(dp_base.BaseDataProcessingTest):
@classmethod
def resource_setup(cls):
super(NodeGroupTemplateTest, cls).resource_setup()
cls.node_group_template = {
'description': 'Test node group template',
'plugin_name': 'vanilla',
'hadoop_version': '1.2.1',
'node_processes': [
'datanode',
'tasktracker'
],
'flavor_id': cls.flavor_ref,
'node_configs': {
'HDFS': {
'Data Node Heap Size': 1024
},
'MapReduce': {
'Task Tracker Heap Size': 1024
}
}
}
def _create_node_group_template(self, template_name=None):
"""Creates Node Group Template with optional name specified.
It creates template, ensures template name and response body.
Returns id and name of created template.
"""
if not template_name:
# generate random name if it's not specified
template_name = data_utils.rand_name('sahara-ng-template')
# create node group template
resp_body = self.create_node_group_template(template_name,
**self.node_group_template)
        # ensure that the template was created successfully
self.assertEqual(template_name, resp_body['name'])
self.assertDictContainsSubset(self.node_group_template, resp_body)
return resp_body['id'], template_name
@test.attr(type='smoke')
def test_node_group_template_create(self):
self._create_node_group_template()
@test.attr(type='smoke')
def test_node_group_template_list(self):
template_info = self._create_node_group_template()
# check for node group template in list
templates = self.client.list_node_group_templates()
templates_info = [(template['id'], template['name'])
for template in templates]
self.assertIn(template_info, templates_info)
@test.attr(type='smoke')
def test_node_group_template_get(self):
template_id, template_name = self._create_node_group_template()
# check node group template fetch by id
template = self.client.get_node_group_template(template_id)
self.assertEqual(template_name, template['name'])
self.assertDictContainsSubset(self.node_group_template, template)
@test.attr(type='smoke')
def test_node_group_template_delete(self):
template_id, _ = self._create_node_group_template()
# delete the node group template by id
self.client.delete_node_group_template(template_id)
|
CiscoSystems/tempest
|
tempest/api/data_processing/test_node_group_templates.py
|
Python
|
apache-2.0
| 3,476
|
#!/usr/bin/env python
from __future__ import division, print_function, absolute_import
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('signal', parent_package, top_path)
config.add_data_dir('tests')
config.add_data_dir('benchmarks')
config.add_extension('sigtools',
sources=['sigtoolsmodule.c', 'firfilter.c',
'medianfilter.c', 'lfilter.c.src',
'correlate_nd.c.src'],
depends=['sigtools.h'],
include_dirs=['.']
)
config.add_extension('_spectral', sources=['_spectral.c'])
config.add_extension('_max_len_seq', sources=['_max_len_seq.c'])
spline_src = ['splinemodule.c', 'S_bspline_util.c', 'D_bspline_util.c',
'C_bspline_util.c', 'Z_bspline_util.c', 'bspline_util.c']
config.add_extension('spline', sources=spline_src)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
|
chaluemwut/fbserver
|
venv/lib/python2.7/site-packages/scipy/signal/setup.py
|
Python
|
apache-2.0
| 1,168
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v9.errors",
marshal="google.ads.googleads.v9",
manifest={"ConversionValueRuleErrorEnum",},
)
class ConversionValueRuleErrorEnum(proto.Message):
r"""Container for enum describing possible conversion value rule
errors.
"""
class ConversionValueRuleError(proto.Enum):
r"""Enum describing possible conversion value rule errors."""
UNSPECIFIED = 0
UNKNOWN = 1
INVALID_GEO_TARGET_CONSTANT = 2
CONFLICTING_INCLUDED_AND_EXCLUDED_GEO_TARGET = 3
CONFLICTING_CONDITIONS = 4
CANNOT_REMOVE_IF_INCLUDED_IN_VALUE_RULE_SET = 5
CONDITION_NOT_ALLOWED = 6
FIELD_MUST_BE_UNSET = 7
CANNOT_PAUSE_UNLESS_VALUE_RULE_SET_IS_PAUSED = 8
UNTARGETABLE_GEO_TARGET = 9
INVALID_AUDIENCE_USER_LIST = 10
INACCESSIBLE_USER_LIST = 11
INVALID_AUDIENCE_USER_INTEREST = 12
CANNOT_ADD_RULE_WITH_STATUS_REMOVED = 13
__all__ = tuple(sorted(__protobuf__.manifest))
|
googleads/google-ads-python
|
google/ads/googleads/v9/errors/types/conversion_value_rule_error.py
|
Python
|
apache-2.0
| 1,666
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
@pytest.fixture
def resources(plan_runner):
_, resources = plan_runner()
return resources
def test_resource_count(resources):
"Test number of resources created."
assert len(resources) == 3
def test_iam(resources):
"Test IAM binding resources."
bindings = [r['values'] for r in resources if r['type']
== 'google_cloudfunctions_function_iam_binding']
assert len(bindings) == 1
assert bindings[0]['role'] == 'roles/cloudfunctions.invoker'
|
GoogleCloudPlatform/cloud-foundation-fabric
|
tests/modules/cloud_function/test_plan.py
|
Python
|
apache-2.0
| 1,062
|
#!/usr/bin/env python
"""
Create a directed graph, allowing multiple edges and self loops, from
a unix mailbox. The nodes are email addresses with links
that point from the sender to the receivers. The edge data
is a Python email.Message object which contains all of
the email message data.
This example shows the power of XDiGraph to hold edge data
of arbitrary Python objects (in this case a list of email messages).
By default, load the sample unix email mailbox called "unix_email.mbox".
You can load your own mailbox by naming it on the command line, eg
python unixemail.py /var/spool/mail/username
"""
# Author: Aric Hagberg (hagberg@lanl.gov)
# Copyright (C) 2005-2016 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import email
from email.utils import getaddresses,parseaddr
import mailbox
import sys
# unix mailbox recipe
# see http://www.python.org/doc/current/lib/module-mailbox.html
def msgfactory(fp):
try:
return email.message_from_file(fp)
except email.Errors.MessageParseError:
# Don't return None since that will stop the mailbox iterator
return ''
if __name__ == '__main__':
import networkx as nx
try:
import matplotlib.pyplot as plt
    except ImportError:
pass
if len(sys.argv)==1:
filePath = "unix_email.mbox"
else:
filePath = sys.argv[1]
mbox = mailbox.mbox(filePath, msgfactory) # parse unix mailbox
G=nx.MultiDiGraph() # create empty graph
    # parse each message and build the graph
for msg in mbox: # msg is python email.Message.Message object
(source_name,source_addr) = parseaddr(msg['From']) # sender
# get all recipients
# see http://www.python.org/doc/current/lib/module-email.Utils.html
tos = msg.get_all('to', [])
ccs = msg.get_all('cc', [])
resent_tos = msg.get_all('resent-to', [])
resent_ccs = msg.get_all('resent-cc', [])
all_recipients = getaddresses(tos + ccs + resent_tos + resent_ccs)
# now add the edges for this mail message
for (target_name,target_addr) in all_recipients:
G.add_edge(source_addr,target_addr,message=msg)
# print edges with message subject
for (u,v,d) in G.edges_iter(data=True):
print("From: %s To: %s Subject: %s"%(u,v,d['message']["Subject"]))
try: # draw
pos=nx.spring_layout(G,iterations=10)
nx.draw(G,pos,node_size=0,alpha=0.4,edge_color='r',font_size=16)
plt.savefig("unix_email.png")
plt.show()
    except Exception: # matplotlib not available
pass
|
elenanst/HPOlib
|
virtualHPOlib/share/doc/networkx-1.11/examples/graph/unix_email.py
|
Python
|
gpl-3.0
| 2,670
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Upgrader for Python scripts from 1.* TensorFlow to 2.0 TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ast
import functools
import sys
import pasta
import six
from tensorflow.tools.compatibility import ast_edits
from tensorflow.tools.compatibility import renames_v2
from tensorflow.tools.compatibility import reorders_v2
# These pylint warnings are a mistake.
# pylint: disable=g-explicit-bool-comparison,g-bool-id-comparison
class TFAPIChangeSpec(ast_edits.APIChangeSpec):
"""List of maps that describe what changed in the API."""
def __init__(self):
# Maps from a function name to a dictionary that describes how to
# map from an old argument keyword to the new argument keyword.
# If the new argument is None, it will be removed.
# Only keyword args are handled, so make sure to also put any function in
# function_reorders to ensure that all args are made into keywords first.
self.function_keyword_renames = {
"tf.test.assert_equal_graph_def": {
"checkpoint_v2": None,
},
"tf.nn.embedding_lookup": {
"validate_indices": None,
},
"tf.image.sample_distorted_bounding_box": {
"seed2": None,
},
"tf.gradients": {
"colocate_gradients_with_ops": None,
},
"tf.hessians": {
"colocate_gradients_with_ops": None,
},
"*.minimize": {
"colocate_gradients_with_ops": None,
},
"*.compute_gradients": {
"colocate_gradients_with_ops": None,
},
"tf.cond": {
"strict": None,
"fn1": "true_fn",
"fn2": "false_fn"
},
"tf.argmin": {
"dimension": "axis",
},
"tf.argmax": {
"dimension": "axis",
},
"tf.arg_min": {
"dimension": "axis",
},
"tf.arg_max": {
"dimension": "axis",
},
"tf.math.argmin": {
"dimension": "axis",
},
"tf.math.argmax": {
"dimension": "axis",
},
"tf.image.crop_and_resize": {
"box_ind": "box_indices",
},
"tf.image.extract_image_patches": {
"ksizes": "sizes",
},
"tf.extract_image_patches": {
"ksizes": "sizes",
},
"tf.expand_dims": {
"dim": "axis",
},
"tf.batch_to_space": {
"block_size": "block_shape",
},
"tf.space_to_batch": {
"block_size": "block_shape",
},
"tf.nn.space_to_batch": {
"block_size": "block_shape",
},
"tf.constant": {
"verify_shape": "verify_shape_is_now_always_true",
},
"tf.convert_to_tensor": {
"preferred_dtype": "dtype_hint"
},
"tf.nn.softmax_cross_entropy_with_logits": {
"dim": "axis",
"_sentinel": None,
},
"tf.nn.softmax_cross_entropy_with_logits_v2": {
"dim": "axis"
},
"tf.linalg.l2_normalize": {
"dim": "axis",
},
"tf.linalg.norm": {
"keep_dims": "keepdims",
},
"tf.norm": {
"keep_dims": "keepdims",
},
"tf.load_file_system_library": {
"library_filename": "library_location",
},
"tf.count_nonzero": {
"input_tensor": "input",
"keep_dims": "keepdims",
"reduction_indices": "axis",
},
"tf.math.count_nonzero": {
"input_tensor": "input",
"keep_dims": "keepdims",
"reduction_indices": "axis",
},
"tf.nn.erosion2d": {
"kernel": "filters",
"rates": "dilations",
},
"tf.math.l2_normalize": {
"dim": "axis",
},
"tf.math.log_softmax": {
"dim": "axis",
},
"tf.math.softmax": {
"dim": "axis"
},
"tf.nn.l2_normalize": {
"dim": "axis",
},
"tf.nn.log_softmax": {
"dim": "axis",
},
"tf.nn.moments": {
"keep_dims": "keepdims",
},
"tf.nn.pool": {
"dilation_rate": "dilations"
},
"tf.nn.separable_conv2d": {
"rate": "dilations"
},
"tf.nn.depthwise_conv2d": {
"rate": "dilations"
},
"tf.nn.softmax": {
"dim": "axis"
},
"tf.nn.sufficient_statistics": {
"keep_dims": "keepdims"
},
"tf.debugging.assert_all_finite": {
"t": "x",
"msg": "message",
},
"tf.sparse.add": {
"thresh": "threshold",
},
"tf.sparse_add": {
"thresh": "threshold",
},
"tf.sparse.concat": {
"concat_dim": "axis",
"expand_nonconcat_dim": "expand_nonconcat_dims",
},
"tf.sparse_concat": {
"concat_dim": "axis",
"expand_nonconcat_dim": "expand_nonconcat_dims",
},
"tf.sparse.split": {
"split_dim": "axis",
},
"tf.sparse_split": {
"split_dim": "axis",
},
"tf.sparse.reduce_max": {
"reduction_axes": "axis",
"keep_dims": "keepdims",
},
"tf.sparse_reduce_max": {
"reduction_axes": "axis",
"keep_dims": "keepdims",
},
"tf.sparse.reduce_sum": {
"reduction_axes": "axis",
"keep_dims": "keepdims",
},
"tf.sparse_reduce_sum": {
"reduction_axes": "axis",
"keep_dims": "keepdims",
},
"tf.nn.max_pool_with_argmax": {
"Targmax": "output_dtype",
},
"tf.nn.max_pool": {
"value": "input"
},
"tf.multinomial": {
"output_dtype": "dtype",
},
"tf.random.multinomial": {
"output_dtype": "dtype",
},
"tf.reverse_sequence": {
"seq_dim": "seq_axis",
"batch_dim": "batch_axis",
},
"tf.nn.batch_norm_with_global_normalization": {
"t": "input",
"m": "mean",
"v": "variance",
},
"tf.nn.dilation2d": {
"filter": "filters",
"rates": "dilations",
},
"tf.nn.conv3d": {
"filter": "filters"
},
"tf.zeros_like": {
"tensor": "input",
},
"tf.ones_like": {
"tensor": "input",
},
"tf.nn.conv2d_transpose": {
"value": "input",
"filter": "filters",
},
"tf.nn.conv3d_transpose": {
"value": "input",
"filter": "filters",
},
"tf.nn.convolution": {
"filter": "filters",
"dilation_rate": "dilations",
},
"tf.gfile.Exists": {
"filename": "path",
},
"tf.gfile.Remove": {
"filename": "path",
},
"tf.gfile.Stat": {
"filename": "path",
},
"tf.gfile.Glob": {
"filename": "pattern",
},
"tf.gfile.MkDir": {
"dirname": "path",
},
"tf.gfile.MakeDirs": {
"dirname": "path",
},
"tf.gfile.DeleteRecursively": {
"dirname": "path",
},
"tf.gfile.IsDirectory": {
"dirname": "path",
},
"tf.gfile.ListDirectory": {
"dirname": "path",
},
"tf.gfile.Copy": {
"oldpath": "src",
"newpath": "dst",
},
"tf.gfile.Rename": {
"oldname": "src",
"newname": "dst",
},
"tf.gfile.Walk": {
"in_order": "topdown",
},
"tf.random.stateless_multinomial": {
"output_dtype": "dtype",
},
"tf.string_to_number": {
"string_tensor": "input",
},
"tf.strings.to_number": {
"string_tensor": "input",
},
"tf.string_to_hash_bucket": {
"string_tensor": "input",
},
"tf.strings.to_hash_bucket": {
"string_tensor": "input",
},
"tf.reduce_all": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_all": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_any": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_any": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_min": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_min": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_max": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_max": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_sum": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_sum": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_mean": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_mean": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_prod": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_prod": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_logsumexp": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_logsumexp": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_join": {
"keep_dims": "keepdims",
"reduction_indices": "axis"
},
"tf.strings.reduce_join": {
"keep_dims": "keepdims",
"reduction_indices": "axis"
},
"tf.squeeze": {
"squeeze_dims": "axis",
},
"tf.nn.weighted_moments": {
"keep_dims": "keepdims"
},
"tf.nn.conv1d": {
"value": "input",
"use_cudnn_on_gpu": None,
},
"tf.nn.conv2d": {
"filter": "filters",
"use_cudnn_on_gpu": None,
},
"tf.nn.conv2d_backprop_filter": {
"use_cudnn_on_gpu": None,
},
"tf.nn.conv2d_backprop_input": {
"filter": "filters",
"use_cudnn_on_gpu": None,
},
}
# pylint: disable=line-too-long
# Add additional renames not in renames_v2.py here.
# IMPORTANT: For the renames in here, if you also need to add to
# function_reorders or function_keyword_renames, use the OLD function name.
# These renames happen after the arguments have been processed.
self.manual_symbol_renames = {
"tf.batch_to_space_nd":
"tf.batch_to_space",
"tf.batch_gather":
"tf.gather",
"tf.space_to_batch_nd":
"tf.space_to_batch",
"tf.nn.space_to_batch":
"tf.space_to_batch",
"tf.estimator.inputs":
"tf.compat.v1.estimator.inputs",
"tf.extract_image_patches":
"tf.image.extract_image_patches",
"tf.gfile.Copy":
"tf.io.gfile.copy",
"tf.gfile.DeleteRecursively":
"tf.io.gfile.rmtree",
"tf.gfile.Exists":
"tf.io.gfile.exists",
"tf.gfile.Glob":
"tf.io.gfile.glob",
"tf.gfile.IsDirectory":
"tf.io.gfile.isdir",
"tf.gfile.ListDirectory":
"tf.io.gfile.listdir",
"tf.gfile.MakeDirs":
"tf.io.gfile.makedirs",
"tf.gfile.MkDir":
"tf.io.gfile.mkdir",
"tf.gfile.Remove":
"tf.io.gfile.remove",
"tf.gfile.Rename":
"tf.io.gfile.rename",
"tf.gfile.Stat":
"tf.io.gfile.stat",
"tf.gfile.Walk":
"tf.io.gfile.walk",
"tf.contrib.data.AUTOTUNE":
"tf.data.experimental.AUTOTUNE",
"tf.contrib.data.Counter":
"tf.data.experimental.Counter",
"tf.contrib.data.CheckpointInputPipelineHook":
"tf.data.experimental.CheckpointInputPipelineHook",
"tf.contrib.data.CsvDataset":
"tf.data.experimental.CsvDataset",
"tf.contrib.data.Optional":
"tf.data.experimental.Optional",
"tf.contrib.data.RandomDataset":
"tf.data.experimental.RandomDataset",
"tf.contrib.data.Reducer":
"tf.data.experimental.Reducer",
"tf.contrib.data.SqlDataset":
"tf.data.experimental.SqlDataset",
"tf.contrib.data.StatsAggregator":
"tf.data.experimental.StatsAggregator",
"tf.contrib.data.TFRecordWriter":
"tf.data.experimental.TFRecordWriter",
"tf.contrib.data.assert_element_shape":
"tf.data.experimental.assert_element_shape",
"tf.contrib.data.batch_and_drop_remainder":
"tf.compat.v1.contrib.data.batch_and_drop_remainder",
"tf.contrib.data.bucket_by_sequence_length":
"tf.data.experimental.bucket_by_sequence_length",
"tf.contrib.data.choose_from_datasets":
"tf.data.experimental.choose_from_datasets",
"tf.contrib.data.copy_to_device":
"tf.data.experimental.copy_to_device",
"tf.contrib.data.dense_to_sparse_batch":
"tf.data.experimental.dense_to_sparse_batch",
"tf.contrib.data.enumerate_dataset":
"tf.data.experimental.enumerate_dataset",
"tf.contrib.data.get_next_as_optional":
"tf.data.experimental.get_next_as_optional",
"tf.contrib.data.get_single_element":
"tf.data.experimental.get_single_element",
"tf.contrib.data.group_by_reducer":
"tf.data.experimental.group_by_reducer",
"tf.contrib.data.group_by_window":
"tf.data.experimental.group_by_window",
"tf.contrib.data.ignore_errors":
"tf.data.experimental.ignore_errors",
"tf.contrib.data.latency_stats":
"tf.data.experimental.latency_stats",
"tf.contrib.data.make_batched_features_dataset":
"tf.data.experimental.make_batched_features_dataset",
"tf.contrib.data.make_csv_dataset":
"tf.data.experimental.make_csv_dataset",
"tf.contrib.data.make_saveable_from_iterator":
"tf.data.experimental.make_saveable_from_iterator",
"tf.contrib.data.map_and_batch":
"tf.data.experimental.map_and_batch",
"tf.contrib.data.padded_batch_and_drop_remainder":
"tf.compat.v1.contrib.data.padded_batch_and_drop_remainder",
"tf.contrib.data.parallel_interleave":
"tf.data.experimental.parallel_interleave",
"tf.contrib.data.parse_example_dataset":
"tf.data.experimental.parse_example_dataset",
"tf.contrib.data.prefetch_to_device":
"tf.data.experimental.prefetch_to_device",
"tf.contrib.data.read_batch_features":
"tf.compat.v1.contrib.data.read_batch_features",
"tf.contrib.data.reduce_dataset":
"tf.compat.v1.contrib.data.reduce_dataset",
"tf.contrib.data.rejection_resample":
"tf.data.experimental.rejection_resample",
"tf.contrib.data.sample_from_datasets":
"tf.data.experimental.sample_from_datasets",
"tf.contrib.data.scan":
"tf.data.experimental.scan",
"tf.contrib.data.set_stats_aggregator":
"tf.data.experimental.set_stats_aggregator",
"tf.contrib.data.shuffle_and_repeat":
"tf.data.experimental.shuffle_and_repeat",
"tf.contrib.data.sliding_window_batch":
"tf.compat.v1.contrib.data.sliding_window_batch",
"tf.contrib.data.sloppy_interleave":
"tf.compat.v1.contrib.data.sloppy_interleave",
"tf.contrib.data.unbatch":
"tf.data.experimental.unbatch",
"tf.contrib.data.unique":
"tf.data.experimental.unique",
"tf.contrib.framework.CriticalSection":
"tf.CriticalSection",
"tf.contrib.framework.is_tensor":
"tf.is_tensor",
"tf.contrib.framework.nest.assert_same_structure":
"tf.nest.assert_same_structure",
"tf.contrib.framework.nest.flatten":
"tf.nest.flatten",
"tf.contrib.framework.nest.map_structure":
"tf.nest.map_structure",
"tf.contrib.framework.nest.pack_sequence_as":
"tf.nest.pack_sequence_as",
"tf.contrib.util.constant_value":
"tf.get_static_value",
"tf.contrib.saved_model.load_keras_model":
"tf.keras.experimental.load_from_saved_model",
"tf.contrib.saved_model.save_keras_model":
"tf.keras.experimental.export",
"tf.contrib.rnn.RNNCell":
"tf.nn.rnn_cell.RNNCell",
"tf.contrib.rnn.LSTMStateTuple":
"tf.nn.rnn_cell.LSTMStateTuple",
"tf.contrib.framework.sort":
"tf.sort",
"tf.contrib.framework.argsort":
"tf.argsort",
"tf.count_nonzero":
"tf.math.count_nonzero",
"tf.manip.batch_to_space_nd":
"tf.batch_to_space",
"tf.quantize_v2":
"tf.quantization.quantize",
"tf.sparse_add":
"tf.sparse.add",
"tf.sparse_concat":
"tf.sparse.concat",
"tf.sparse_split":
"tf.sparse.split",
"tf.sparse_matmul":
"tf.linalg.matmul",
"tf.sparse_reduce_sum":
"tf.sparse.reduce_sum",
"tf.sparse_reduce_max":
"tf.sparse.reduce_max",
"tf.random.stateless_multinomial":
"tf.random.stateless_categorical",
"tf.substr":
"tf.strings.substr",
"tf.string_to_hash_bucket":
"tf.strings.to_hash_bucket",
"tf.string_to_number":
"tf.strings.to_number",
"tf.multinomial":
"tf.random.categorical",
"tf.random.multinomial":
"tf.random.categorical",
"tf.reduce_join":
"tf.strings.reduce_join",
"tf.load_file_system_library":
"tf.load_library",
"tf.pywrap_tensorflow":
"tf.compat.v1.pywrap_tensorflow",
"tf.bincount":
"tf.math.bincount",
"tf.confusion_matrix":
"tf.math.confusion_matrix",
"tf.train.confusion_matrix":
"tf.math.confusion_matrix",
"tf.decode_csv":
"tf.io.decode_csv",
"tf.data.Iterator":
"tf.compat.v1.data.Iterator",
"tf.parse_example":
"tf.io.parse_example",
"tf.parse_single_example":
"tf.io.parse_single_example",
"tf.nn.fused_batch_norm":
"tf.compat.v1.nn.fused_batch_norm",
"tf.nn.softmax_cross_entropy_with_logits_v2":
"tf.nn.softmax_cross_entropy_with_logits",
"tf.losses.Reduction.MEAN":
"tf.compat.v1.losses.Reduction.MEAN",
"tf.losses.Reduction.SUM_BY_NONZERO_WEIGHTS":
"tf.compat.v1.losses.Reduction.SUM_BY_NONZERO_WEIGHTS",
"tf.losses.Reduction.SUM_OVER_NONZERO_WEIGHTS":
"tf.compat.v1.losses.Reduction.SUM_OVER_NONZERO_WEIGHTS",
"tf.lite.constants.FLOAT":
"tf.float32",
"tf.lite.constants.INT32":
"tf.int32",
"tf.lite.constants.INT64":
"tf.int64",
"tf.lite.constants.STRING":
"tf.string",
"tf.lite.constants.QUANTIZED_UINT8":
"tf.uint8",
"tf.arg_max":
"tf.argmax",
"tf.arg_min":
"tf.argmin",
# tf.nn.ctc_loss is still available in 2.0 but behavior
# changed significantly.
"tf.nn.ctc_loss":
"tf.compat.v1.nn.ctc_loss",
"tf.zeros_initializer":
"tf.compat.v1.initializers.zeros",
"tf.ones_initializer":
"tf.compat.v1.initializers.ones",
"tf.constant_initializer":
"tf.compat.v1.initializers.constant",
"tf.random_uniform_initializer":
"tf.compat.v1.initializers.random_uniform",
"tf.random_normal_initializer":
"tf.compat.v1.initializers.random_normal",
"tf.truncated_normal_initializer":
"tf.compat.v1.initializers.truncated_normal",
"tf.image.resize_images":
"tf.image.resize",
"tf.random_poisson":
"tf.random.poisson",
"tf.debugging.assert_greater":
"tf.compat.v1.debugging.assert_greater",
"tf.debugging.assert_greater_equal":
"tf.compat.v1.debugging.assert_greater_equal",
"tf.debugging.assert_integer":
"tf.compat.v1.debugging.assert_integer",
"tf.debugging.assert_less":
"tf.compat.v1.debugging.assert_less",
"tf.debugging.assert_less_equal":
"tf.compat.v1.debugging.assert_less_equal",
"tf.debugging.assert_near":
"tf.compat.v1.debugging.assert_near",
"tf.debugging.assert_negative":
"tf.compat.v1.debugging.assert_negative",
"tf.debugging.assert_non_negative":
"tf.compat.v1.debugging.assert_non_negative",
"tf.debugging.assert_non_positive":
"tf.compat.v1.debugging.assert_non_positive",
"tf.debugging.assert_none_equal":
"tf.compat.v1.debugging.assert_none_equal",
"tf.debugging.assert_type":
"tf.compat.v1.debugging.assert_type",
"tf.debugging.assert_positive":
"tf.compat.v1.debugging.assert_positive",
"tf.debugging.assert_equal":
"tf.compat.v1.debugging.assert_equal",
"tf.debugging.assert_scalar":
"tf.compat.v1.debugging.assert_scalar",
"tf.assert_equal":
"tf.compat.v1.assert_equal",
"tf.assert_less":
"tf.compat.v1.assert_less",
"tf.assert_greater":
"tf.compat.v1.assert_greater",
"tf.debugging.assert_rank":
"tf.compat.v1.debugging.assert_rank",
"tf.debugging.assert_rank_at_least":
"tf.compat.v1.debugging.assert_rank_at_least",
"tf.debugging.assert_rank_in":
"tf.compat.v1.debugging.assert_rank_in",
"tf.assert_rank":
"tf.compat.v1.assert_rank",
"tf.contrib.framework.argsort":
"tf.argsort",
"tf.nn.max_pool":
"tf.nn.max_pool2d",
'tf.keras.initializers.zeros':
'tf.compat.v1.keras.initializers.zeros',
'tf.keras.initializers.ones':
'tf.compat.v1.keras.initializers.ones',
'tf.keras.initializers.constant':
'tf.compat.v1.keras.initializers.constant',
"tf.data.experimental.map_and_batch_with_legacy_function":
"tf.compat.v1.data.experimental.map_and_batch_with_legacy_function",
}
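    # Editor's note (illustrative, using entries already present in this file):
    # "tf.multinomial" is keyed above under its OLD name, matching the
    # {"output_dtype": "dtype"} keyword rename and its entry in
    # reordered_function_names, while the symbol rename maps it to
    # "tf.random.categorical". A call such as
    #     tf.multinomial(logits, 5, output_dtype=tf.int32)
    # therefore ends up as a call to tf.random.categorical with dtype=tf.int32
    # (positional arguments may also be rewritten as keywords, since the
    # function is listed in reordered_function_names).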
# pylint: enable=line-too-long
# Mapping from function to the new name of the function
self.symbol_renames = renames_v2.renames
self.symbol_renames.update(self.manual_symbol_renames)
self.symbol_renames = {
name: new_name
for name, new_name in self.symbol_renames.items()
}
# Variables that should be changed to functions.
self.change_to_function = {}
# pylint: disable=line-too-long
# This list should just contain names of functions that had
# their arguments reordered. After adding a function name to the list
# run the following to update reorders_v2.py:
# bazel build tensorflow/tools/compatibility/update:generate_v2_reorders_map
# bazel-bin/tensorflow/tools/compatibility/update/generate_v2_reorders_map
# pylint: enable=line-too-long
self.reordered_function_names = {
"tf.io.serialize_sparse",
"tf.io.serialize_many_sparse",
"tf.argmax",
"tf.argmin",
"tf.batch_to_space",
"tf.cond",
"tf.nn.space_to_batch",
"tf.boolean_mask",
"tf.convert_to_tensor",
"tf.nn.conv1d",
"tf.nn.conv2d",
"tf.nn.conv2d_backprop_filter",
"tf.nn.conv2d_backprop_input",
"tf.nn.ctc_beam_search_decoder",
"tf.nn.moments",
"tf.nn.convolution",
"tf.nn.crelu",
"tf.nn.weighted_moments",
"tf.nn.pool",
"tf.nn.separable_conv2d",
"tf.nn.depthwise_conv2d",
"tf.multinomial",
"tf.random.multinomial",
"tf.pad",
"tf.quantize_v2",
"tf.feature_column.categorical_column_with_vocabulary_file",
"tf.shape",
"tf.size",
"tf.random.poisson",
"tf.sparse.add",
"tf.sparse_add",
"tf.sparse.concat",
"tf.sparse_concat",
"tf.sparse.segment_mean",
"tf.sparse.segment_sqrt_n",
"tf.sparse.segment_sum",
"tf.sparse_matmul",
"tf.sparse.reduce_max",
"tf.sparse_reduce_max",
"tf.io.decode_csv",
"tf.strings.length",
"tf.strings.reduce_join",
"tf.strings.substr",
"tf.substr",
"tf.transpose",
"tf.tuple",
"tf.parse_example",
"tf.parse_single_example",
"tf.io.parse_example",
"tf.io.parse_single_example",
"tf.while_loop",
"tf.reduce_all",
"tf.math.reduce_all",
"tf.reduce_any",
"tf.math.reduce_any",
"tf.reduce_min",
"tf.math.reduce_min",
"tf.reduce_max",
"tf.math.reduce_max",
"tf.reduce_sum",
"tf.math.reduce_sum",
"tf.reduce_mean",
"tf.math.reduce_mean",
"tf.reduce_prod",
"tf.math.reduce_prod",
"tf.reduce_logsumexp",
"tf.math.reduce_logsumexp",
"tf.reduce_join",
"tf.confusion_matrix",
"tf.math.confusion_matrix",
"tf.math.in_top_k",
"tf.nn.depth_to_space",
"tf.nn.embedding_lookup",
"tf.nn.embedding_lookup_sparse",
"tf.nn.in_top_k",
"tf.nn.space_to_depth",
"tf.test.assert_equal_graph_def",
"tf.linalg.norm",
"tf.norm",
"tf.reverse_sequence",
"tf.sparse_split",
# tf.nn.softmax_cross_entropy_with_logits *must* be called with
        # keyword arguments. Add keyword arguments in the rare case when they
# are not specified.
"tf.nn.softmax_cross_entropy_with_logits",
"tf.nn.fractional_avg_pool",
"tf.nn.fractional_max_pool",
"tf.image.sample_distorted_bounding_box",
"tf.gradients",
"tf.hessians",
"tf.nn.max_pool",
}
# Functions that were reordered should be changed to the new keyword args
# for safety, if positional arguments are used. If you have reversed the
# positional arguments yourself, this could do the wrong thing.
self.function_reorders = reorders_v2.reorders
contrib_warning = (
ast_edits.ERROR,
"<function name> cannot be converted automatically. tf.contrib will not"
" be distributed with TensorFlow 2.0, please consider an alternative in"
" non-contrib TensorFlow, a community-maintained repository, or fork "
"the required code."
)
flags_warning = (
ast_edits.ERROR,
"tf.flags has been removed, please use the argparse or absl"
" modules if you need command line parsing.")
decay_function_comment = (
ast_edits.INFO,
"To use learning rate decay schedules with TensorFlow 2.0, switch to "
"the schedules in `tf.keras.optimizers.schedules`.\n"
)
assert_return_type_comment = (
ast_edits.INFO,
"<function name> has been changed to return None, the "
"data argument has been removed, and arguments have been reordered."
"\nThe calls have been converted to compat.v1 for safety (even though "
" they may already have been correct)."
)
assert_rank_comment = (
ast_edits.INFO,
"<function name> has been changed to return None, and"
" the data and summarize arguments have been removed."
"\nThe calls have been converted to compat.v1 for safety (even though "
" they may already have been correct)."
)
initializers_no_dtype_comment = (
ast_edits.INFO,
"Initializers no longer have the "
"dtype argument in the constructor or partition_info argument in the "
"__call__ method.\nThe calls have been converted to compat.v1 for"
"safety (even though they may already have been correct).")
metrics_comment = (
ast_edits.INFO,
"tf.metrics have been replaced with object oriented versions in"
" TF 2.0 and after. The metric function calls have been converted to "
"compat.v1 for backward compatibility. Please update these calls to "
"the TF 2.0 versions.")
losses_comment = (
ast_edits.INFO,
"tf.losses have been replaced with object oriented versions in"
" TF 2.0 and after. The loss function calls have been converted to "
"compat.v1 for backward compatibility. Please update these calls to "
"the TF 2.0 versions.")
# This could be done with a _rename_if_arg_not_found_transformer
deprecate_partition_strategy_comment = (
ast_edits.WARNING,
"`partition_strategy` has been removed from <function name>. "
" The 'div' strategy will be used by default.")
# TODO(b/118888586): add default value change to update script.
default_loss_reduction_changed = (
ast_edits.WARNING,
"default value of loss_reduction has been changed to "
"SUM_OVER_BATCH_SIZE.\n"
)
# make change instead
uniform_unit_scaling_initializer_comment = (
ast_edits.ERROR,
"uniform_unit_scaling_initializer has been removed. Please use"
" tf.initializers.variance_scaling instead with distribution=uniform "
"to get equivalent behaviour.")
# Make change instead (issue warning about strip_...)
export_saved_model_renamed = (
ast_edits.ERROR,
"(Manual edit required) Please rename the method export_savedmodel() "
"to export_saved_model(). Two things to note:\n\t(1) The argument "
"strip_default_attributes has been removed. The function will always "
"strip the default attributes from ops. If this breaks your code, "
"please switch to tf.compat.v1.estimator.Estimator.\n\t(2) This change "
"only effects core estimator. If you are using "
"tf.contrib.learn.Estimator, please switch to using core estimator.")
# Function warnings. <function name> placeholder inside warnings will be
# replaced by function name.
# You can use *. to add items which do not check the FQN, and apply to e.g.,
# methods.
self.function_warnings = {
"*.export_savedmodel":
export_saved_model_renamed,
"tf.assert_equal":
assert_return_type_comment,
"tf.assert_none_equal":
assert_return_type_comment,
"tf.assert_negative":
assert_return_type_comment,
"tf.assert_positive":
assert_return_type_comment,
"tf.assert_non_negative":
assert_return_type_comment,
"tf.assert_non_positive":
assert_return_type_comment,
"tf.assert_near":
assert_return_type_comment,
"tf.assert_less":
assert_return_type_comment,
"tf.assert_less_equal":
assert_return_type_comment,
"tf.assert_greater":
assert_return_type_comment,
"tf.assert_greater_equal":
assert_return_type_comment,
"tf.assert_integer":
assert_return_type_comment,
"tf.assert_type":
assert_return_type_comment,
"tf.assert_scalar":
assert_return_type_comment,
"tf.assert_rank":
assert_rank_comment,
"tf.assert_rank_at_least":
assert_rank_comment,
"tf.assert_rank_in":
assert_rank_comment,
"tf.debugging.assert_equal":
assert_return_type_comment,
"tf.debugging.assert_greater":
assert_return_type_comment,
"tf.debugging.assert_greater_equal":
assert_return_type_comment,
"tf.debugging.assert_integer":
assert_return_type_comment,
"tf.debugging.assert_less":
assert_return_type_comment,
"tf.debugging.assert_less_equal":
assert_return_type_comment,
"tf.debugging.assert_near":
assert_return_type_comment,
"tf.debugging.assert_negative":
assert_return_type_comment,
"tf.debugging.assert_non_negative":
assert_return_type_comment,
"tf.debugging.assert_non_positive":
assert_return_type_comment,
"tf.debugging.assert_none_equal":
assert_return_type_comment,
"tf.debugging.assert_positive":
assert_return_type_comment,
"tf.debugging.assert_type":
assert_return_type_comment,
"tf.debugging.assert_scalar":
assert_return_type_comment,
"tf.debugging.assert_rank":
assert_rank_comment,
"tf.debugging.assert_rank_at_least":
assert_rank_comment,
"tf.debugging.assert_rank_in":
assert_rank_comment,
"tf.train.exponential_decay":
decay_function_comment,
"tf.train.piecewise_constant_decay":
decay_function_comment,
"tf.train.polynomial_decay":
decay_function_comment,
"tf.train.natural_exp_decay":
decay_function_comment,
"tf.train.inverse_time_decay":
decay_function_comment,
"tf.train.cosine_decay":
decay_function_comment,
"tf.train.cosine_decay_restarts":
decay_function_comment,
"tf.train.linear_cosine_decay":
decay_function_comment,
"tf.train.noisy_linear_cosine_decay":
decay_function_comment,
"tf.estimator.LinearClassifier":
default_loss_reduction_changed,
"tf.estimator.LinearRegressor":
default_loss_reduction_changed,
"tf.estimator.DNNLinearCombinedClassifier":
default_loss_reduction_changed,
"tf.estimator.DNNLinearCombinedRegressor":
default_loss_reduction_changed,
"tf.estimator.DNNRegressor":
default_loss_reduction_changed,
"tf.estimator.DNNClassifier":
default_loss_reduction_changed,
"tf.estimator.BaselineClassifier":
default_loss_reduction_changed,
"tf.estimator.BaselineRegressor":
default_loss_reduction_changed,
"tf.nn.nce_loss":
deprecate_partition_strategy_comment,
"tf.nn.safe_embedding_lookup_sparse":
deprecate_partition_strategy_comment,
"tf.nn.sampled_softmax_loss":
deprecate_partition_strategy_comment,
"tf.keras.initializers.Zeros":
initializers_no_dtype_comment,
"tf.keras.initializers.zeros":
initializers_no_dtype_comment,
"tf.keras.initializers.Ones":
initializers_no_dtype_comment,
"tf.keras.initializers.ones":
initializers_no_dtype_comment,
"tf.keras.initializers.Constant":
initializers_no_dtype_comment,
"tf.keras.initializers.constant":
initializers_no_dtype_comment,
"tf.keras.initializers.VarianceScaling":
initializers_no_dtype_comment,
"tf.keras.initializers.Orthogonal":
initializers_no_dtype_comment,
"tf.keras.initializers.orthogonal":
initializers_no_dtype_comment,
"tf.keras.initializers.Identity":
initializers_no_dtype_comment,
"tf.keras.initializers.identity":
initializers_no_dtype_comment,
"tf.keras.initializers.glorot_uniform":
initializers_no_dtype_comment,
"tf.keras.initializers.glorot_normal":
initializers_no_dtype_comment,
"tf.initializers.zeros":
initializers_no_dtype_comment,
"tf.zeros_initializer":
initializers_no_dtype_comment,
"tf.initializers.ones":
initializers_no_dtype_comment,
"tf.ones_initializer":
initializers_no_dtype_comment,
"tf.initializers.constant":
initializers_no_dtype_comment,
"tf.constant_initializer":
initializers_no_dtype_comment,
"tf.initializers.random_uniform":
initializers_no_dtype_comment,
"tf.random_uniform_initializer":
initializers_no_dtype_comment,
"tf.initializers.random_normal":
initializers_no_dtype_comment,
"tf.random_normal_initializer":
initializers_no_dtype_comment,
"tf.initializers.truncated_normal":
initializers_no_dtype_comment,
"tf.truncated_normal_initializer":
initializers_no_dtype_comment,
"tf.initializers.variance_scaling":
initializers_no_dtype_comment,
"tf.variance_scaling_initializer":
initializers_no_dtype_comment,
"tf.initializers.orthogonal":
initializers_no_dtype_comment,
"tf.orthogonal_initializer":
initializers_no_dtype_comment,
"tf.initializers.identity":
initializers_no_dtype_comment,
"tf.glorot_uniform_initializer":
initializers_no_dtype_comment,
"tf.initializers.glorot_uniform":
initializers_no_dtype_comment,
"tf.glorot_normal_initializer":
initializers_no_dtype_comment,
"tf.initializers.glorot_normal":
initializers_no_dtype_comment,
"tf.initializers.uniform_unit_scaling":
uniform_unit_scaling_initializer_comment,
"tf.uniform_unit_scaling_initializer":
uniform_unit_scaling_initializer_comment,
"tf.losses.absolute_difference":
losses_comment,
"tf.losses.add_loss":
losses_comment,
"tf.losses.compute_weighted_loss":
losses_comment,
"tf.losses.cosine_distance":
losses_comment,
"tf.losses.get_losses":
losses_comment,
"tf.losses.get_regularization_loss":
losses_comment,
"tf.losses.get_regularization_losses":
losses_comment,
"tf.losses.get_total_loss":
losses_comment,
"tf.losses.hinge_loss":
losses_comment,
"tf.losses.huber_loss":
losses_comment,
"tf.losses.log_loss":
losses_comment,
"tf.losses.mean_pairwise_squared_error":
losses_comment,
"tf.losses.mean_squared_error":
losses_comment,
"tf.losses.sigmoid_cross_entropy":
losses_comment,
"tf.losses.softmax_cross_entropy":
losses_comment,
"tf.losses.sparse_softmax_cross_entropy":
losses_comment,
"tf.metrics.accuracy":
metrics_comment,
"tf.metrics.auc":
metrics_comment,
"tf.metrics.average_precision_at_k":
metrics_comment,
"tf.metrics.false_negatives":
metrics_comment,
"tf.metrics.false_negatives_at_thresholds":
metrics_comment,
"tf.metrics.false_positives":
metrics_comment,
"tf.metrics.false_positives_at_thresholds":
metrics_comment,
"tf.metrics.mean":
metrics_comment,
"tf.metrics.mean_absolute_error":
metrics_comment,
"tf.metrics.mean_cosine_distance":
metrics_comment,
"tf.metrics.mean_iou":
metrics_comment,
"tf.metrics.mean_per_class_accuracy":
metrics_comment,
"tf.metrics.mean_relative_error":
metrics_comment,
"tf.metrics.mean_squared_error":
metrics_comment,
"tf.metrics.mean_tensor":
metrics_comment,
"tf.metrics.percentage_below":
metrics_comment,
"tf.metrics.precision":
metrics_comment,
"tf.metrics.precision_at_k":
metrics_comment,
"tf.metrics.precision_at_thresholds":
metrics_comment,
"tf.metrics.precision_at_top_k":
metrics_comment,
"tf.metrics.recall":
metrics_comment,
"tf.metrics.recall_at_k":
metrics_comment,
"tf.metrics.recall_at_thresholds":
metrics_comment,
"tf.metrics.recall_at_top_k":
metrics_comment,
"tf.metrics.root_mean_squared_error":
metrics_comment,
"tf.metrics.sensitivity_at_specificity":
metrics_comment,
"tf.metrics.sparse_average_precision_at_k":
metrics_comment,
"tf.metrics.sparse_precision_at_k":
metrics_comment,
"tf.metrics.specificity_at_sensitivity":
metrics_comment,
"tf.metrics.true_negatives":
metrics_comment,
"tf.metrics.true_negatives_at_thresholds":
metrics_comment,
"tf.metrics.true_positives":
metrics_comment,
"tf.metrics.true_positives_at_thresholds":
metrics_comment,
}
# Warnings that are emitted only if a specific arg is found.
self.function_arg_warnings = {
"tf.nn.conv1d": {
("use_cudnn_on_gpu", 4): (
ast_edits.WARNING,
"use_cudnn_on_gpu has been removed, behavior is now equivalent"
"to setting it to True."),
},
"tf.nn.conv2d": {
("use_cudnn_on_gpu", 4): (
ast_edits.WARNING,
"use_cudnn_on_gpu has been removed, behavior is now equivalent"
"to setting it to True."),
},
"tf.nn.conv2d_backprop_filter": {
("use_cudnn_on_gpu", 5): (
ast_edits.WARNING,
"use_cudnn_on_gpu has been removed, behavior is now equivalent"
"to setting it to True."),
},
"tf.nn.conv2d_backprop_input": {
("use_cudnn_on_gpu", 5): (
ast_edits.WARNING,
"use_cudnn_on_gpu has been removed, behavior is now equivalent"
"to setting it to True."),
},
"tf.gradients": {
("colocate_gradients_with_ops", 4): (
ast_edits.INFO,
"tf.gradients no longer takes "
"'colocate_gradients_with_ops' argument, it behaves as if it "
"was set to True."),
},
"*.minimize": {
("colocate_gradients_with_ops", 5): (
ast_edits.INFO,
"Optimizer.minimize no longer takes "
"'colocate_gradients_with_ops' argument, it behaves as if it "
"was set to True."),
},
"*.compute_gradients": {
("colocate_gradients_with_ops", 4): (
ast_edits.INFO,
"Optimizer.compute_gradients no "
"longer takes 'colocate_gradients_with_ops' argument, it "
"behaves as if it was set to True."),
},
"tf.cond": {
("strict", 3): (
ast_edits.WARNING,
"tf.cond no longer takes 'strict' argument, it behaves as "
"if was set to True.")
},
}
# Specially handled functions
# Each transformer is a callable which will be called with the arguments
# transformer(parent, node, full_name, name, logs, errors)
# Where logs is a list to which (level, line, col, msg) tuples can be
# appended, full_name is the FQN of the function called (or None if that is
    # unknown), name is the name of the function called (or None if that is
# unknown). node is an ast.Call node representing this function call, and
# parent is its parent in the AST.
# The function may modify node (but not parent), and must return
# - none, if nothing was modified
# - node, if node was modified in place (make sure to use
# pasta.ast_utils.replace_child to swap out children, otherwise formatting
# may get messy)
# - a replacement for node, if the whole call node was replaced. The caller
# will take care of changing parent.
self.function_transformers = {
"*.make_initializable_iterator": _iterator_transformer,
"*.make_one_shot_iterator": _iterator_transformer,
"tf.nn.dropout": _dropout_transformer,
"tf.to_bfloat16": _cast_transformer,
"tf.to_complex128": _cast_transformer,
"tf.to_complex64": _cast_transformer,
"tf.to_double": _cast_transformer,
"tf.to_float": _cast_transformer,
"tf.to_int32": _cast_transformer,
"tf.to_int64": _cast_transformer,
"tf.nn.softmax_cross_entropy_with_logits":
_softmax_cross_entropy_with_logits_transformer,
"tf.image.extract_glimpse": _extract_glimpse_transformer,
"tf.image.resize_area": _image_resize_transformer,
"tf.image.resize_bicubic": _image_resize_transformer,
"tf.image.resize_bilinear": _image_resize_transformer,
"tf.image.resize_nearest_neighbor": _image_resize_transformer,
"tf.nn.fractional_avg_pool": _pool_seed_transformer,
"tf.nn.fractional_max_pool": _pool_seed_transformer,
"tf.device": functools.partial(
_rename_if_arg_found_transformer, arg_name="device_name",
arg_ok_predicate=_is_ast_str, remove_if_ok=False,
message="tf.device no longer takes functions as an argument. "
"We could not determine that the argument value is a string, so "
"the call was converted to compat.v1."),
"tf.zeros_like": functools.partial(
_rename_if_arg_found_transformer, arg_name="optimize",
arg_ok_predicate=_is_ast_true, remove_if_ok=True,
message="tf.zeros_like no longer takes an optimize argument, and "
"behaves as if optimize=True. This call site specifies something "
"other than optimize=True, so it was converted to compat.v1."),
"tf.ones_like": functools.partial(
_rename_if_arg_found_transformer, arg_name="optimize",
arg_ok_predicate=_is_ast_true, remove_if_ok=True,
message="tf.ones_like no longer takes an optimize argument, and "
"behaves as if optimize=True. This call site specifies something "
"other than optimize=True, so it was converted to compat.v1."),
"tf.while_loop": functools.partial(
_rename_if_arg_found_transformer,
arg_name="return_same_structure",
arg_ok_predicate=_is_ast_true, remove_if_ok=True,
message="tf.while_loop no longer takes 'return_same_structure' "
"argument and behaves as if return_same_structure=True. This call "
"site specifies something other than return_same_structure=True, "
"so it was converted to compat.v1."),
"tf.nn.ctc_beam_search_decoder": functools.partial(
_rename_if_arg_found_transformer,
arg_name="merge_repeated",
arg_ok_predicate=_is_ast_false, remove_if_ok=True,
message="tf.nn.ctc_beam_search_decoder no longer takes the "
"'merge_repeated' argument and behaves as if merge_repeated=False. "
"This call site specifies something other than "
"merge_repeated=False, so it was converted to compat.v1."),
"tf.nn.erosion2d": functools.partial(
_add_argument_transformer,
arg_name="data_format",
arg_value_ast=ast.Str("NHWC")),
}
self.module_deprecations = {
"tf.contrib": contrib_warning,
"tf.flags": flags_warning,
}
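

# --- Added illustrative sketch (not part of the original upgrade script) ---
# The constructor above documents the transformer protocol: a callable taking
# (parent, node, full_name, name, logs) that may rewrite `node` in place and
# must return None when nothing changed. The function below is a hypothetical,
# unregistered example that only shows the expected shape of such a callable.
def _example_noop_transformer(parent, node, full_name, name, logs):
  """Example transformer: record the call site and leave the node untouched."""
  logs.append((ast_edits.INFO, node.lineno, node.col_offset,
               "Saw a call to %s; no rewrite applied." % (full_name or name)))
  return None  # None signals that `node` was not modified.
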
def _is_ast_str(node):
"""Determine whether this node represents a string."""
allowed_types = [ast.Str]
if hasattr(ast, "Bytes"):
allowed_types += [ast.Bytes]
if hasattr(ast, "JoinedStr"):
allowed_types += [ast.JoinedStr]
if hasattr(ast, "FormattedValue"):
allowed_types += [ast.FormattedValue]
  return isinstance(node, tuple(allowed_types))
def _is_ast_true(node):
if hasattr(ast, "NameConstant"):
return isinstance(node, ast.NameConstant) and node.value is True
else:
return isinstance(node, ast.Name) and node.id == "True"
def _is_ast_false(node):
if hasattr(ast, "NameConstant"):
return isinstance(node, ast.NameConstant) and node.value is False
else:
return isinstance(node, ast.Name) and node.id == "False"
# Lots of unused arguments below, since these are called in a standard manner.
# pylint: disable=unused-argument
def _rename_if_arg_found_transformer(parent, node, full_name, name, logs,
arg_name=None,
arg_ok_predicate=None,
remove_if_ok=False,
message=None):
"""Replaces the given call with tf.compat.v1 if the given arg is found.
This requires the function to be called with all named args, so for using
this transformer, the function should also be added to renames.
If the arg is not found, the call site is left alone.
If the arg is found, and if arg_ok_predicate is given, it is called with
the ast Expression representing the argument value found. If it returns
True, the function is left alone.
If the arg is found, arg_ok_predicate is not None and returns ok, and
remove_if_ok is True, the argument is removed from the call.
Otherwise, `compat.v1` is inserted between tf and the function name.
Args:
parent: Parent of node.
node: ast.Call node to maybe modify.
full_name: full name of function to modify
name: name of function to modify
logs: list of logs to append to
arg_name: name of the argument to look for
arg_ok_predicate: predicate callable with the ast of the argument value,
returns whether the argument value is allowed.
remove_if_ok: remove the argument if present and ok as determined by
arg_ok_predicate.
message: message to print if a non-ok arg is found (and hence, the function
is renamed to its compat.v1 version).
Returns:
node, if it was modified, else None.
"""
# Check whether arg is there.
arg_present, arg_value = ast_edits.get_arg_value(node, arg_name)
if not arg_present:
return
# Check whether arg is problematic (and if not, maybe remove it).
if arg_ok_predicate and arg_ok_predicate(arg_value):
if remove_if_ok:
for i, kw in enumerate(node.keywords):
if kw.arg == arg_name:
node.keywords.pop(i)
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Removed argument %s for function %s" % (
arg_name, full_name or name)))
break
return node
else:
return
# All conditions met, insert v1 and log what we did.
# We must have a full name, so the func is an attribute.
new_name = full_name.replace("tf.", "tf.compat.v1.", 1)
node.func = ast_edits.full_name_node(new_name)
logs.append((
ast_edits.INFO, node.lineno, node.col_offset,
"Renaming %s to %s because argument %s is present. %s" %
(full_name, new_name, arg_name, message if message is not None else "")
))
return node
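

# Added commentary (illustrative only, not from the original source): with the
# partial bindings used in the constructor above (arg_name="optimize",
# arg_ok_predicate=_is_ast_true, remove_if_ok=True), this transformer turns
#   tf.zeros_like(t, optimize=False)  into  tf.compat.v1.zeros_like(t, optimize=False)
# drops the argument entirely from tf.zeros_like(t, optimize=True), and leaves
# tf.zeros_like(t) untouched.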
def _add_argument_transformer(parent, node, full_name, name, logs,
arg_name, arg_value_ast):
"""Adds an argument (as a final kwarg arg_name=arg_value_ast)."""
node.keywords.append(ast.keyword(arg=arg_name, value=arg_value_ast))
  logs.append((
      ast_edits.INFO, node.lineno, node.col_offset,
      "Adding argument '%s' to call to %s." % (pasta.dump(node.keywords[-1]),
                                               full_name or name)
  ))
return node
def _iterator_transformer(parent, node, full_name, name, logs):
"""Transform iterator methods to compat function calls."""
# First, check that node.func.value is not already something we like
# (tf.compat.v1.data), or something which is handled in the rename
# (tf.data). This transformer only handles the method call to function call
# conversion.
if full_name and (full_name.startswith("tf.compat.v1.data") or
full_name.startswith("tf.data")):
return
# This should never happen, since we're only called for Attribute nodes.
if not isinstance(node.func, ast.Attribute):
return
# Transform from x.f(y) to tf.compat.v1.data.f(x, y)
# Fortunately, node.func.value should already have valid position info
node.args = [node.func.value] + node.args
node.func.value = ast_edits.full_name_node("tf.compat.v1.data")
logs.append((ast_edits.WARNING, node.lineno, node.col_offset,
"Changing dataset.%s() to tf.compat.v1.data.%s(dataset). "
"Please check this transformation.\n" % (name, name)))
return node
def _dropout_transformer(parent, node, full_name, name, logs):
"""Replace keep_prob with 1-rate."""
def _replace_keep_prob_node(parent, old_value):
"""Replaces old_value with 1-(old_value)."""
one = ast.Num(n=1)
one.lineno = 0
one.col_offset = 0
new_value = ast.BinOp(left=one, op=ast.Sub(),
right=old_value)
# This copies the prefix and suffix on old_value to new_value.
pasta.ast_utils.replace_child(parent, old_value, new_value)
ast.copy_location(new_value, old_value)
# Put parentheses around keep_prob.value (and remove the old prefix/
# suffix, they should only be around new_value).
pasta.base.formatting.set(old_value, "prefix", "(")
pasta.base.formatting.set(old_value, "suffix", ")")
# Check if we have a keep_prob keyword arg
for keep_prob in node.keywords:
if keep_prob.arg == "keep_prob":
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Changing keep_prob arg of tf.nn.dropout to rate\n"))
keep_prob.arg = "rate"
_replace_keep_prob_node(keep_prob, keep_prob.value)
return node
# Maybe it was a positional arg
if len(node.args) < 2:
logs.append((ast_edits.ERROR, node.lineno, node.col_offset,
"tf.nn.dropout called without arguments, so "
"automatic fix was disabled. tf.nn.dropout has changed "
"the semantics of the second argument."))
else:
_replace_keep_prob_node(node, node.args[1])
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Changing keep_prob arg of tf.nn.dropout to rate, and "
"recomputing value.\n"))
return node
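

# Added commentary (illustrative only): the transformer above rewrites, e.g.,
#   tf.nn.dropout(x, keep_prob=0.4)  into  tf.nn.dropout(x, rate=1 - (0.4))
# and, when keep_prob is passed positionally,
#   tf.nn.dropout(x, 0.4)            into  tf.nn.dropout(x, 1 - (0.4))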
def _cast_transformer(parent, node, full_name, name, logs):
"""Transforms to_int and to_float to cast(..., dtype=...)."""
# Find out the dtype to cast to from the function name
dtype_str = name[3:]
# Special cases where the full dtype is not given
if dtype_str == "float":
dtype_str = "float32"
elif dtype_str == "double":
dtype_str = "float64"
new_arg = ast.keyword(arg="dtype",
value=ast.Attribute(value=ast.Name(id="tf",
ctx=ast.Load()),
attr=dtype_str, ctx=ast.Load()))
# Ensures a valid transformation when a positional name arg is given
if len(node.args) == 2:
name_arg = ast.keyword(arg="name",
value=node.args[-1])
node.args = node.args[:-1]
node.keywords.append(name_arg)
# Python3 ast requires the args for the Attribute, but codegen will mess up
# the arg order if we just set them to 0.
new_arg.value.lineno = node.lineno
new_arg.value.col_offset = node.col_offset+100
node.keywords.append(new_arg)
if isinstance(node.func, ast.Attribute):
node.func.attr = "cast"
else:
assert isinstance(node.func, ast.Name)
node.func.id = "cast"
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Changed %s call to tf.cast(..., dtype=tf.%s)." % (full_name,
dtype_str)))
return node
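

# Added commentary (illustrative only): the transformer above rewrites, e.g.,
#   tf.to_float(x)       into  tf.cast(x, dtype=tf.float32)
#   tf.to_int32(x, "n")  into  tf.cast(x, name="n", dtype=tf.int32)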
def _softmax_cross_entropy_with_logits_transformer(
parent, node, full_name, name, logs):
"""Wrap labels argument with stop_gradients."""
def _wrap_label(parent, old_value):
"""Wrap labels with tf.stop_gradient."""
if six.PY3:
new_value = ast.Call(
ast.Name(id="tf.stop_gradient", ctx=ast.Load()),
[old_value], [])
else:
new_value = ast.Call(
ast.Name(id="tf.stop_gradient", ctx=ast.Load()),
[old_value], [], None, None)
# This copies the prefix and suffix on old_value to new_value.
pasta.ast_utils.replace_child(parent, old_value, new_value)
ast.copy_location(new_value, old_value)
# Check if we have a labels keyword arg
for karg in node.keywords:
if karg.arg == "labels":
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Changing labels arg of "
"tf.nn.softmax_cross_entropy_with_logits to "
"tf.stop_gradient(labels). Please check this "
"transformation.\n"))
_wrap_label(karg, karg.value)
return node
return node
def _image_resize_transformer(parent, node, full_name, name, logs):
"""Transforms image.resize_* to image.resize(..., method=*, ...)."""
resize_method = name[7:].upper()
new_arg = ast.keyword(arg="method",
value=ast.Attribute(
value=ast.Attribute(
value=ast.Attribute(
value=ast.Name(id="tf", ctx=ast.Load()),
attr="image", ctx=ast.Load()),
attr="ResizeMethod", ctx=ast.Load()),
attr=resize_method, ctx=ast.Load()))
# Ensures a valid transformation when a positional name arg is given
if len(node.args) == 4:
pos_arg = ast.keyword(arg="preserve_aspect_ratio",
value=node.args[-1])
node.args = node.args[:-1]
node.keywords.append(pos_arg)
if len(node.args) == 3:
pos_arg = ast.keyword(arg="align_corners",
value=node.args[-1])
node.args = node.args[:-1]
node.keywords.append(pos_arg)
# Python3 ast requires the args for the Attribute, but codegen will mess up
# the arg order if we just set them to 0.
new_arg.value.lineno = node.lineno
new_arg.value.col_offset = node.col_offset+100
node.keywords.append(new_arg)
if isinstance(node.func, ast.Attribute):
node.func.attr = "resize"
else:
assert isinstance(node.func, ast.Name)
node.func.id = "resize"
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Changed %s call to tf.image.resize(..., "
"method=tf.image.ResizeMethod.%s)." % (full_name,
resize_method)))
return node
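

# Added commentary (illustrative only): the transformer above rewrites, e.g.,
#   tf.image.resize_bilinear(images, size)
# into
#   tf.image.resize(images, size, method=tf.image.ResizeMethod.BILINEAR)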
def _pool_seed_transformer(parent, node, full_name, name, logs):
"""Removes seed2 and deterministic, and adds non-zero seed if needed."""
# This requires that this function uses all kwargs (add to renames!).
seed_arg = None
deterministic = False
modified = False
new_keywords = []
for kw in node.keywords:
if sys.version_info[:2] >= (3, 5) and isinstance(kw, ast.Starred):
pass
elif kw.arg == "seed":
seed_arg = kw
elif kw.arg == "seed2" or kw.arg == "deterministic":
lineno = getattr(kw, "lineno", node.lineno)
col_offset = getattr(kw, "col_offset", node.col_offset)
logs.append((ast_edits.INFO, lineno, col_offset,
"Removed argument %s for function %s" % (
kw.arg, full_name or name)))
if kw.arg == "deterministic":
if not _is_ast_false(kw.value):
deterministic = True
modified = True
continue
new_keywords.append(kw)
if deterministic:
if seed_arg is None:
new_keywords.append(ast.keyword(arg="seed", value=ast.Num(42)))
      logs.append((
ast_edits.INFO, node.lineno, node.col_offset,
"Adding seed=42 to call to %s since determinism was requested" % (
full_name or name)
))
else:
      logs.append((
          ast_edits.WARNING, node.lineno, node.col_offset,
          "The deterministic argument is deprecated for %s, pass a "
          "non-zero seed for determinism. The deterministic argument is "
          "present, possibly not False, and the seed is already set. The "
          "converter cannot determine whether it is nonzero, please check."
          % (full_name or name)
      ))
if modified:
node.keywords = new_keywords
return node
else:
return
def _extract_glimpse_transformer(parent, node, full_name, name, logs):
def _replace_uniform_noise_node(parent, old_value):
"""Replaces old_value with 'uniform' or 'guassian'."""
uniform = ast.Str(s="uniform")
gaussian = ast.Str(s="gaussian")
new_value = ast.IfExp(body=uniform, test=old_value, orelse=gaussian)
# This copies the prefix and suffix on old_value to new_value.
pasta.ast_utils.replace_child(parent, old_value, new_value)
ast.copy_location(new_value, old_value)
# Put parentheses around noise.value.test (and remove the old prefix/
# suffix, they should only be around new_value.test), so that:
# "uniform" if (a if b else c) else "gaussian" is valid.
pasta.base.formatting.set(new_value.test, "prefix", "(")
pasta.base.formatting.set(new_value.test, "suffix", ")")
# Check if we have a uniform_noise keyword arg
for uniform_noise in node.keywords:
if uniform_noise.arg == "uniform_noise":
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Changing uniform_noise arg of tf.image.extract_glimpse "
"to noise, and recomputing value. Please check this "
"transformation.\n"))
uniform_noise.arg = "noise"
value = "uniform" if uniform_noise.value else "gaussian"
_replace_uniform_noise_node(uniform_noise, uniform_noise.value)
return node
  # Since `noise`/`uniform_noise` is an optional arg, nothing needs to be
  # done unless it is passed positionally (i.e. len(node.args) >= 6).
  if len(node.args) >= 6:
    _replace_uniform_noise_node(node, node.args[5])
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Changing uniform_noise arg of tf.image.extract_glimpse to "
"noise, and recomputing value.\n"))
return node
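

# Added commentary (illustrative only): the transformer above rewrites, e.g.,
#   tf.image.extract_glimpse(x, size, offsets, uniform_noise=u)
# into
#   tf.image.extract_glimpse(x, size, offsets, noise="uniform" if (u) else "gaussian")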
|
jendap/tensorflow
|
tensorflow/tools/compatibility/tf_upgrade_v2.py
|
Python
|
apache-2.0
| 64,405
|
# -*- coding: utf-8 -*-
# stdlib
import random
import time
import unittest
# 3p
from nose.plugins.attrib import attr
import nose.tools as nt
# project
from aggregator import DEFAULT_HISTOGRAM_AGGREGATES
from dogstatsd import MetricsBucketAggregator
@attr(requires='core_integration')
class TestUnitMetricsBucketAggregator(unittest.TestCase):
BUCKET_BOUNDARY_TOLERANCE = 0.1
def setUp(self):
self.interval = 1
@staticmethod
def sort_metrics(metrics):
def sort_by(m):
return (m['metric'], ','.join(m['tags'] or []))
return sorted(metrics, key=sort_by)
@staticmethod
def sort_events(metrics):
def sort_by(m):
return (m['msg_title'], m['msg_text'], ','.join(m.get('tags', None) or []))
return sorted(metrics, key=sort_by)
def sleep_for_interval_length(self, interval=None):
start_time = time.time()
sleep_interval = interval or self.interval
time.sleep(sleep_interval)
# Make sure that we've slept at least for the interval length
while time.time() < start_time + sleep_interval:
time.sleep(start_time + sleep_interval - time.time())
def wait_for_bucket_boundary(self, interval=None):
i = interval or self.interval
while time.time() % i > self.BUCKET_BOUNDARY_TOLERANCE:
pass
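
    # Added note (not in the original test file): a bucket boundary is simply a
    # timestamp that is a whole multiple of the aggregation interval, so waiting
    # until time.time() % interval is near zero aligns the test with the start
    # of the aggregator's next time bucket.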
@staticmethod
def assert_almost_equal(i, j, e=1):
# Floating point math?
assert abs(i - j) <= e, "%s %s %s" % (i, j, e)
def test_counter_normalization(self):
ag_interval = 10
stats = MetricsBucketAggregator('myhost', interval=ag_interval)
# Assert counters are normalized.
stats.submit_packets('int:1|c')
stats.submit_packets('int:4|c')
stats.submit_packets('int:15|c')
stats.submit_packets('float:5|c')
self.sleep_for_interval_length(ag_interval)
metrics = self.sort_metrics(stats.flush())
assert len(metrics) == 2
floatc, intc = metrics
nt.assert_equal(floatc['metric'], 'float')
nt.assert_equal(floatc['points'][0][1], 0.5)
nt.assert_equal(floatc['host'], 'myhost')
nt.assert_equal(intc['metric'], 'int')
nt.assert_equal(intc['points'][0][1], 2)
nt.assert_equal(intc['host'], 'myhost')
def test_histogram_normalization(self):
ag_interval = 10
# The min is not enabled by default
stats = MetricsBucketAggregator('myhost', interval=ag_interval,
histogram_aggregates=DEFAULT_HISTOGRAM_AGGREGATES+['min'])
for i in range(5):
stats.submit_packets('h1:1|h')
for i in range(20):
stats.submit_packets('h2:1|h')
self.sleep_for_interval_length(ag_interval)
metrics = self.sort_metrics(stats.flush())
_, _, h1count, _, _, _, _, _, h2count, _, _, _ = metrics
nt.assert_equal(h1count['points'][0][1], 0.5)
nt.assert_equal(h2count['points'][0][1], 2)
def test_tags(self):
stats = MetricsBucketAggregator('myhost', interval=self.interval)
stats.submit_packets('gauge:1|c')
stats.submit_packets('gauge:2|c|@1')
stats.submit_packets('gauge:4|c|#tag1,tag2')
stats.submit_packets('gauge:8|c|#tag2,tag1') # Should be the same as above
stats.submit_packets('gauge:16|c|#tag3,tag4')
self.sleep_for_interval_length()
metrics = self.sort_metrics(stats.flush())
assert len(metrics) == 3
first, second, third = metrics
nt.assert_equal(first['metric'], 'gauge')
nt.assert_equal(first['tags'], None)
nt.assert_equal(first['points'][0][1], 3)
nt.assert_equal(first['host'], 'myhost')
nt.assert_equal(second['metric'], 'gauge')
nt.assert_equal(second['tags'], ('tag1', 'tag2'))
nt.assert_equal(second['points'][0][1], 12)
nt.assert_equal(second['host'], 'myhost')
nt.assert_equal(third['metric'], 'gauge')
nt.assert_equal(third['tags'], ('tag3', 'tag4'))
nt.assert_equal(third['points'][0][1], 16)
nt.assert_equal(third['host'], 'myhost')
def test_tags_gh442(self):
import dogstatsd
from aggregator import api_formatter
serialized = dogstatsd.serialize_metrics([api_formatter("foo", 12, 1, ('tag',), 'host')], "test-host")
self.assertTrue('"tags": ["tag"]' in serialized[0], serialized)
def test_counter(self):
ag_interval = 1.0
stats = MetricsBucketAggregator('myhost', interval=ag_interval)
# Track some counters.
stats.submit_packets('my.first.counter:1|c')
stats.submit_packets('my.first.counter:5|c')
stats.submit_packets('my.second.counter:1|c')
stats.submit_packets('my.third.counter:3|c')
# Ensure they roll up nicely.
self.sleep_for_interval_length(ag_interval)
metrics = self.sort_metrics(stats.flush())
assert len(metrics) == 3
first, second, third = metrics
nt.assert_equals(first['metric'], 'my.first.counter')
nt.assert_equals(first['points'][0][1], 6)
nt.assert_equals(first['host'], 'myhost')
nt.assert_equals(second['metric'], 'my.second.counter')
nt.assert_equals(second['points'][0][1], 1)
nt.assert_equals(third['metric'], 'my.third.counter')
nt.assert_equals(third['points'][0][1], 3)
self.sleep_for_interval_length(ag_interval)
# Ensure that counters reset to zero.
metrics = self.sort_metrics(stats.flush())
first, second, third = metrics
nt.assert_equals(first['metric'], 'my.first.counter')
nt.assert_equals(first['points'][0][1], 0)
nt.assert_equals(second['metric'], 'my.second.counter')
nt.assert_equals(second['points'][0][1], 0)
nt.assert_equals(third['metric'], 'my.third.counter')
nt.assert_equals(third['points'][0][1], 0)
def test_empty_counter(self):
ag_interval = self.interval
stats = MetricsBucketAggregator('myhost', interval=ag_interval)
self.sleep_for_interval_length(ag_interval)
metrics = self.sort_metrics(stats.flush())
# Should be an empty list
nt.assert_equals(len(metrics), 0)
# Track some counters.
stats.submit_packets('my.first.counter:%s|c' % (1 * ag_interval))
# Call flush before the bucket_length has been exceeded
metrics = self.sort_metrics(stats.flush())
# Should be an empty list
nt.assert_equals(len(metrics), 0)
self.sleep_for_interval_length(ag_interval)
metrics = self.sort_metrics(stats.flush())
# Should now have the data
nt.assert_equals(len(metrics), 1)
nt.assert_equals(metrics[0]['metric'], 'my.first.counter')
nt.assert_equals(metrics[0]['points'][0][1], 1)
def test_counter_buckets(self):
ag_interval = 5
stats = MetricsBucketAggregator('myhost', interval=ag_interval)
self.wait_for_bucket_boundary(ag_interval)
# Track some counters.
stats.submit_packets("my.first.counter:%s|c" % (1 * ag_interval))
stats.submit_packets("my.second.counter:%s|c" % (1 * ag_interval))
stats.submit_packets("my.third.counter:%s|c" % (3 * ag_interval))
time.sleep(ag_interval)
stats.submit_packets("my.first.counter:%s|c" % (5 * ag_interval))
# Want to get 2 different entries for my.first.counter in one set of metrics,
# so wait for the time bucket interval to pass
self.sleep_for_interval_length(ag_interval)
# Ensure they roll up nicely.
metrics = self.sort_metrics(stats.flush())
nt.assert_equals(len(metrics), 6)
first, first_b, second, second_b, third, third_b = metrics
nt.assert_equals(first['metric'], 'my.first.counter')
nt.assert_equals(first['points'][0][1], 1)
nt.assert_equals(first['host'], 'myhost')
nt.assert_equals(first_b['metric'], 'my.first.counter')
nt.assert_equals(first_b['points'][0][1], 5)
nt.assert_equals(first_b['points'][0][0] - first['points'][0][0], ag_interval)
nt.assert_equals(first['points'][0][0] % ag_interval, 0)
nt.assert_equals(first_b['points'][0][0] % ag_interval, 0)
nt.assert_equals(second['metric'], 'my.second.counter')
nt.assert_equals(second['points'][0][1], 1)
nt.assert_equals(second_b['metric'], 'my.second.counter')
nt.assert_equals(second_b['points'][0][1], 0)
nt.assert_equals(third['metric'], 'my.third.counter')
nt.assert_equals(third['points'][0][1], 3)
nt.assert_equals(third_b['metric'], 'my.third.counter')
nt.assert_equals(third_b['points'][0][1], 0)
metrics = self.sort_metrics(stats.flush())
nt.assert_equals(len(metrics), 0)
self.sleep_for_interval_length(ag_interval)
# Ensure that counters reset to zero.
metrics = self.sort_metrics(stats.flush())
first, second, third = metrics
nt.assert_equals(first['metric'], 'my.first.counter')
nt.assert_equals(first['points'][0][1], 0)
nt.assert_equals(second['metric'], 'my.second.counter')
nt.assert_equals(second['points'][0][1], 0)
nt.assert_equals(third['metric'], 'my.third.counter')
nt.assert_equals(third['points'][0][1], 0)
def test_counter_flush_during_bucket(self):
ag_interval = 5
stats = MetricsBucketAggregator('myhost', interval=ag_interval)
self.wait_for_bucket_boundary(ag_interval)
time.sleep(0.5)
# Track some counters.
stats.submit_packets("my.first.counter:%s|c" % (1 * ag_interval))
stats.submit_packets("my.second.counter:%s|c" % (1 * ag_interval))
stats.submit_packets("my.third.counter:%s|c" % (3 * ag_interval))
time.sleep(ag_interval)
stats.submit_packets("my.first.counter:%s|c" % (5 * ag_interval))
        # Want to get the data from the 2 buckets in 2 different calls, so don't wait for
# the bucket interval to pass
metrics = self.sort_metrics(stats.flush())
nt.assert_equals(len(metrics), 3)
first, second, third = metrics
nt.assert_equals(first['metric'], 'my.first.counter')
nt.assert_equals(first['points'][0][1], 1)
nt.assert_equals(first['host'], 'myhost')
nt.assert_equals(second['metric'], 'my.second.counter')
nt.assert_equals(second['points'][0][1], 1)
nt.assert_equals(third['metric'], 'my.third.counter')
nt.assert_equals(third['points'][0][1], 3)
#Now wait for the bucket interval to pass, and get the other points
self.sleep_for_interval_length(ag_interval)
metrics = self.sort_metrics(stats.flush())
nt.assert_equals(len(metrics), 3)
first, second, third = metrics
nt.assert_equals(first['metric'], 'my.first.counter')
nt.assert_equals(first['points'][0][1], 5)
nt.assert_equals(first['host'], 'myhost')
nt.assert_equals(second['metric'], 'my.second.counter')
nt.assert_equals(second['points'][0][1], 0)
nt.assert_equals(third['metric'], 'my.third.counter')
nt.assert_equals(third['points'][0][1], 0)
self.sleep_for_interval_length(ag_interval)
# Ensure that counters reset to zero.
metrics = self.sort_metrics(stats.flush())
nt.assert_equals(len(metrics), 3)
first, second, third = metrics
nt.assert_equals(first['metric'], 'my.first.counter')
nt.assert_equals(first['points'][0][1], 0)
nt.assert_equals(second['metric'], 'my.second.counter')
nt.assert_equals(second['points'][0][1], 0)
nt.assert_equals(third['metric'], 'my.third.counter')
nt.assert_equals(third['points'][0][1], 0)
self.sleep_for_interval_length(ag_interval)
# Ensure that counters reset to zero.
metrics = self.sort_metrics(stats.flush())
nt.assert_equals(len(metrics), 3)
first, second, third = metrics
nt.assert_equals(first['metric'], 'my.first.counter')
nt.assert_equals(first['points'][0][1], 0)
nt.assert_equals(second['metric'], 'my.second.counter')
nt.assert_equals(second['points'][0][1], 0)
nt.assert_equals(third['metric'], 'my.third.counter')
nt.assert_equals(third['points'][0][1], 0)
def test_sampled_counter(self):
# Submit a sampled counter.
stats = MetricsBucketAggregator('myhost', interval=self.interval)
stats.submit_packets('sampled.counter:1|c|@0.5')
self.sleep_for_interval_length()
metrics = stats.flush()
assert len(metrics) == 1
m = metrics[0]
assert m['metric'] == 'sampled.counter'
nt.assert_equal(m['points'][0][1], 2)
def test_gauge(self):
ag_interval = 2
stats = MetricsBucketAggregator('myhost', interval=ag_interval)
self.wait_for_bucket_boundary(ag_interval)
# Track some counters.
stats.submit_packets('my.first.gauge:1|g')
stats.submit_packets('my.first.gauge:5|g')
stats.submit_packets('my.second.gauge:1.5|g')
# Ensure that gauges roll up correctly.
self.sleep_for_interval_length(ag_interval)
metrics = self.sort_metrics(stats.flush())
nt.assert_equals(len(metrics), 2)
first, second = metrics
nt.assert_equals(first['metric'], 'my.first.gauge')
nt.assert_equals(first['points'][0][1], 5)
nt.assert_equals(first['host'], 'myhost')
nt.assert_equals(second['metric'], 'my.second.gauge')
nt.assert_equals(second['points'][0][1], 1.5)
# Ensure that old gauges get dropped due to old timestamps
stats.submit_metric('my.first.gauge', 5, 'g')
stats.submit_metric('my.first.gauge', 1, 'g', timestamp=1000000000)
stats.submit_metric('my.second.gauge', 20, 'g', timestamp=1000000000)
self.sleep_for_interval_length(ag_interval)
metrics = self.sort_metrics(stats.flush())
nt.assert_equals(len(metrics), 1)
first = metrics[0]
nt.assert_equals(first['metric'], 'my.first.gauge')
nt.assert_equals(first['points'][0][1], 5)
nt.assert_equals(first['host'], 'myhost')
def test_gauge_buckets(self):
        # Tests returning data from 2 time buckets
ag_interval = self.interval
stats = MetricsBucketAggregator('myhost', interval=ag_interval)
self.wait_for_bucket_boundary(ag_interval)
# Track some counters.
stats.submit_packets('my.first.gauge:1|g')
stats.submit_packets('my.first.gauge:5|g')
stats.submit_packets('my.second.gauge:1.5|g')
self.sleep_for_interval_length(ag_interval)
stats.submit_packets('my.second.gauge:9.5|g')
# Ensure that gauges roll up correctly.
self.sleep_for_interval_length(ag_interval)
metrics = self.sort_metrics(stats.flush())
nt.assert_equals(len(metrics), 3)
first, second, second_b = metrics
nt.assert_equals(first['metric'], 'my.first.gauge')
nt.assert_equals(first['points'][0][1], 5)
nt.assert_equals(first['host'], 'myhost')
nt.assert_equals(second_b['metric'], 'my.second.gauge')
nt.assert_equals(second_b['points'][0][1], 9.5)
nt.assert_equals(second['metric'], 'my.second.gauge')
nt.assert_equals(second['points'][0][1], 1.5)
#check that they come back empty
self.sleep_for_interval_length(ag_interval)
metrics = self.sort_metrics(stats.flush())
nt.assert_equals(len(metrics), 0)
def test_gauge_flush_during_bucket(self):
#Tests returning data when flush is called in the middle of a time bucket that has data
ag_interval = self.interval
stats = MetricsBucketAggregator('myhost', interval=ag_interval)
self.wait_for_bucket_boundary(ag_interval)
# Track some counters.
stats.submit_packets('my.first.gauge:1|g')
stats.submit_packets('my.first.gauge:5|g')
stats.submit_packets('my.second.gauge:1.5|g')
self.sleep_for_interval_length(ag_interval)
stats.submit_packets('my.second.gauge:9.5|g')
# Ensure that gauges roll up correctly.
metrics = self.sort_metrics(stats.flush())
nt.assert_equals(len(metrics), 2)
first, second = metrics
nt.assert_equals(first['metric'], 'my.first.gauge')
nt.assert_equals(first['points'][0][1], 5)
nt.assert_equals(first['host'], 'myhost')
nt.assert_equals(second['metric'], 'my.second.gauge')
nt.assert_equals(second['points'][0][1], 1.5)
self.sleep_for_interval_length(ag_interval)
metrics = self.sort_metrics(stats.flush())
nt.assert_equals(len(metrics), 1)
        second_b = metrics[0]
        nt.assert_equals(second_b['metric'], 'my.second.gauge')
        nt.assert_equals(second_b['points'][0][1], 9.5)
def test_sets(self):
stats = MetricsBucketAggregator('myhost', interval=self.interval)
stats.submit_packets('my.set:10|s')
stats.submit_packets('my.set:20|s')
stats.submit_packets('my.set:20|s')
stats.submit_packets('my.set:30|s')
stats.submit_packets('my.set:30|s')
stats.submit_packets('my.set:30|s')
# Assert that it's treated normally.
self.sleep_for_interval_length()
metrics = stats.flush()
nt.assert_equal(len(metrics), 1)
m = metrics[0]
nt.assert_equal(m['metric'], 'my.set')
nt.assert_equal(m['points'][0][1], 3)
# Assert there are no more sets
assert not stats.flush()
def test_string_sets(self):
stats = MetricsBucketAggregator('myhost', interval=self.interval)
stats.submit_packets('my.set:string|s')
stats.submit_packets('my.set:sets|s')
stats.submit_packets('my.set:sets|s')
stats.submit_packets('my.set:test|s')
stats.submit_packets('my.set:test|s')
stats.submit_packets('my.set:test|s')
# Assert that it's treated normally.
self.sleep_for_interval_length()
metrics = stats.flush()
nt.assert_equal(len(metrics), 1)
m = metrics[0]
nt.assert_equal(m['metric'], 'my.set')
nt.assert_equal(m['points'][0][1], 3)
# Assert there are no more sets
assert not stats.flush()
self.sleep_for_interval_length()
assert not stats.flush()
def test_sets_buckets(self):
stats = MetricsBucketAggregator('myhost', interval=self.interval)
stats.submit_packets('my.set:10|s')
stats.submit_packets('my.set:20|s')
stats.submit_packets('my.set:20|s')
stats.submit_packets('my.set:30|s')
stats.submit_packets('my.set:30|s')
stats.submit_packets('my.set:30|s')
self.sleep_for_interval_length()
stats.submit_packets('my.set:40|s')
# Assert that it's treated normally.
self.sleep_for_interval_length()
metrics = stats.flush()
nt.assert_equal(len(metrics), 2)
m, m2 = metrics
nt.assert_equal(m['metric'], 'my.set')
nt.assert_equal(m['points'][0][1], 3)
nt.assert_equal(m2['metric'], 'my.set')
nt.assert_equal(m2['points'][0][1], 1)
# Assert there are no more sets
assert not stats.flush()
def test_sets_flush_during_bucket(self):
ag_interval = self.interval
stats = MetricsBucketAggregator('myhost', interval=ag_interval)
self.wait_for_bucket_boundary(ag_interval)
stats.submit_packets('my.set:10|s')
stats.submit_packets('my.set:20|s')
stats.submit_packets('my.set:20|s')
stats.submit_packets('my.set:30|s')
stats.submit_packets('my.set:30|s')
stats.submit_packets('my.set:30|s')
self.sleep_for_interval_length(ag_interval)
stats.submit_packets('my.set:40|s')
# Assert that it's treated normally.
metrics = stats.flush()
nt.assert_equal(len(metrics), 1)
m = metrics[0]
nt.assert_equal(m['metric'], 'my.set')
nt.assert_equal(m['points'][0][1], 3)
self.sleep_for_interval_length(ag_interval)
metrics = stats.flush()
m = metrics[0]
nt.assert_equal(m['metric'], 'my.set')
nt.assert_equal(m['points'][0][1], 1)
# Assert there are no more sets
assert not stats.flush()
def test_gauge_sample_rate(self):
ag_interval = self.interval
stats = MetricsBucketAggregator('myhost', interval=ag_interval)
# Submit a sampled gauge metric.
stats.submit_packets('sampled.gauge:10|g|@0.1')
# Assert that it's treated normally.
self.sleep_for_interval_length(ag_interval)
metrics = stats.flush()
nt.assert_equal(len(metrics), 1)
m = metrics[0]
nt.assert_equal(m['metric'], 'sampled.gauge')
nt.assert_equal(m['points'][0][1], 10)
def test_histogram(self):
ag_interval = self.interval
# The min is not enabled by default
stats = MetricsBucketAggregator('myhost', interval=ag_interval,
histogram_aggregates=DEFAULT_HISTOGRAM_AGGREGATES+['min'])
self.wait_for_bucket_boundary(ag_interval)
# Sample all numbers between 1-100 many times. This
# means our percentiles should be relatively close to themselves.
percentiles = range(100)
random.shuffle(percentiles) # in place
for i in percentiles:
for j in xrange(20):
for type_ in ['h', 'ms']:
m = 'my.p:%s|%s' % (i, type_)
stats.submit_packets(m)
self.sleep_for_interval_length(ag_interval)
metrics = self.sort_metrics(stats.flush())
nt.assert_equal(len(metrics), 6)
p95, pavg, pcount, pmax, pmed, pmin = self.sort_metrics(metrics)
nt.assert_equal(p95['metric'], 'my.p.95percentile')
self.assert_almost_equal(p95['points'][0][1], 95, 10)
self.assert_almost_equal(pmax['points'][0][1], 99, 1)
self.assert_almost_equal(pmed['points'][0][1], 50, 2)
self.assert_almost_equal(pavg['points'][0][1], 50, 2)
self.assert_almost_equal(pmin['points'][0][1], 1, 1)
nt.assert_equals(pcount['points'][0][1], 4000) # 100 * 20 * 2
nt.assert_equals(p95['host'], 'myhost')
# Ensure that histograms are reset.
metrics = self.sort_metrics(stats.flush())
assert not metrics
def test_sampled_histogram(self):
# Submit a sampled histogram.
# The min is not enabled by default
stats = MetricsBucketAggregator(
'myhost',
interval=self.interval,
histogram_aggregates=DEFAULT_HISTOGRAM_AGGREGATES+['min']
)
stats.submit_packets('sampled.hist:5|h|@0.5')
# Assert we scale up properly.
self.sleep_for_interval_length()
metrics = self.sort_metrics(stats.flush())
p95, pavg, pcount, pmax, pmed, pmin = self.sort_metrics(metrics)
nt.assert_equal(pcount['points'][0][1], 2)
for p in [p95, pavg, pmed, pmax, pmin]:
nt.assert_equal(p['points'][0][1], 5)
def test_histogram_buckets(self):
ag_interval = 1
# The min is not enabled by default
stats = MetricsBucketAggregator('myhost', interval=ag_interval,
histogram_aggregates=DEFAULT_HISTOGRAM_AGGREGATES+['min'])
# Sample all numbers between 1-100 many times. This
# means our percentiles should be relatively close to themselves.
self.wait_for_bucket_boundary(ag_interval)
percentiles = range(100)
random.shuffle(percentiles) # in place
for i in percentiles:
for j in xrange(20):
for type_ in ['h', 'ms']:
m = 'my.p:%s|%s' % (i, type_)
stats.submit_packets(m)
time.sleep(self.BUCKET_BOUNDARY_TOLERANCE) # Make sure that we're waiting for the _next_ bucket boundary
self.wait_for_bucket_boundary(ag_interval)
percentiles = range(50)
random.shuffle(percentiles) # in place
for i in percentiles:
for j in xrange(20):
for type_ in ['h', 'ms']:
m = 'my.p:%s|%s' % (i, type_)
stats.submit_packets(m)
self.sleep_for_interval_length(ag_interval)
metrics = self.sort_metrics(stats.flush())
nt.assert_equal(len(metrics), 12)
p95, p95_b, pavg, pavg_b, pcount, pcount_b, pmax, pmax_b, pmed, pmed_b, pmin, pmin_b = self.sort_metrics(metrics)
nt.assert_equal(p95['metric'], 'my.p.95percentile')
self.assert_almost_equal(p95['points'][0][1], 95, 10)
self.assert_almost_equal(pmax['points'][0][1], 99, 1)
self.assert_almost_equal(pmed['points'][0][1], 50, 2)
self.assert_almost_equal(pavg['points'][0][1], 50, 2)
self.assert_almost_equal(pmin['points'][0][1], 1, 1)
nt.assert_equals(pcount['points'][0][1], 4000) # 100 * 20 * 2
nt.assert_equal(p95_b['metric'], 'my.p.95percentile')
self.assert_almost_equal(p95_b['points'][0][1], 47, 10)
self.assert_almost_equal(pmax_b['points'][0][1], 49, 1)
self.assert_almost_equal(pmed_b['points'][0][1], 25, 2)
self.assert_almost_equal(pavg_b['points'][0][1], 25, 2)
self.assert_almost_equal(pmin_b['points'][0][1], 1, 1)
        nt.assert_equals(pcount_b['points'][0][1], 2000) # 50 * 20 * 2
nt.assert_equals(p95['host'], 'myhost')
# Ensure that histograms are reset.
metrics = self.sort_metrics(stats.flush())
assert not metrics
self.sleep_for_interval_length(ag_interval)
metrics = self.sort_metrics(stats.flush())
assert not metrics
def test_histogram_flush_during_bucket(self):
ag_interval = 1
# The min is not enabled by default
stats = MetricsBucketAggregator('myhost', interval=ag_interval,
histogram_aggregates=DEFAULT_HISTOGRAM_AGGREGATES+['min'])
# Sample all numbers between 1-100 many times. This
# means our percentiles should be relatively close to themselves.
self.wait_for_bucket_boundary(ag_interval)
percentiles = range(100)
random.shuffle(percentiles) # in place
for i in percentiles:
for j in xrange(20):
for type_ in ['h', 'ms']:
m = 'my.p:%s|%s' % (i, type_)
stats.submit_packets(m)
time.sleep(self.BUCKET_BOUNDARY_TOLERANCE) # Make sure that we'll wait for the _next_ bucket boundary
self.wait_for_bucket_boundary(ag_interval)
percentiles = range(50)
random.shuffle(percentiles) # in place
for i in percentiles:
for j in xrange(20):
for type_ in ['h', 'ms']:
m = 'my.p:%s|%s' % (i, type_)
stats.submit_packets(m)
metrics = self.sort_metrics(stats.flush())
nt.assert_equal(len(metrics), 6)
p95, pavg, pcount, pmax, pmed, pmin = self.sort_metrics(metrics)
nt.assert_equal(p95['metric'], 'my.p.95percentile')
self.assert_almost_equal(p95['points'][0][1], 95, 10)
self.assert_almost_equal(pmax['points'][0][1], 99, 1)
self.assert_almost_equal(pmed['points'][0][1], 50, 2)
self.assert_almost_equal(pavg['points'][0][1], 50, 2)
self.assert_almost_equal(pmin['points'][0][1], 1, 1)
nt.assert_equal(pcount['points'][0][1], 4000) # 100 * 20 * 2
nt.assert_equals(p95['host'], 'myhost')
self.sleep_for_interval_length()
metrics = self.sort_metrics(stats.flush())
nt.assert_equal(len(metrics), 6)
p95_b, pavg_b, pcount_b, pmax_b, pmed_b, pmin_b = self.sort_metrics(metrics)
nt.assert_equal(p95_b['metric'], 'my.p.95percentile')
self.assert_almost_equal(p95_b['points'][0][1], 47, 10)
self.assert_almost_equal(pmax_b['points'][0][1], 49, 1)
self.assert_almost_equal(pmed_b['points'][0][1], 25, 2)
self.assert_almost_equal(pavg_b['points'][0][1], 25, 2)
self.assert_almost_equal(pmin_b['points'][0][1], 1, 1)
        nt.assert_equals(pcount_b['points'][0][1], 2000) # 50 * 20 * 2
# Ensure that histograms are reset.
metrics = self.sort_metrics(stats.flush())
assert not metrics
def test_batch_submission(self):
# Submit a sampled histogram.
stats = MetricsBucketAggregator('myhost', interval=self.interval)
metrics = [
'counter:1|c',
'counter:1|c',
'gauge:1|g'
]
packet = "\n".join(metrics)
stats.submit_packets(packet)
self.sleep_for_interval_length()
metrics = self.sort_metrics(stats.flush())
nt.assert_equal(2, len(metrics))
counter, gauge = metrics
assert counter['points'][0][1] == 2
assert gauge['points'][0][1] == 1
def test_bad_packets_throw_errors(self):
packets = [
'missing.value.and.type',
'missing.type:2',
'missing.value|c',
'2|c',
'unknown.type:2|z',
'string.value:abc|c',
'string.sample.rate:0|c|@abc',
# Bad event-like packets
            '_ev{1,2}:bad_header',
'_e{1,}:invalid|headers',
'_e:missing|size|headers',
'_e:{1,1}:t|t|t:bad_meta|h',
]
stats = MetricsBucketAggregator('myhost', interval=self.interval)
for packet in packets:
try:
stats.submit_packets(packet)
except Exception:
assert True
else:
assert False, 'invalid : %s' % packet
def test_metrics_expiry(self):
# Ensure metrics eventually expire and stop submitting.
ag_interval = self.interval
expiry = ag_interval * 5 + 2
# The min is not enabled by default
stats = MetricsBucketAggregator('myhost', interval=ag_interval,
expiry_seconds=expiry,
histogram_aggregates=DEFAULT_HISTOGRAM_AGGREGATES+['min'])
stats.submit_packets('test.counter:123|c')
stats.submit_packets('test.gauge:55|g')
stats.submit_packets('test.set:44|s')
stats.submit_packets('test.histogram:11|h')
submit_time = time.time()
submit_bucket_timestamp = submit_time - (submit_time % ag_interval)
# Ensure points keep submitting
self.sleep_for_interval_length()
metrics = self.sort_metrics(stats.flush())
nt.assert_equal(len(metrics), 9)
nt.assert_equal(metrics[0]['metric'], 'test.counter')
nt.assert_equal(metrics[0]['points'][0][1], 123)
nt.assert_equal(metrics[0]['points'][0][0], submit_bucket_timestamp)
#flush without waiting - should get nothing
metrics = self.sort_metrics(stats.flush())
assert not metrics, str(metrics)
        # Don't submit anything
submit_time = time.time()
bucket_timestamp = submit_time - (submit_time % ag_interval)
self.sleep_for_interval_length()
metrics = self.sort_metrics(stats.flush())
nt.assert_equal(len(metrics), 1)
nt.assert_equal(metrics[0]['metric'], 'test.counter')
nt.assert_equal(metrics[0]['points'][0][1], 0)
nt.assert_equal(metrics[0]['points'][0][0], bucket_timestamp)
stats.submit_packets('test.gauge:5|g')
self.sleep_for_interval_length()
time.sleep(0.3)
metrics = self.sort_metrics(stats.flush())
nt.assert_equal(len(metrics), 2)
nt.assert_equal(metrics[0]['metric'], 'test.counter')
nt.assert_equal(metrics[0]['points'][0][1], 0)
nt.assert_equal(metrics[1]['metric'], 'test.gauge')
nt.assert_equal(metrics[1]['points'][0][1], 5)
#flush without waiting - should get nothing
metrics = self.sort_metrics(stats.flush())
assert not metrics, str(metrics)
self.sleep_for_interval_length()
metrics = self.sort_metrics(stats.flush())
nt.assert_equal(len(metrics), 1)
nt.assert_equal(metrics[0]['metric'], 'test.counter')
nt.assert_equal(metrics[0]['points'][0][1], 0)
# Now sleep for longer than the expiry window and ensure
# no points are submitted
self.sleep_for_interval_length()
time.sleep(2)
m = stats.flush()
assert not m, str(m)
# If we submit again, we're all good.
stats.submit_packets('test.counter:123|c')
stats.submit_packets('test.gauge:55|g')
stats.submit_packets('test.set:44|s')
stats.submit_packets('test.histogram:11|h')
self.sleep_for_interval_length()
metrics = self.sort_metrics(stats.flush())
nt.assert_equal(len(metrics), 9)
nt.assert_equal(metrics[0]['metric'], 'test.counter')
nt.assert_equal(metrics[0]['points'][0][1], 123)
def test_diagnostic_stats(self):
stats = MetricsBucketAggregator('myhost', interval=self.interval)
for i in xrange(10):
stats.submit_packets('metric:10|c')
stats.send_packet_count('datadog.dogstatsd.packet.count')
self.sleep_for_interval_length()
metrics = self.sort_metrics(stats.flush())
nt.assert_equals(2, len(metrics))
first, second = metrics
nt.assert_equal(first['metric'], 'datadog.dogstatsd.packet.count')
nt.assert_equal(first['points'][0][1], 10)
def test_histogram_counter(self):
# Test whether histogram.count == increment
# same deal with a sample rate
ag_interval = self.interval
cnt = 100000
for run in [1, 2]:
stats = MetricsBucketAggregator('myhost', interval=ag_interval)
for i in xrange(cnt):
if run == 2:
stats.submit_packets('test.counter:1|c|@0.5')
stats.submit_packets('test.hist:1|ms|@0.5')
else:
stats.submit_packets('test.counter:1|c')
stats.submit_packets('test.hist:1|ms')
self.sleep_for_interval_length(ag_interval)
metrics = self.sort_metrics(stats.flush())
assert len(metrics) > 0
            # Depending on timing, some runs may return the metric in more than one bucket, meaning there may be
# more than one 'metric' for each of the counters
counter_count = 0
hist_count = 0
for num in [m['points'][0][1] for m in metrics if m['metric'] == 'test.counter']:
counter_count += num
for num in [m['points'][0][1] for m in metrics if m['metric'] == 'test.hist.count']:
hist_count += num
nt.assert_equal(counter_count, cnt * run)
nt.assert_equal(hist_count, cnt * run)
def test_scientific_notation(self):
ag_interval = 10
stats = MetricsBucketAggregator('myhost', interval=ag_interval)
stats.submit_packets('test.scinot:9.512901e-05|g')
self.sleep_for_interval_length(ag_interval)
metrics = self.sort_metrics(stats.flush())
assert len(metrics) == 1
ts, val = metrics[0].get('points')[0]
nt.assert_almost_equal(val, 9.512901e-05)
def test_event_tags(self):
stats = MetricsBucketAggregator('myhost', interval=self.interval)
stats.submit_packets('_e{6,4}:title1|text')
stats.submit_packets('_e{6,4}:title2|text|#t1')
stats.submit_packets('_e{6,4}:title3|text|#t1,t2:v2,t3,t4')
stats.submit_packets('_e{6,4}:title4|text|k:key|p:normal|#t1,t2')
events = self.sort_events(stats.flush_events())
assert len(events) == 4
first, second, third, fourth = events
try:
first['tags']
except Exception:
assert True
else:
assert False, "event['tags'] shouldn't be defined when no tags aren't explicited in the packet"
nt.assert_equal(first['msg_title'], 'title1')
nt.assert_equal(first['msg_text'], 'text')
nt.assert_equal(second['msg_title'], 'title2')
nt.assert_equal(second['msg_text'], 'text')
nt.assert_equal(second['tags'], sorted(['t1']))
nt.assert_equal(third['msg_title'], 'title3')
nt.assert_equal(third['msg_text'], 'text')
nt.assert_equal(third['tags'], sorted(['t1', 't2:v2', 't3', 't4']))
nt.assert_equal(fourth['msg_title'], 'title4')
nt.assert_equal(fourth['msg_text'], 'text')
nt.assert_equal(fourth['aggregation_key'], 'key')
nt.assert_equal(fourth['priority'], 'normal')
nt.assert_equal(fourth['tags'], sorted(['t1', 't2']))
def test_event_title(self):
stats = MetricsBucketAggregator('myhost', interval=self.interval)
stats.submit_packets('_e{0,4}:|text')
stats.submit_packets(u'_e{9,4}:2intitulé|text')
stats.submit_packets('_e{14,4}:3title content|text')
stats.submit_packets('_e{14,4}:4title|content|text')
stats.submit_packets('_e{13,4}:5title\\ntitle|text') # \n stays escaped
events = self.sort_events(stats.flush_events())
assert len(events) == 5
first, second, third, fourth, fifth = events
nt.assert_equal(first['msg_title'], '')
nt.assert_equal(second['msg_title'], u'2intitulé')
nt.assert_equal(third['msg_title'], '3title content')
nt.assert_equal(fourth['msg_title'], '4title|content')
nt.assert_equal(fifth['msg_title'], '5title\\ntitle')
def test_event_text(self):
stats = MetricsBucketAggregator('myhost', interval=self.interval)
stats.submit_packets('_e{2,0}:t1|')
stats.submit_packets('_e{2,12}:t2|text|content')
stats.submit_packets('_e{2,23}:t3|First line\\nSecond line') # \n is a newline
stats.submit_packets(u'_e{2,19}:t4|♬ †øU †øU ¥ºu T0µ ♪') # utf-8 compliant
events = self.sort_events(stats.flush_events())
assert len(events) == 4
first, second, third, fourth = events
nt.assert_equal(first['msg_text'], '')
nt.assert_equal(second['msg_text'], 'text|content')
nt.assert_equal(third['msg_text'], 'First line\nSecond line')
nt.assert_equal(fourth['msg_text'], u'♬ †øU †øU ¥ºu T0µ ♪')
def test_recent_point_threshold(self):
ag_interval = 1
threshold = 100
# The min is not enabled by default
stats = MetricsBucketAggregator(
'myhost',
recent_point_threshold=threshold,
interval=ag_interval,
histogram_aggregates=DEFAULT_HISTOGRAM_AGGREGATES+['min']
)
timestamp_beyond_threshold = time.time() - threshold*2
# Ensure that old gauges get dropped due to old timestamps
stats.submit_metric('my.first.gauge', 5, 'g')
stats.submit_metric('my.first.gauge', 1, 'g', timestamp=timestamp_beyond_threshold)
stats.submit_metric('my.second.gauge', 20, 'g', timestamp=timestamp_beyond_threshold)
self.sleep_for_interval_length(ag_interval)
metrics = self.sort_metrics(stats.flush())
assert len(metrics) == 1
first = metrics[0]
nt.assert_equals(first['metric'], 'my.first.gauge')
nt.assert_equals(first['points'][0][1], 5)
nt.assert_equals(first['host'], 'myhost')
timestamp_within_threshold = time.time() - threshold/2
bucket_for_timestamp_within_threshold = timestamp_within_threshold - (timestamp_within_threshold % ag_interval)
stats.submit_metric('my.1.gauge', 5, 'g')
stats.submit_metric('my.1.gauge', 1, 'g', timestamp=timestamp_within_threshold)
stats.submit_metric('my.2.counter', 20, 'c', timestamp=timestamp_within_threshold)
stats.submit_metric('my.3.set', 20, 's', timestamp=timestamp_within_threshold)
stats.submit_metric('my.4.histogram', 20, 'h', timestamp=timestamp_within_threshold)
self.sleep_for_interval_length(ag_interval)
flush_timestamp = time.time()
# The bucket timestamp is the beginning of the bucket that ended before we flushed
bucket_timestamp = flush_timestamp - (flush_timestamp % ag_interval) - ag_interval
metrics = self.sort_metrics(stats.flush())
nt.assert_equal(len(metrics), 11)
first, first_b, second, second_b, third, h1, h2, h3, h4, h5, h6 = metrics
nt.assert_equals(first['metric'], 'my.1.gauge')
nt.assert_equals(first['points'][0][1], 1)
nt.assert_equals(first['host'], 'myhost')
self.assert_almost_equal(first['points'][0][0], bucket_for_timestamp_within_threshold, 0.1)
nt.assert_equals(first_b['metric'], 'my.1.gauge')
nt.assert_equals(first_b['points'][0][1], 5)
self.assert_almost_equal(first_b['points'][0][0], bucket_timestamp, 0.1)
nt.assert_equals(second['metric'], 'my.2.counter')
nt.assert_equals(second['points'][0][1], 20)
self.assert_almost_equal(second['points'][0][0], bucket_for_timestamp_within_threshold, 0.1)
nt.assert_equals(second_b['metric'], 'my.2.counter')
nt.assert_equals(second_b['points'][0][1], 0)
self.assert_almost_equal(second_b['points'][0][0], bucket_timestamp, 0.1)
nt.assert_equals(third['metric'], 'my.3.set')
nt.assert_equals(third['points'][0][1], 1)
self.assert_almost_equal(third['points'][0][0], bucket_for_timestamp_within_threshold, 0.1)
nt.assert_equals(h1['metric'], 'my.4.histogram.95percentile')
nt.assert_equals(h1['points'][0][1], 20)
self.assert_almost_equal(h1['points'][0][0], bucket_for_timestamp_within_threshold, 0.1)
nt.assert_equal(h1['points'][0][0], h2['points'][0][0])
nt.assert_equal(h1['points'][0][0], h3['points'][0][0])
nt.assert_equal(h1['points'][0][0], h4['points'][0][0])
nt.assert_equal(h1['points'][0][0], h5['points'][0][0])
def test_calculate_bucket_start(self):
stats = MetricsBucketAggregator('myhost', interval=10)
nt.assert_equal(stats.calculate_bucket_start(13284283), 13284280)
nt.assert_equal(stats.calculate_bucket_start(13284280), 13284280)
stats = MetricsBucketAggregator('myhost', interval=5)
nt.assert_equal(stats.calculate_bucket_start(13284287), 13284285)
nt.assert_equal(stats.calculate_bucket_start(13284280), 13284280)
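
        # Added note (not in the original test file): the expected values follow
        # from bucket_start = timestamp - (timestamp % interval), e.g.
        # 13284283 - (13284283 % 10) == 13284280 and
        # 13284287 - (13284287 % 5) == 13284285.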
|
WPMedia/dd-agent
|
tests/core/test_bucket_aggregator.py
|
Python
|
bsd-3-clause
| 42,702
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for verstraete_cirac.py."""
import unittest
from openfermion.hamiltonians import fermi_hubbard
from openfermion.linalg import get_sparse_operator, get_ground_state
from openfermion.transforms.opconversions import verstraete_cirac_2d_square
from openfermion.transforms.opconversions.verstraete_cirac import (
coordinates_to_snake_index, snake_index_to_coordinates,
stabilizer_local_2d_square)
class VerstraeteCirac2dSquareGroundStateTest(unittest.TestCase):
"""Test that the transform preserves desired ground state properties."""
def setUp(self):
self.x_dimension = 2
self.y_dimension = 3
# Create a Hamiltonian with nearest-neighbor hopping terms
self.ferm_op = fermi_hubbard(self.x_dimension, self.y_dimension, 1., 0.,
0., 0., False, True)
# Get the ground energy and ground state
self.ferm_op_sparse = get_sparse_operator(self.ferm_op)
self.ferm_op_ground_energy, self.ferm_op_ground_state = (
get_ground_state(self.ferm_op_sparse))
# Transform the FermionOperator to a QubitOperator
self.transformed_op = verstraete_cirac_2d_square(
self.ferm_op,
self.x_dimension,
self.y_dimension,
add_auxiliary_hamiltonian=True,
snake=False)
# Get the ground energy and state of the transformed operator
self.transformed_sparse = get_sparse_operator(self.transformed_op)
self.transformed_ground_energy, self.transformed_ground_state = (
get_ground_state(self.transformed_sparse))
def test_ground_energy(self):
"""Test that the transformation preserves the ground energy."""
self.assertAlmostEqual(self.transformed_ground_energy,
self.ferm_op_ground_energy)
class VerstraeteCirac2dSquareOperatorLocalityTest(unittest.TestCase):
"""Test that the transform results in local qubit operators."""
def setUp(self):
self.x_dimension = 6
self.y_dimension = 6
# Create a Hubbard Hamiltonian
self.ferm_op = fermi_hubbard(self.x_dimension, self.y_dimension, 1.0,
4.0, 0.0, 0.0, False, True)
# Transform the FermionOperator to a QubitOperator without including
# the auxiliary Hamiltonian
self.transformed_op_no_aux = verstraete_cirac_2d_square(
self.ferm_op,
self.x_dimension,
self.y_dimension,
add_auxiliary_hamiltonian=False,
snake=False)
self.transformed_op_no_aux.compress()
# Transform the FermionOperator to a QubitOperator, including
# the auxiliary Hamiltonian
self.transformed_op_aux = verstraete_cirac_2d_square(
self.ferm_op,
self.x_dimension,
self.y_dimension,
add_auxiliary_hamiltonian=True,
snake=False)
self.transformed_op_aux.compress()
def test_operator_locality_no_aux(self):
"""Test that the operators without the auxiliary Hamiltonian
are at most 4-local."""
for term in self.transformed_op_no_aux.terms:
self.assertTrue(len(term) <= 4)
def test_operator_locality_aux(self):
"""Test that the operators with the auxiliary Hamiltonian
are at most 6-local."""
for term in self.transformed_op_aux.terms:
self.assertTrue(len(term) <= 6)
class ExceptionTest(unittest.TestCase):
"""Test that exceptions are raised correctly."""
def test_verstraete_cirac_2d_square(self):
ferm_op = fermi_hubbard(3, 2, 1., 0., spinless=True)
with self.assertRaises(NotImplementedError):
_ = verstraete_cirac_2d_square(ferm_op, 3, 2)
def test_stabilizer_local_2d_square(self):
with self.assertRaises(ValueError):
_ = stabilizer_local_2d_square(0, 2, 4, 4)
def test_coordinates_to_snake_index(self):
with self.assertRaises(ValueError):
_ = coordinates_to_snake_index(4, 4, 4, 5)
with self.assertRaises(ValueError):
_ = coordinates_to_snake_index(4, 4, 5, 4)
def test_snake_index_to_coordinates(self):
with self.assertRaises(ValueError):
_ = snake_index_to_coordinates(20, 4, 5)
|
quantumlib/OpenFermion
|
src/openfermion/transforms/opconversions/verstraete_cirac_test.py
|
Python
|
apache-2.0
| 4,897
|
from datetime import datetime
from django.db import models, transaction, DatabaseError
from django.db.models import Q
from django.utils import timezone
import pytz
from threepio import logger
from core.models.abstract import BaseSource
from core.models.instance_source import InstanceSource
from core.models.provider import Provider
from core.models.identity import Identity
from core.query import only_current_source
class ActiveVolumesManager(models.Manager):
def get_queryset(self):
return super(ActiveVolumesManager, self)\
.get_queryset().filter(only_current_source())
class Volume(BaseSource):
size = models.IntegerField()
name = models.CharField(max_length=256)
description = models.TextField(blank=True, null=True)
objects = models.Manager() # The default manager.
active_volumes = ActiveVolumesManager()
class Meta:
db_table = "volume"
app_label = "core"
def update(self, *args, **kwargs):
"""
Allows for partial updating of the model
"""
# Upload args into kwargs
for arg in args:
for (key, value) in arg.items():
kwargs[key] = value
# Update the values
for key in kwargs.keys():
if hasattr(self, key):
try:
if key in ["provider"]:
continue
setattr(self, key, kwargs[key])
except Exception:
logger.exception("Unable to update key: " + str(key))
self.save()
return self
def get_projects(self, user):
projects = self.projects.filter(
Q(end_date=None) | Q(end_date__gt=timezone.now()),
owner=user)
return projects
def __unicode__(self):
return "%s" % (self.instance_source.identifier)
def get_status(self):
if hasattr(self, 'esh') and self.esh.extra:
status = self.esh.extra["status"]
tmp_status = self.esh.extra.get('tmp_status', '')
if tmp_status:
return "%s - %s" % (status, tmp_status)
return status
last_history = self._get_last_history()
if last_history:
return last_history.status.name
else:
return VolumeStatus.UNKNOWN
def get_device(self):
attach_data = self.get_attach_data()
if attach_data and attach_data.get("device"):
return attach_data["device"]
def get_instance_alias(self):
attach_data = self.get_attach_data()
if attach_data and attach_data.get("instance_alias"):
return attach_data["instance_alias"]
def get_attach_data(self):
if hasattr(self, 'esh') and self.esh.extra:
attach_data = self.esh.extra.get('attachments', {})
else:
attach_data = {}
if attach_data:
if isinstance(attach_data, list) and attach_data:
attach_data = attach_data[0]
if "serverId" in attach_data:
attach_data["instance_alias"] = attach_data["serverId"]
return attach_data
else:
last_history = self._get_last_history()
if last_history\
and (last_history.status.name == VolumeStatus.INUSE
or last_history.status.name == VolumeStatus.ATTACHING):
return last_history.get_attach_data()
return None
def mount_location(self):
"""
TODO: Refactor and use get_metadata.
"""
metadata = {}
if hasattr(self, 'esh') and self.esh.extra:
metadata = self.esh.extra.get('metadata', {})
return metadata.get('mount_location', None)
def esh_attach_data(self):
"""
TODO: Refactor and use get_attach_data.
"""
return self.get_attach_data()
def esh_status(self):
"""
TODO: Refactor and use get_status.
"""
return self.get_status()
def _get_last_history(self):
last_history = self.volumestatushistory_set.all()\
.order_by('-start_date')
if not last_history:
return None
return last_history[0]
def _should_update(self, last_history):
"""
Returns whether a new VolumeStatusHistory needs to be created.
"""
return not last_history\
or self.get_status() != last_history.status.name\
or self.get_device() != last_history.device\
or self.get_instance_alias() != last_history.instance_alias
def _update_history(self):
status = self.get_status()
device = self.get_device()
instance_alias = self.get_instance_alias()
if status != VolumeStatus.UNKNOWN:
last_history = self._get_last_history()
# This is a living volume!
if self.end_date:
self.end_date = None
self.save()
if self._should_update(last_history):
with transaction.atomic():
try:
new_history = VolumeStatusHistory.factory(self)
if last_history:
last_history.end_date = new_history.start_date
last_history.save()
new_history.save()
                    except DatabaseError:
                        logger.exception(
                            "volume_status_history: Lock is already acquired by "
                            "another transaction.")
def convert_esh_volume(esh_volume, provider_uuid, identity_uuid=None, user=None):
"""
Get or create the core representation of esh_volume
    Attach esh_volume to the object for further introspection.
"""
identifier = esh_volume.id
name = esh_volume.name
size = esh_volume.size
created_on = esh_volume.extra.get('createTime')
try:
source = InstanceSource.objects.get(
identifier=identifier, provider__uuid=provider_uuid)
volume = source.volume
except InstanceSource.DoesNotExist:
if not identity_uuid:
# Author of the Volume cannot be inferred without more details.
raise
        volume = create_volume(
            name,
            identifier,
            size,
            provider_uuid,
            identity_uuid,
            user,
            created_on=created_on)
volume.esh = esh_volume
volume._update_history()
return volume
def create_volume(name, identifier, size, provider_uuid, identity_uuid,
creator, description=None, created_on=None):
provider = Provider.objects.get(uuid=provider_uuid)
identity = Identity.objects.get(uuid=identity_uuid)
source = InstanceSource.objects.create(
identifier=identifier, provider=provider,
created_by=creator, created_by_identity=identity)
volume = Volume.objects.create(
name=name, description=description, size=size, instance_source=source)
if created_on:
        # Django will accept a string date here and coerce it on save,
        # but we must re-fetch the object afterwards to get the real datetime.
        # NOTE: Why is this different than the method in convert_esh_instance?
# NOTE: -Steve
volume.start_date = pytz.utc.localize(created_on)
volume.save()
volume = Volume.objects.get(id=volume.id)
return volume
class VolumeStatus(models.Model):
"""
Used to enumerate the types of actions
(I.e. available, in-use, attaching, detaching)
"""
name = models.CharField(max_length=128)
UNKNOWN = "Unknown"
INUSE = "in-use"
ATTACHING = "attaching"
DETACHING = "detaching"
def __unicode__(self):
return "%s" % self.name
class Meta:
db_table = "volume_status"
app_label = "core"
class VolumeStatusHistory(models.Model):
"""
Used to keep track of each change in volume status.
"""
volume = models.ForeignKey(Volume)
status = models.ForeignKey(VolumeStatus)
device = models.CharField(max_length=128, null=True, blank=True)
instance_alias = models.CharField(max_length=36, null=True, blank=True)
start_date = models.DateTimeField(default=timezone.now)
end_date = models.DateTimeField(null=True, blank=True)
def __unicode__(self):
return "Volume:%s Status:%s Attachment:%s Start:%s End:%s" % (
self.volume, self.status,
"N/A" if not self.instance_alias else "Attached to %s(%s)" % (self.instance_alias, self.device),
self.start_date,
"" if not self.end_date else self.end_date)
@classmethod
def factory(cls, volume, start_date=None):
"""
Creates a new VolumeStatusHistory.
NOTE: Unsaved!
"""
status, _ = VolumeStatus.objects.get_or_create(
name=volume.get_status())
device = volume.get_device()
instance_alias = volume.get_instance_alias()
new_history = VolumeStatusHistory(
volume=volume,
device=device,
instance_alias=instance_alias,
status=status)
if start_date:
new_history.start_date = start_date
logger.debug("Created new history object: %s " % (new_history))
return new_history
def get_attach_data(self):
"""
Get attach_data from this VolumeStatusHistory.
"""
return {"device": self.device,
"id": self.volume.instance_source.identifier,
"instance_alias": self.instance_alias}
class Meta:
db_table = "volume_status_history"
app_label = "core"
def find_volume(volume_id):
    if isinstance(volume_id, int):
core_volume = Volume.objects.filter(id=volume_id)
else:
core_volume = Volume.objects.filter(source__identifier=volume_id)
if len(core_volume) > 1:
logger.warn(
"Multiple volumes returned for volume_id - %s" %
volume_id)
if core_volume:
return core_volume[0]
return None
|
CCI-MOC/GUI-Backend
|
core/models/volume.py
|
Python
|
apache-2.0
| 10,159
|
#Part 1: Terminology (15 points)
#1 1pt) What is the symbol "=" used for?
#The "=" symbol is called an assignment operator and it does what it says.
#It assigns values to variables, which is called an assignment statement.
#ex: x = 5
#2 3pts) Write a technical definition for 'function'
#A function is a named sequence of statements that performs a computation.
#Think of a function like a vending machine.
#Some input goes in (the coins), buttons are pressed (computation), and then the drink or food comes out (output).
#3 1pt) What does the keyword "return" do?
#"Return" returns a value. It is used at the end of function in order for the funtion to "spit" some value back out.
#4 5pts) We know 5 basic data types. Write the name for each one and provide two
# examples of each below
# 1: String: "hi", "bye", str(5)
# 2: Integers: 5, 6, 7, 8
# 3: Float: 5.0, 3.14, 0.8946
# 4: Boolean: True & False
# 5: Tuple: ("JJC", "is so cool"), (42, 3.14)
#5 2pts) What is the difference between a "function definition" and a
# "function call"?
#A function definition is the header (and body) that names the function and states what it does.
#ex: def a(b):
#Calling a function executes it: the code inside the function body actually runs.
#ex: a(5)
#6 3pts) What are the 3 phases that every computer program has? What happens in
# each of them
# 1: Input: The user gives the program some data, either through a variable or by calling raw_input().
# 2: Calculation/Computation: This is the main body of the program; it carries out whatever the code says to do.
# 3: Output: The output is the value produced once the computation has completed. This can be a sum of numbers or even a sentence referring to how cute cats can be.
#(Refer to the vending machine analogy above. Thank you.)
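#A tiny, hypothetical sketch (names invented for illustration) showing the three phases in order:
def three_phase_demo():
    number = float(raw_input("Pick a number: "))   # 1: input
    doubled = number * 2                           # 2: calculation / computation
    return "Twice your number is %s" % doubled     # 3: output, returned to the caller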
#
#Part 2: Programming (25 points)
#Write a program that asks the user for the areas of 3 circles.
#It should then calculate the diameter of each and the sum of the diameters
#of the 3 circles.
#Finally, it should produce output like this:
#Circle Diameter
#c1 ...
#c2 ...
#c3 ...
#TOTALS ...
import math
def output(ac1, ac2, ac3, dc1, dc2, dc3, td):
return """
    You have inserted the values: {}, {}, and {} as the areas of the three circles we will be using!
    This means that the diameters will be....hmmmm....
    The diameter of the first circle is {}.
    The diameter of the second circle is {}.
    The diameter of the third circle is {}.
    The total of the diameters of the first circle, second circle and the third circle is {}.
""".format(ac1, ac2, ac3, dc1, dc2, dc3, td)
def main():
print "Welcome to JJC's Magical Area to Diameter Calculator! \nNow lets begin!"
ac1 = float(raw_input("Please insert an area of a circle: "))
ac2 = float(raw_input(""""Another one" - DJ Khaled : """))
ac3 = float(raw_input("Now type some random number between 5 and 500: "))
dc1 = 2 * (math.sqrt((ac1/math.pi)))
dc2 = 2 * (math.sqrt((ac2/math.pi)))
dc3 = 2 * (math.sqrt((ac3/math.pi)))
td = dc1 + dc2 + dc3
print output(ac1, ac2, ac3, dc1, dc2, dc3, td)
main()
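#A hedged alternative formatter (not part of the submitted answer) that prints the
#"Circle / Diameter" table layout the Part 2 prompt above asks for:
def table_output(dc1, dc2, dc3, td):
    print "Circle   Diameter"
    print "c1       %.3f" % dc1
    print "c2       %.3f" % dc2
    print "c3       %.3f" % dc3
    print "TOTALS   %.3f" % td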
|
jaejun1679-cmis/jaejun1679-cmis-cs2
|
cs2quiz1.py
|
Python
|
cc0-1.0
| 3,123
|
import ConfigParser
import logging
from logging.handlers import RotatingFileHandler
from flask import Flask, url_for
app = Flask(__name__)
@app.route('/')
def root():
this_route = url_for('.root')
app.logger.info("Logging a test message from "+this_route)
return "Hello Napier from the configuration testing app (Now with added logging)"
def init(app):
config = ConfigParser.ConfigParser()
try:
config_location = "etc/logging.cfg"
config.read(config_location)
app.config['DEBUG'] = config.get("config", "debug")
app.config['ip_address'] = config.get("config", "ip_address")
app.config['port'] = config.get("config", "port")
app.config['url'] = config.get("config", "url")
app.config['log_file'] = config.get("logging", "name")
app.config['log_location'] = config.get("logging", "location")
app.config['log_level'] = config.get("logging", "level")
    except ConfigParser.Error:
print "Could not read configs from: ", config_location
def logs(app):
log_pathname = app.config['log_location'] + app.config['log_file']
file_handler = RotatingFileHandler(log_pathname, maxBytes=1024* 1024 * 10 , backupCount=1024)
file_handler.setLevel( app.config['log_level'] )
formatter = logging.Formatter("%(levelname)s | %(asctime)s | %(module)s | %(funcName)s | %(message)s")
file_handler.setFormatter(formatter)
app.logger.setLevel( app.config['log_level'] )
app.logger.addHandler(file_handler)
init(app)
logs(app)
if __name__ == '__main__':
init(app)
logs(app)
app.run(
host=app.config['ip_address'],
port=int(app.config['port']))
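# A sketch (not from the original repository) of generating an etc/logging.cfg with the
# sections and keys that init() and logs() above expect; every value shown is an assumption.
def write_example_config(path="etc/logging.cfg"):
    example = ConfigParser.ConfigParser()
    example.add_section("config")
    example.set("config", "debug", "True")
    example.set("config", "ip_address", "0.0.0.0")
    example.set("config", "port", "5000")
    example.set("config", "url", "http://localhost:5000/")
    example.add_section("logging")
    example.set("logging", "name", "app.log")
    example.set("logging", "location", "var/log/")
    example.set("logging", "level", "INFO")
    with open(path, "w") as cfg_file:
        example.write(cfg_file)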
|
siwells/teaching_set09103
|
code/topic_08/logs.py
|
Python
|
gpl-3.0
| 1,689
|
"""Common test objects."""
import copy
import json
from unittest.mock import ANY
from hatasmota.const import (
CONF_MAC,
CONF_OFFLINE,
CONF_ONLINE,
CONF_PREFIX,
PREFIX_CMND,
PREFIX_TELE,
)
from hatasmota.utils import (
config_get_state_offline,
config_get_state_online,
get_topic_tele_state,
get_topic_tele_will,
)
from homeassistant.components.tasmota.const import DEFAULT_PREFIX
from homeassistant.const import STATE_UNAVAILABLE
from homeassistant.helpers import device_registry as dr, entity_registry as er
from tests.common import async_fire_mqtt_message
DEFAULT_CONFIG = {
"ip": "192.168.15.10",
"dn": "Tasmota",
"fn": ["Test", "Beer", "Milk", "Four", None],
"hn": "tasmota_49A3BC-0956",
"if": 0, # iFan
"lk": 1, # RGB + white channels linked to a single light
"mac": "00000049A3BC",
"md": "Sonoff Basic",
"ofln": "Offline",
"onln": "Online",
"state": ["OFF", "ON", "TOGGLE", "HOLD"],
"sw": "8.4.0.2",
"swn": [None, None, None, None, None],
"t": "tasmota_49A3BC",
"ft": "%topic%/%prefix%/",
"tp": ["cmnd", "stat", "tele"],
"rl": [0, 0, 0, 0, 0, 0, 0, 0],
"swc": [-1, -1, -1, -1, -1, -1, -1, -1],
"btn": [0, 0, 0, 0],
"so": {
"4": 0, # Return MQTT response as RESULT or %COMMAND%
"11": 0, # Swap button single and double press functionality
"13": 0, # Allow immediate action on single button press
"17": 1, # Show Color string as hex or comma-separated
"20": 0, # Update of Dimmer/Color/CT without turning power on
"30": 0, # Enforce Home Assistant auto-discovery as light
"68": 0, # Multi-channel PWM instead of a single light
"73": 0, # Enable Buttons decoupling and send multi-press and hold MQTT messages
"82": 0, # Reduce the CT range from 153..500 to 200.380
"114": 0, # Enable sending switch MQTT messages
},
"ty": 0, # Tuya MCU
"lt_st": 0,
"ver": 1,
}
DEFAULT_CONFIG_9_0_0_3 = {
"ip": "192.168.15.10",
"dn": "Tasmota",
"fn": ["Test", "Beer", "Milk", "Four", None],
"hn": "tasmota_49A3BC-0956",
"lk": 1, # RGB + white channels linked to a single light
"mac": "00000049A3BC",
"md": "Sonoff Basic",
"ofln": "Offline",
"onln": "Online",
"state": ["OFF", "ON", "TOGGLE", "HOLD"],
"sw": "8.4.0.2",
"t": "tasmota_49A3BC",
"ft": "%topic%/%prefix%/",
"tp": ["cmnd", "stat", "tele"],
"rl": [0, 0, 0, 0, 0, 0, 0, 0],
"swc": [-1, -1, -1, -1, -1, -1, -1, -1],
"btn": [0, 0, 0, 0],
"so": {
"11": 0, # Swap button single and double press functionality
"13": 0, # Allow immediate action on single button press
"17": 1, # Show Color string as hex or comma-separated
"20": 0, # Update of Dimmer/Color/CT without turning power on
"30": 0, # Enforce Home Assistant auto-discovery as light
"68": 0, # Multi-channel PWM instead of a single light
"73": 0, # Enable Buttons decoupling and send multi-press and hold MQTT messages
"80": 0, # Blinds and shutters support
"82": 0, # Reduce the CT range from 153..500 to 200.380
},
"ty": 0, # Tuya MCU
"lt_st": 0,
"ver": 1,
}
async def help_test_availability_when_connection_lost(
hass,
mqtt_client_mock,
mqtt_mock,
domain,
config,
sensor_config=None,
entity_id="test",
):
"""Test availability after MQTT disconnection.
This is a test helper for the TasmotaAvailability mixin.
"""
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{config[CONF_MAC]}/config",
json.dumps(config),
)
await hass.async_block_till_done()
if sensor_config:
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{config[CONF_MAC]}/sensors",
json.dumps(sensor_config),
)
await hass.async_block_till_done()
# Device online
async_fire_mqtt_message(
hass,
get_topic_tele_will(config),
config_get_state_online(config),
)
state = hass.states.get(f"{domain}.{entity_id}")
assert state.state != STATE_UNAVAILABLE
# Disconnected from MQTT server -> state changed to unavailable
mqtt_mock.connected = False
await hass.async_add_executor_job(mqtt_client_mock.on_disconnect, None, None, 0)
await hass.async_block_till_done()
await hass.async_block_till_done()
await hass.async_block_till_done()
state = hass.states.get(f"{domain}.{entity_id}")
assert state.state == STATE_UNAVAILABLE
# Reconnected to MQTT server -> state still unavailable
mqtt_mock.connected = True
await hass.async_add_executor_job(mqtt_client_mock.on_connect, None, None, None, 0)
await hass.async_block_till_done()
await hass.async_block_till_done()
await hass.async_block_till_done()
state = hass.states.get(f"{domain}.{entity_id}")
assert state.state == STATE_UNAVAILABLE
# Receive LWT again
async_fire_mqtt_message(
hass,
get_topic_tele_will(config),
config_get_state_online(config),
)
state = hass.states.get(f"{domain}.{entity_id}")
assert state.state != STATE_UNAVAILABLE
async def help_test_availability(
hass,
mqtt_mock,
domain,
config,
sensor_config=None,
entity_id="test",
):
"""Test availability.
This is a test helper for the TasmotaAvailability mixin.
"""
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{config[CONF_MAC]}/config",
json.dumps(config),
)
await hass.async_block_till_done()
if sensor_config:
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{config[CONF_MAC]}/sensors",
json.dumps(sensor_config),
)
await hass.async_block_till_done()
state = hass.states.get(f"{domain}.{entity_id}")
assert state.state == STATE_UNAVAILABLE
async_fire_mqtt_message(
hass,
get_topic_tele_will(config),
config_get_state_online(config),
)
state = hass.states.get(f"{domain}.{entity_id}")
assert state.state != STATE_UNAVAILABLE
async_fire_mqtt_message(
hass,
get_topic_tele_will(config),
config_get_state_offline(config),
)
state = hass.states.get(f"{domain}.{entity_id}")
assert state.state == STATE_UNAVAILABLE
async def help_test_availability_discovery_update(
hass,
mqtt_mock,
domain,
config,
sensor_config=None,
entity_id="test",
):
"""Test update of discovered TasmotaAvailability.
This is a test helper for the TasmotaAvailability mixin.
"""
# customize availability topic
config1 = copy.deepcopy(config)
config1[CONF_PREFIX][PREFIX_TELE] = "tele1"
config1[CONF_OFFLINE] = "offline1"
config1[CONF_ONLINE] = "online1"
config2 = copy.deepcopy(config)
config2[CONF_PREFIX][PREFIX_TELE] = "tele2"
config2[CONF_OFFLINE] = "offline2"
config2[CONF_ONLINE] = "online2"
data1 = json.dumps(config1)
data2 = json.dumps(config2)
availability_topic1 = get_topic_tele_will(config1)
availability_topic2 = get_topic_tele_will(config2)
assert availability_topic1 != availability_topic2
offline1 = config_get_state_offline(config1)
offline2 = config_get_state_offline(config2)
assert offline1 != offline2
online1 = config_get_state_online(config1)
online2 = config_get_state_online(config2)
assert online1 != online2
async_fire_mqtt_message(hass, f"{DEFAULT_PREFIX}/{config1[CONF_MAC]}/config", data1)
await hass.async_block_till_done()
if sensor_config:
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{config[CONF_MAC]}/sensors",
json.dumps(sensor_config),
)
await hass.async_block_till_done()
state = hass.states.get(f"{domain}.{entity_id}")
assert state.state == STATE_UNAVAILABLE
async_fire_mqtt_message(hass, availability_topic1, online1)
state = hass.states.get(f"{domain}.{entity_id}")
assert state.state != STATE_UNAVAILABLE
async_fire_mqtt_message(hass, availability_topic1, offline1)
state = hass.states.get(f"{domain}.{entity_id}")
assert state.state == STATE_UNAVAILABLE
# Change availability settings
async_fire_mqtt_message(hass, f"{DEFAULT_PREFIX}/{config2[CONF_MAC]}/config", data2)
await hass.async_block_till_done()
# Verify we are no longer subscribing to the old topic or payload
async_fire_mqtt_message(hass, availability_topic1, online1)
async_fire_mqtt_message(hass, availability_topic1, online2)
async_fire_mqtt_message(hass, availability_topic2, online1)
state = hass.states.get(f"{domain}.{entity_id}")
assert state.state == STATE_UNAVAILABLE
# Verify we are subscribing to the new topic
async_fire_mqtt_message(hass, availability_topic2, online2)
state = hass.states.get(f"{domain}.{entity_id}")
assert state.state != STATE_UNAVAILABLE
async def help_test_availability_poll_state(
hass,
mqtt_client_mock,
mqtt_mock,
domain,
config,
poll_topic,
poll_payload,
sensor_config=None,
):
"""Test polling of state when device is available.
This is a test helper for the TasmotaAvailability mixin.
"""
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{config[CONF_MAC]}/config",
json.dumps(config),
)
await hass.async_block_till_done()
if sensor_config:
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{config[CONF_MAC]}/sensors",
json.dumps(sensor_config),
)
await hass.async_block_till_done()
mqtt_mock.async_publish.reset_mock()
# Device online, verify poll for state
async_fire_mqtt_message(
hass,
get_topic_tele_will(config),
config_get_state_online(config),
)
await hass.async_block_till_done()
await hass.async_block_till_done()
await hass.async_block_till_done()
mqtt_mock.async_publish.assert_called_once_with(poll_topic, poll_payload, 0, False)
mqtt_mock.async_publish.reset_mock()
# Disconnected from MQTT server
mqtt_mock.connected = False
await hass.async_add_executor_job(mqtt_client_mock.on_disconnect, None, None, 0)
await hass.async_block_till_done()
await hass.async_block_till_done()
await hass.async_block_till_done()
assert not mqtt_mock.async_publish.called
# Reconnected to MQTT server
mqtt_mock.connected = True
await hass.async_add_executor_job(mqtt_client_mock.on_connect, None, None, None, 0)
await hass.async_block_till_done()
await hass.async_block_till_done()
await hass.async_block_till_done()
assert not mqtt_mock.async_publish.called
# Device online, verify poll for state
async_fire_mqtt_message(
hass,
get_topic_tele_will(config),
config_get_state_online(config),
)
await hass.async_block_till_done()
await hass.async_block_till_done()
await hass.async_block_till_done()
mqtt_mock.async_publish.assert_called_once_with(poll_topic, poll_payload, 0, False)
async def help_test_discovery_removal(
hass,
mqtt_mock,
caplog,
domain,
config1,
config2,
sensor_config1=None,
sensor_config2=None,
entity_id="test",
name="Test",
):
"""Test removal of discovered entity."""
device_reg = dr.async_get(hass)
entity_reg = er.async_get(hass)
data1 = json.dumps(config1)
data2 = json.dumps(config2)
assert config1[CONF_MAC] == config2[CONF_MAC]
async_fire_mqtt_message(hass, f"{DEFAULT_PREFIX}/{config1[CONF_MAC]}/config", data1)
await hass.async_block_till_done()
if sensor_config1:
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{config1[CONF_MAC]}/sensors",
json.dumps(sensor_config1),
)
await hass.async_block_till_done()
# Verify device and entity registry entries are created
device_entry = device_reg.async_get_device(set(), {("mac", config1[CONF_MAC])})
assert device_entry is not None
entity_entry = entity_reg.async_get(f"{domain}.{entity_id}")
assert entity_entry is not None
# Verify state is added
state = hass.states.get(f"{domain}.{entity_id}")
assert state is not None
assert state.name == name
async_fire_mqtt_message(hass, f"{DEFAULT_PREFIX}/{config2[CONF_MAC]}/config", data2)
await hass.async_block_till_done()
if sensor_config1:
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{config2[CONF_MAC]}/sensors",
json.dumps(sensor_config2),
)
await hass.async_block_till_done()
# Verify entity registry entries are cleared
device_entry = device_reg.async_get_device(set(), {("mac", config2[CONF_MAC])})
assert device_entry is not None
entity_entry = entity_reg.async_get(f"{domain}.{entity_id}")
assert entity_entry is None
# Verify state is removed
state = hass.states.get(f"{domain}.{entity_id}")
assert state is None
async def help_test_discovery_update_unchanged(
hass,
mqtt_mock,
caplog,
domain,
config,
discovery_update,
sensor_config=None,
entity_id="test",
name="Test",
):
"""Test update of discovered component with and without changes.
This is a test helper for the MqttDiscoveryUpdate mixin.
"""
config1 = copy.deepcopy(config)
config2 = copy.deepcopy(config)
config2[CONF_PREFIX][PREFIX_CMND] = "cmnd2"
config2[CONF_PREFIX][PREFIX_TELE] = "tele2"
data1 = json.dumps(config1)
data2 = json.dumps(config2)
async_fire_mqtt_message(hass, f"{DEFAULT_PREFIX}/{config[CONF_MAC]}/config", data1)
await hass.async_block_till_done()
if sensor_config:
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{config[CONF_MAC]}/sensors",
json.dumps(sensor_config),
)
await hass.async_block_till_done()
state = hass.states.get(f"{domain}.{entity_id}")
assert state is not None
assert state.name == name
async_fire_mqtt_message(hass, f"{DEFAULT_PREFIX}/{config[CONF_MAC]}/config", data1)
await hass.async_block_till_done()
if sensor_config:
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{config[CONF_MAC]}/sensors",
json.dumps(sensor_config),
)
await hass.async_block_till_done()
assert not discovery_update.called
async_fire_mqtt_message(hass, f"{DEFAULT_PREFIX}/{config[CONF_MAC]}/config", data2)
await hass.async_block_till_done()
assert discovery_update.called
async def help_test_discovery_device_remove(
hass, mqtt_mock, domain, unique_id, config, sensor_config=None
):
"""Test domain entity is removed when device is removed."""
device_reg = dr.async_get(hass)
entity_reg = er.async_get(hass)
config = copy.deepcopy(config)
data = json.dumps(config)
async_fire_mqtt_message(hass, f"{DEFAULT_PREFIX}/{config[CONF_MAC]}/config", data)
await hass.async_block_till_done()
if sensor_config:
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{config[CONF_MAC]}/sensors",
json.dumps(sensor_config),
)
await hass.async_block_till_done()
device = device_reg.async_get_device(set(), {("mac", config[CONF_MAC])})
assert device is not None
assert entity_reg.async_get_entity_id(domain, "tasmota", unique_id)
async_fire_mqtt_message(hass, f"{DEFAULT_PREFIX}/{config[CONF_MAC]}/config", "")
await hass.async_block_till_done()
device = device_reg.async_get_device(set(), {("mac", config[CONF_MAC])})
assert device is None
assert not entity_reg.async_get_entity_id(domain, "tasmota", unique_id)
async def help_test_entity_id_update_subscriptions(
hass, mqtt_mock, domain, config, topics=None, sensor_config=None, entity_id="test"
):
"""Test MQTT subscriptions are managed when entity_id is updated."""
entity_reg = er.async_get(hass)
config = copy.deepcopy(config)
data = json.dumps(config)
mqtt_mock.async_subscribe.reset_mock()
async_fire_mqtt_message(hass, f"{DEFAULT_PREFIX}/{config[CONF_MAC]}/config", data)
await hass.async_block_till_done()
if sensor_config:
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{config[CONF_MAC]}/sensors",
json.dumps(sensor_config),
)
await hass.async_block_till_done()
if not topics:
topics = [get_topic_tele_state(config), get_topic_tele_will(config)]
assert len(topics) > 0
state = hass.states.get(f"{domain}.{entity_id}")
assert state is not None
assert mqtt_mock.async_subscribe.call_count == len(topics)
for topic in topics:
mqtt_mock.async_subscribe.assert_any_call(topic, ANY, ANY, ANY)
mqtt_mock.async_subscribe.reset_mock()
entity_reg.async_update_entity(
f"{domain}.{entity_id}", new_entity_id=f"{domain}.milk"
)
await hass.async_block_till_done()
state = hass.states.get(f"{domain}.{entity_id}")
assert state is None
state = hass.states.get(f"{domain}.milk")
assert state is not None
for topic in topics:
mqtt_mock.async_subscribe.assert_any_call(topic, ANY, ANY, ANY)
async def help_test_entity_id_update_discovery_update(
hass, mqtt_mock, domain, config, sensor_config=None, entity_id="test"
):
"""Test MQTT discovery update after entity_id is updated."""
entity_reg = er.async_get(hass)
config = copy.deepcopy(config)
data = json.dumps(config)
topic = get_topic_tele_will(config)
async_fire_mqtt_message(hass, f"{DEFAULT_PREFIX}/{config[CONF_MAC]}/config", data)
await hass.async_block_till_done()
if sensor_config:
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{config[CONF_MAC]}/sensors",
json.dumps(sensor_config),
)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, topic, config_get_state_online(config))
state = hass.states.get(f"{domain}.{entity_id}")
assert state.state != STATE_UNAVAILABLE
async_fire_mqtt_message(hass, topic, config_get_state_offline(config))
state = hass.states.get(f"{domain}.{entity_id}")
assert state.state == STATE_UNAVAILABLE
entity_reg.async_update_entity(
f"{domain}.{entity_id}", new_entity_id=f"{domain}.milk"
)
await hass.async_block_till_done()
assert hass.states.get(f"{domain}.milk")
assert config[CONF_PREFIX][PREFIX_TELE] != "tele2"
config[CONF_PREFIX][PREFIX_TELE] = "tele2"
data = json.dumps(config)
async_fire_mqtt_message(hass, f"{DEFAULT_PREFIX}/{config[CONF_MAC]}/config", data)
await hass.async_block_till_done()
assert len(hass.states.async_entity_ids(domain)) == 1
topic = get_topic_tele_will(config)
async_fire_mqtt_message(hass, topic, config_get_state_online(config))
state = hass.states.get(f"{domain}.milk")
assert state.state != STATE_UNAVAILABLE
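# A hedged sketch (not part of this module) of how a platform test typically calls one of
# the helpers above; the setup_tasmota fixture and the relay/switch details are assumptions.
async def example_switch_availability_test(hass, mqtt_mock, setup_tasmota):
    """Sketch: exercise help_test_availability for a discovered Tasmota switch."""
    config = copy.deepcopy(DEFAULT_CONFIG)
    config["rl"][0] = 1  # expose the first relay so a switch entity is discovered
    await help_test_availability(hass, mqtt_mock, "switch", config)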
|
w1ll1am23/home-assistant
|
tests/components/tasmota/test_common.py
|
Python
|
apache-2.0
| 19,194
|
# Programmer: Noah Osterhout
# Date: September 30th 2016 1:40PM EST
# Project: Kirby_Physics.py
#Ask what Problem they will be using
print()
print("This Program will find the misisng Variables using the three known ones and using PEMDAS")
print()
beetles_mem = input("What Beetles member will you be using? ")
gravity_global = -9.8
if beetles_mem == "John":
john_time = int(input("What is the Time in seconds? "))
new_john_time = john_time ** 2
john_Vi = int(input("What is the Initial Velocity? "))
    #Calculate using John Formula: d = Vi*t + (1/2)*g*t^2
    john_formula = john_Vi * john_time + .5 * gravity_global * new_john_time
print("The Distance would be", john_formula)
elif beetles_mem == "Paul":
paul_Vf = int(input("What is the Final Velocity? "))
    paul_Vi = int(input("What is the Initial Velocity? "))
paul_time = int(input("What is the Time in seconds? "))
#Calculate using Paul Formula
paul_formula = .5 * (paul_Vf + paul_Vi) * paul_time
print("The Distance would be", paul_formula)
elif beetles_mem == "George":
    george_Vi = int(input("What is the Initial Velocity? "))
george_time = int(input("What is the Time in seconds? "))
#Calculate using George Formula
george_formula = george_Vi + gravity_global * george_time
print("The Final Velocity is", george_formula)
elif beetles_mem == "Ringo":
ringo_Vi = int(input("What is the Initial Velocity? "))
new_ringo_Vi = ringo_Vi ** 2
ringo_dist = int(input("What is the Distance? "))
#Calculate using Ringo Formula
ringo_formula = new_ringo_Vi + 2 * gravity_global * ringo_dist
print("The Final Velocity is", ringo_formula, "EE 2")
elif beetles_mem == "Kirby":
print("Kirby wishes he was a Beetles member")
else: print("ERROR! Unknown Beetles Member!")
|
NoahFlowa/CTC_Projects
|
Osterhout_Python/Kirby_Physics.py
|
Python
|
mit
| 1,794
|