## @file
# This file is used to define class objects of the INF file [Binaries] section.
# It will be consumed by InfParser.
#
# Copyright (c) 2011, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
# distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
'''
InfBinaryObject
'''
import os
from copy import deepcopy
from Library import DataType as DT
from Library import GlobalData
import Logger.Log as Logger
from Logger import ToolError
from Logger import StringTable as ST
from Library.Misc import Sdict
from Object.Parser.InfCommonObject import InfSectionCommonDef
from Object.Parser.InfCommonObject import CurrentLine
from Library.Misc import ConvPathFromAbsToRel
from Library.ExpressionValidate import IsValidFeatureFlagExp
from Library.Misc import ValidFile
from Library.ParserValidate import IsValidPath
class InfBianryItem():
def __init__(self):
self.FileName = ''
self.Target = ''
self.FeatureFlagExp = ''
self.HelpString = ''
self.Type = ''
self.SupArchList = []
def SetFileName(self, FileName):
self.FileName = FileName
def GetFileName(self):
return self.FileName
def SetTarget(self, Target):
self.Target = Target
def GetTarget(self):
return self.Target
def SetFeatureFlagExp(self, FeatureFlagExp):
self.FeatureFlagExp = FeatureFlagExp
def GetFeatureFlagExp(self):
return self.FeatureFlagExp
def SetHelpString(self, HelpString):
self.HelpString = HelpString
def GetHelpString(self):
return self.HelpString
def SetType(self, Type):
self.Type = Type
def GetType(self):
return self.Type
def SetSupArchList(self, SupArchList):
self.SupArchList = SupArchList
def GetSupArchList(self):
return self.SupArchList
class InfBianryVerItem(InfBianryItem, CurrentLine):
def __init__(self):
InfBianryItem.__init__(self)
CurrentLine.__init__(self)
self.VerTypeName = ''
def SetVerTypeName(self, VerTypeName):
self.VerTypeName = VerTypeName
def GetVerTypeName(self):
return self.VerTypeName
class InfBianryUiItem(InfBianryItem, CurrentLine):
def __init__(self):
InfBianryItem.__init__(self)
CurrentLine.__init__(self)
self.UiTypeName = ''
def SetUiTypeName(self, UiTypeName):
self.UiTypeName = UiTypeName
    def GetUiTypeName(self):
        return self.UiTypeName
class InfBianryCommonItem(InfBianryItem, CurrentLine):
def __init__(self):
self.CommonType = ''
self.TagName = ''
self.Family = ''
InfBianryItem.__init__(self)
CurrentLine.__init__(self)
def SetCommonType(self, CommonType):
self.CommonType = CommonType
def GetCommonType(self):
return self.CommonType
def SetTagName(self, TagName):
self.TagName = TagName
def GetTagName(self):
return self.TagName
def SetFamily(self, Family):
self.Family = Family
def GetFamily(self):
return self.Family
##
# InfBinariesObject
#
# Class object of the INF file [Binaries] section.
#
class InfBinariesObject(InfSectionCommonDef):
def __init__(self):
self.Binaries = Sdict()
#
# Macro defined in this section should be only used in this section.
#
self.Macros = {}
InfSectionCommonDef.__init__(self)
    ## CheckVer
    #
    # Check [Binaries] section VER type items.
    #
def CheckVer(self, Ver, __SupArchList):
#
# Check Ver
#
for VerItem in Ver:
IsValidFileFlag = False
VerContent = VerItem[0]
VerComment = VerItem[1]
VerCurrentLine = VerItem[2]
GlobalData.gINF_CURRENT_LINE = VerCurrentLine
InfBianryVerItemObj = None
#
            # Should not have fewer than 2 elements
#
if len(VerContent) < 2:
Logger.Error("InfParser",
ToolError.FORMAT_INVALID,
ST.ERR_INF_PARSER_BINARY_ITEM_FORMAT_INVALID % (VerContent[0]),
File=VerCurrentLine.GetFileName(),
Line=VerCurrentLine.GetLineNo(),
ExtraData=VerCurrentLine.GetLineString())
return False
if len(VerContent) > 4:
Logger.Error("InfParser",
ToolError.FORMAT_INVALID,
ST.ERR_INF_PARSER_BINARY_ITEM_FORMAT_INVALID_MAX % (VerContent[0], 4),
File=VerCurrentLine.GetFileName(),
Line=VerCurrentLine.GetLineNo(),
ExtraData=VerCurrentLine.GetLineString())
return False
if len(VerContent) >= 2:
#
# Create a Ver Object.
#
InfBianryVerItemObj = InfBianryVerItem()
if VerContent[0] != DT.BINARY_FILE_TYPE_VER:
Logger.Error("InfParser",
ToolError.FORMAT_INVALID,
ST.ERR_INF_PARSER_BINARY_VER_TYPE % DT.BINARY_FILE_TYPE_VER,
File=VerCurrentLine.GetFileName(),
Line=VerCurrentLine.GetLineNo(),
ExtraData=VerCurrentLine.GetLineString())
InfBianryVerItemObj.SetVerTypeName(VerContent[0])
InfBianryVerItemObj.SetType(VerContent[0])
#
                # Verify whether the file exists
#
FullFileName = os.path.normpath(os.path.realpath(os.path.join(GlobalData.gINF_MODULE_DIR,
VerContent[1])))
if not (ValidFile(FullFileName) or ValidFile(VerContent[1])):
Logger.Error("InfParser",
ToolError.FORMAT_INVALID,
ST.ERR_INF_PARSER_BINARY_ITEM_FILE_NOT_EXIST % (VerContent[1]),
File=VerCurrentLine.GetFileName(),
Line=VerCurrentLine.GetLineNo(),
ExtraData=VerCurrentLine.GetLineString())
#
                # Validate file existence and format.
#
if IsValidPath(VerContent[1], GlobalData.gINF_MODULE_DIR):
IsValidFileFlag = True
else:
Logger.Error("InfParser",
ToolError.FORMAT_INVALID,
ST.ERR_INF_PARSER_FILE_NOT_EXIST_OR_NAME_INVALID % (VerContent[1]),
File=VerCurrentLine.GetFileName(),
Line=VerCurrentLine.GetLineNo(),
ExtraData=VerCurrentLine.GetLineString())
return False
if IsValidFileFlag:
                    VerContent[1] = ConvPathFromAbsToRel(VerContent[1],
                                        GlobalData.gINF_MODULE_DIR)
InfBianryVerItemObj.SetFileName(VerContent[1])
if len(VerContent) >= 3:
#
# Add Target information
#
InfBianryVerItemObj.SetTarget(VerContent[2])
if len(VerContent) == 4:
if VerContent[3].strip() == '':
Logger.Error("InfParser",
ToolError.FORMAT_INVALID,
ST.ERR_INF_PARSER_FEATURE_FLAG_EXP_MISSING,
File=VerCurrentLine.GetFileName(),
Line=VerCurrentLine.GetLineNo(),
ExtraData=VerCurrentLine.GetLineString())
#
                    # Validate the Feature Flag Expression
#
                    FeatureFlagRtv = IsValidFeatureFlagExp(VerContent[3].strip())
if not FeatureFlagRtv[0]:
Logger.Error("InfParser",
ToolError.FORMAT_INVALID,
ST.ERR_INF_PARSER_FEATURE_FLAG_EXP_SYNTAX_INVLID % (FeatureFlagRtv[1]),
File=VerCurrentLine.GetFileName(),
Line=VerCurrentLine.GetLineNo(),
ExtraData=VerCurrentLine.GetLineString())
InfBianryVerItemObj.SetFeatureFlagExp(VerContent[3])
InfBianryVerItemObj.SetSupArchList(__SupArchList)
#
            # Check for duplicate binary file names according to the following
            # rules (an illustrative standalone check appears after this class):
            #
            # A binary filename must not be duplicated within
            # a [Binaries] section. A binary filename may appear in
            # multiple architectural [Binaries] sections. A binary
            # filename listed in an architectural [Binaries] section
            # must not be listed in the common architectural
            # [Binaries] section.
            #
            # NOTE: This check does not report an error for now.
#
for Item in self.Binaries:
if Item.GetFileName() == InfBianryVerItemObj.GetFileName():
ItemSupArchList = Item.GetSupArchList()
for ItemArch in ItemSupArchList:
for VerItemObjArch in __SupArchList:
if ItemArch == VerItemObjArch:
#
# ST.ERR_INF_PARSER_ITEM_DUPLICATE
#
pass
if ItemArch.upper() == 'COMMON' or VerItemObjArch.upper() == 'COMMON':
#
# ERR_INF_PARSER_ITEM_DUPLICATE_COMMON
#
pass
            if InfBianryVerItemObj is not None:
                if InfBianryVerItemObj in self.Binaries:
BinariesList = self.Binaries[InfBianryVerItemObj]
BinariesList.append((InfBianryVerItemObj, VerComment))
self.Binaries[InfBianryVerItemObj] = BinariesList
else:
BinariesList = []
BinariesList.append((InfBianryVerItemObj, VerComment))
self.Binaries[InfBianryVerItemObj] = BinariesList
## ParseCommonBinary
#
    # Parse common binary type items of the [Binaries] section.
#
def ParseCommonBinary(self, CommonBinary, __SupArchList):
#
# Check common binary definitions
# Type | FileName | Target | Family | TagName | FeatureFlagExp
#
for Item in CommonBinary:
IsValidFileFlag = False
ItemContent = Item[0]
ItemComment = Item[1]
CurrentLineOfItem = Item[2]
GlobalData.gINF_CURRENT_LINE = CurrentLineOfItem
InfBianryCommonItemObj = None
if len(ItemContent) < 2:
Logger.Error("InfParser",
ToolError.FORMAT_INVALID,
ST.ERR_INF_PARSER_BINARY_ITEM_FORMAT_INVALID % (ItemContent[0]),
File=CurrentLineOfItem.GetFileName(),
Line=CurrentLineOfItem.GetLineNo(),
ExtraData=CurrentLineOfItem.GetLineString())
return False
if len(ItemContent) > 6:
Logger.Error("InfParser",
ToolError.FORMAT_INVALID,
ST.ERR_INF_PARSER_BINARY_ITEM_FORMAT_INVALID_MAX % (ItemContent[0], 6),
File=CurrentLineOfItem.GetFileName(),
Line=CurrentLineOfItem.GetLineNo(),
ExtraData=CurrentLineOfItem.GetLineString())
return False
if len(ItemContent) >= 2:
#
# Create a Common Object.
#
InfBianryCommonItemObj = InfBianryCommonItem()
#
# Convert Binary type.
#
BinaryFileType = ItemContent[0].strip()
                if BinaryFileType in ('RAW', 'ACPI', 'ASL'):
BinaryFileType = 'BIN'
if BinaryFileType not in DT.BINARY_FILE_TYPE_LIST:
Logger.Error("InfParser",
ToolError.FORMAT_INVALID,
ST.ERR_INF_PARSER_BINARY_ITEM_INVALID_FILETYPE % \
(DT.BINARY_FILE_TYPE_LIST.__str__()),
File=CurrentLineOfItem.GetFileName(),
Line=CurrentLineOfItem.GetLineNo(),
ExtraData=CurrentLineOfItem.GetLineString())
if BinaryFileType == 'SUBTYPE_GUID':
BinaryFileType = 'FREEFORM'
                if BinaryFileType in ('LIB', 'UEFI_APP'):
Logger.Error("InfParser",
ToolError.FORMAT_INVALID,
ST.ERR_INF_PARSER_BINARY_ITEM_INVALID_FILETYPE % \
(DT.BINARY_FILE_TYPE_LIST.__str__()),
File=CurrentLineOfItem.GetFileName(),
Line=CurrentLineOfItem.GetLineNo(),
ExtraData=CurrentLineOfItem.GetLineString())
InfBianryCommonItemObj.SetType(BinaryFileType)
InfBianryCommonItemObj.SetCommonType(ItemContent[0])
#
                # Verify whether the file exists
#
FullFileName = os.path.normpath(os.path.realpath(os.path.join(GlobalData.gINF_MODULE_DIR,
ItemContent[1])))
if not (ValidFile(FullFileName) or ValidFile(ItemContent[1])):
Logger.Error("InfParser",
ToolError.FORMAT_INVALID,
ST.ERR_INF_PARSER_BINARY_ITEM_FILE_NOT_EXIST % (ItemContent[1]),
File=CurrentLineOfItem.GetFileName(),
Line=CurrentLineOfItem.GetLineNo(),
ExtraData=CurrentLineOfItem.GetLineString())
#
                # Validate file existence and format.
#
if IsValidPath(ItemContent[1], GlobalData.gINF_MODULE_DIR):
IsValidFileFlag = True
else:
Logger.Error("InfParser",
ToolError.FORMAT_INVALID,
ST.ERR_INF_PARSER_FILE_NOT_EXIST_OR_NAME_INVALID % (ItemContent[1]),
File=CurrentLineOfItem.GetFileName(),
Line=CurrentLineOfItem.GetLineNo(),
ExtraData=CurrentLineOfItem.GetLineString())
return False
if IsValidFileFlag:
                    ItemContent[1] = ConvPathFromAbsToRel(ItemContent[1], GlobalData.gINF_MODULE_DIR)
InfBianryCommonItemObj.SetFileName(ItemContent[1])
if len(ItemContent) >= 3:
#
# Add Target information
#
InfBianryCommonItemObj.SetTarget(ItemContent[2])
if len(ItemContent) >= 4:
#
# Add Family information
#
InfBianryCommonItemObj.SetFamily(ItemContent[3])
if len(ItemContent) >= 5:
#
# TagName entries are build system specific. If there
# is content in the entry, the tool must exit
# gracefully with an error message that indicates build
# system specific content cannot be distributed using
# the UDP
#
if ItemContent[4].strip() != '':
Logger.Error("InfParser",
ToolError.FORMAT_INVALID,
ST.ERR_INF_PARSER_TAGNAME_NOT_PERMITTED % (ItemContent[4]),
File=CurrentLineOfItem.GetFileName(),
Line=CurrentLineOfItem.GetLineNo(),
ExtraData=CurrentLineOfItem.GetLineString())
if len(ItemContent) == 6:
#
# Add FeatureFlagExp
#
if ItemContent[5].strip() == '':
Logger.Error("InfParser",
ToolError.FORMAT_INVALID,
ST.ERR_INF_PARSER_FEATURE_FLAG_EXP_MISSING,
File=CurrentLineOfItem.GetFileName(),
Line=CurrentLineOfItem.GetLineNo(),
ExtraData=CurrentLineOfItem.GetLineString())
#
                    # Validate the Feature Flag Expression
#
FeatureFlagRtv = IsValidFeatureFlagExp(ItemContent[5].strip())
if not FeatureFlagRtv[0]:
Logger.Error("InfParser",
ToolError.FORMAT_INVALID,
ST.ERR_INF_PARSER_FEATURE_FLAG_EXP_SYNTAX_INVLID % (FeatureFlagRtv[1]),
File=CurrentLineOfItem.GetFileName(),
Line=CurrentLineOfItem.GetLineNo(),
ExtraData=CurrentLineOfItem.GetLineString())
InfBianryCommonItemObj.SetFeatureFlagExp(ItemContent[5])
InfBianryCommonItemObj.SetSupArchList(__SupArchList)
#
            # Check for duplicate binary file names according to the same
            # rules as in CheckVer above:
            #
            # A binary filename must not be duplicated within
            # a [Binaries] section. A binary filename may appear in
            # multiple architectural [Binaries] sections. A binary
            # filename listed in an architectural [Binaries] section
            # must not be listed in the common architectural
            # [Binaries] section.
            #
            # NOTE: This check does not report an error for now.
#
# for Item in self.Binaries:
# if Item.GetFileName() == InfBianryCommonItemObj.GetFileName():
# ItemSupArchList = Item.GetSupArchList()
# for ItemArch in ItemSupArchList:
# for ComItemObjArch in __SupArchList:
# if ItemArch == ComItemObjArch:
# #
# # ST.ERR_INF_PARSER_ITEM_DUPLICATE
# #
# pass
#
# if ItemArch.upper() == 'COMMON' or ComItemObjArch.upper() == 'COMMON':
# #
# # ERR_INF_PARSER_ITEM_DUPLICATE_COMMON
# #
# pass
            if InfBianryCommonItemObj is not None:
                if InfBianryCommonItemObj in self.Binaries:
BinariesList = self.Binaries[InfBianryCommonItemObj]
BinariesList.append((InfBianryCommonItemObj, ItemComment))
self.Binaries[InfBianryCommonItemObj] = BinariesList
else:
BinariesList = []
BinariesList.append((InfBianryCommonItemObj, ItemComment))
self.Binaries[InfBianryCommonItemObj] = BinariesList
def SetBinary(self, UiInf=None, Ver=None, CommonBinary=None, ArchList=None):
__SupArchList = []
for ArchItem in ArchList:
#
# Validate Arch
#
            if ArchItem == '' or ArchItem is None:
ArchItem = 'COMMON'
__SupArchList.append(ArchItem)
        if UiInf is not None:
if len(UiInf) > 0:
#
# Check UI
#
for UiItem in UiInf:
IsValidFileFlag = False
InfBianryUiItemObj = None
UiContent = UiItem[0]
UiComment = UiItem[1]
UiCurrentLine = UiItem[2]
GlobalData.gINF_CURRENT_LINE = deepcopy(UiItem[2])
#
                    # Should not have fewer than 2 elements
#
if len(UiContent) < 2:
Logger.Error("InfParser",
ToolError.FORMAT_INVALID,
ST.ERR_INF_PARSER_BINARY_ITEM_FORMAT_INVALID % (UiContent[0]),
File=UiCurrentLine.GetFileName(),
Line=UiCurrentLine.GetLineNo(),
ExtraData=UiCurrentLine.GetLineString())
return False
if len(UiContent) > 4:
Logger.Error("InfParser",
ToolError.FORMAT_INVALID,
ST.ERR_INF_PARSER_BINARY_ITEM_FORMAT_INVALID_MAX % (UiContent[0], 4),
File=UiCurrentLine.GetFileName(),
Line=UiCurrentLine.GetLineNo(),
ExtraData=UiCurrentLine.GetLineString())
return False
if len(UiContent) >= 2:
#
                        # Create a UI object.
#
InfBianryUiItemObj = InfBianryUiItem()
if UiContent[0] != 'UI':
Logger.Error("InfParser",
ToolError.FORMAT_INVALID,
ST.ERR_INF_PARSER_BINARY_VER_TYPE % ('UI'),
File=UiCurrentLine.GetFileName(),
Line=UiCurrentLine.GetLineNo(),
ExtraData=UiCurrentLine.GetLineString())
InfBianryUiItemObj.SetUiTypeName(UiContent[0])
InfBianryUiItemObj.SetType(UiContent[0])
#
                        # Verify whether the file exists
#
FullFileName = os.path.normpath(os.path.realpath(os.path.join(GlobalData.gINF_MODULE_DIR,
UiContent[1])))
if not (ValidFile(FullFileName) or ValidFile(UiContent[1])):
Logger.Error("InfParser",
ToolError.FORMAT_INVALID,
ST.ERR_INF_PARSER_BINARY_ITEM_FILE_NOT_EXIST % (UiContent[1]),
File=UiCurrentLine.GetFileName(),
Line=UiCurrentLine.GetLineNo(),
ExtraData=UiCurrentLine.GetLineString())
#
                        # Validate file existence and format.
#
if IsValidPath(UiContent[1], GlobalData.gINF_MODULE_DIR):
IsValidFileFlag = True
else:
Logger.Error("InfParser",
ToolError.FORMAT_INVALID,
ST.ERR_INF_PARSER_FILE_NOT_EXIST_OR_NAME_INVALID % (UiContent[1]),
File=UiCurrentLine.GetFileName(),
Line=UiCurrentLine.GetLineNo(),
ExtraData=UiCurrentLine.GetLineString())
return False
if IsValidFileFlag:
                            UiContent[1] = ConvPathFromAbsToRel(UiContent[1], GlobalData.gINF_MODULE_DIR)
InfBianryUiItemObj.SetFileName(UiContent[1])
if len(UiContent) >= 3:
#
# Add Target information
#
InfBianryUiItemObj.SetTarget(UiContent[2])
if len(UiContent) == 4:
if UiContent[3].strip() == '':
Logger.Error("InfParser",
ToolError.FORMAT_INVALID,
ST.ERR_INF_PARSER_FEATURE_FLAG_EXP_MISSING,
File=UiCurrentLine.GetFileName(),
Line=UiCurrentLine.GetLineNo(),
ExtraData=UiCurrentLine.GetLineString())
#
                            # Validate the Feature Flag Expression
#
FeatureFlagRtv = IsValidFeatureFlagExp(UiContent[3].strip())
if not FeatureFlagRtv[0]:
Logger.Error("InfParser",
ToolError.FORMAT_INVALID,
ST.ERR_INF_PARSER_FEATURE_FLAG_EXP_SYNTAX_INVLID % (FeatureFlagRtv[1]),
File=UiCurrentLine.GetFileName(),
Line=UiCurrentLine.GetLineNo(),
ExtraData=UiCurrentLine.GetLineString())
InfBianryUiItemObj.SetFeatureFlagExp(UiContent[3])
InfBianryUiItemObj.SetSupArchList(__SupArchList)
#
                    # Check for duplicate binary file names according to the
                    # same rules as in CheckVer above:
                    #
                    # A binary filename must not be duplicated within
                    # a [Binaries] section. A binary filename may appear in
                    # multiple architectural [Binaries] sections. A binary
                    # filename listed in an architectural [Binaries] section
                    # must not be listed in the common architectural
                    # [Binaries] section.
                    #
                    # NOTE: This check does not report an error for now.
#
# for Item in self.Binaries:
# if Item.GetFileName() == InfBianryUiItemObj.GetFileName():
# ItemSupArchList = Item.GetSupArchList()
# for ItemArch in ItemSupArchList:
# for UiItemObjArch in __SupArchList:
# if ItemArch == UiItemObjArch:
# #
# # ST.ERR_INF_PARSER_ITEM_DUPLICATE
# #
# pass
# if ItemArch.upper() == 'COMMON' or UiItemObjArch.upper() == 'COMMON':
# #
# # ERR_INF_PARSER_ITEM_DUPLICATE_COMMON
# #
# pass
                    if InfBianryUiItemObj is not None:
                        if InfBianryUiItemObj in self.Binaries:
BinariesList = self.Binaries[InfBianryUiItemObj]
BinariesList.append((InfBianryUiItemObj, UiComment))
self.Binaries[InfBianryUiItemObj] = BinariesList
else:
BinariesList = []
BinariesList.append((InfBianryUiItemObj, UiComment))
self.Binaries[InfBianryUiItemObj] = BinariesList
        if Ver is not None and len(Ver) > 0:
self.CheckVer(Ver, __SupArchList)
if CommonBinary and len(CommonBinary) > 0:
self.ParseCommonBinary(CommonBinary, __SupArchList)
return True
def GetBinary(self):
return self.Binaries
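##
# A minimal standalone sketch (not part of the original module) of the
# duplicate binary file name rule that the comments in CheckVer describe but
# do not yet enforce: a file name may appear in several architectural
# [Binaries] sections, but must not be repeated for the same architecture and
# must not appear in both an architectural section and the common section.
# All parameter names here are hypothetical.
#
def _ExampleDuplicateFileNameCheck(Binaries, NewItemObj, NewSupArchList):
    for Item in Binaries:
        if Item.GetFileName() != NewItemObj.GetFileName():
            continue
        for ItemArch in Item.GetSupArchList():
            for NewArch in NewSupArchList:
                if ItemArch == NewArch:
                    return 'ERR_INF_PARSER_ITEM_DUPLICATE'
                if ItemArch.upper() == 'COMMON' or NewArch.upper() == 'COMMON':
                    return 'ERR_INF_PARSER_ITEM_DUPLICATE_COMMON'
    return None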
# Copyright (c) 2012 Cloudera, Inc. All rights reserved.
#
# This module is used for common utilities related to parsing test files
import collections
import codecs
import logging
import re
from collections import defaultdict
from os.path import isfile, isdir
from tests.common.test_dimensions import TableFormatInfo
logging.basicConfig(level=logging.INFO, format='%(threadName)s: %(message)s')
LOG = logging.getLogger('impala_test_suite')
# constants
SECTION_DELIMITER = "===="
SUBSECTION_DELIMITER = "----"
# The QueryTestSectionReader provides utility functions that help to parse content
# from a query test file
class QueryTestSectionReader(object):
@staticmethod
def build_query(query_section_text):
"""Build a query by stripping comments and trailing semi-colons."""
query_section_text = remove_comments(query_section_text)
return query_section_text.rstrip(';')
@staticmethod
def get_table_name_components(table_format, table_name, scale_factor=''):
"""
Returns a pair (db_name, tbl_name). If the table_name argument is
fully qualified, return the database name mentioned there,
otherwise get the default db name from the table format and scale
factor.
"""
# If table name is fully qualified return the db prefix
split = table_name.split('.')
assert len(split) <= 2, 'Unexpected table format: %s' % table_name
db_name = split[0] if len(split) == 2 else \
QueryTestSectionReader.get_db_name(table_format, scale_factor)
return (db_name, split[-1])
@staticmethod
def get_db_name(table_format, scale_factor=''):
"""
Get the database name to use.
Database names are dependent on the scale factor, file format, compression type
and compression codec. This method returns the appropriate database name to the
caller based on the table format information provided.
"""
if table_format.file_format == 'text' and table_format.compression_codec == 'none':
suffix = ''
elif table_format.compression_codec == 'none':
suffix = '_%s' % (table_format.file_format)
elif table_format.compression_type == 'record':
suffix = '_%s_record_%s' % (table_format.file_format,
table_format.compression_codec)
else:
suffix = '_%s_%s' % (table_format.file_format, table_format.compression_codec)
dataset = table_format.dataset.replace('-', '')
return dataset + scale_factor + suffix
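# A minimal usage sketch (not part of the original module): a fully qualified
# table name keeps its own db prefix, so get_table_name_components never
# consults the table format and the table_format argument may be None here.
def _example_table_name_components():
  return QueryTestSectionReader.get_table_name_components(None, 'functional.alltypes')
  # -> ('functional', 'alltypes')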
def remove_comments(section_text):
return '\n'.join([l for l in section_text.split('\n') if not l.strip().startswith('#')])
def parse_query_test_file(file_name, valid_section_names=None, encoding=None):
"""
  Reads the specified query test file, accepting the given list of valid section names.
  Uses a default list of valid section names if valid_section_names is None.
Returns the result as a list of dictionaries. Each dictionary in the list corresponds
to a test case and each key in the dictionary maps to a section in that test case.
"""
# Update the valid section names as we support other test types
# (ex. planner, data error)
section_names = valid_section_names
if section_names is None:
section_names = ['QUERY', 'RESULTS', 'TYPES', 'LABELS', 'SETUP', 'CATCH', 'ERRORS',
'USER']
return parse_test_file(file_name, section_names, encoding=encoding,
skip_unknown_sections=False)
def parse_table_constraints(constraints_file):
"""Reads a table contraints file, if one exists"""
schema_include = defaultdict(list)
schema_exclude = defaultdict(list)
if not isfile(constraints_file):
    LOG.info('No schema constraints file found')
else:
with open(constraints_file, 'rb') as constraints_file:
for line in constraints_file.readlines():
line = line.strip()
if not line or line.startswith('#'):
continue
# Format: table_name:<name>, constraint_type:<type>, table_format:<t1>,<t2>,...
table_name, constraint_type, table_formats =\
[value.split(':')[1].strip() for value in line.split(',', 2)]
if constraint_type == 'restrict_to':
schema_include[table_name.lower()] +=\
map(parse_table_format_constraint, table_formats.split(','))
elif constraint_type == 'exclude':
schema_exclude[table_name.lower()] +=\
map(parse_table_format_constraint, table_formats.split(','))
else:
          raise ValueError('Unknown constraint type: %s' % constraint_type)
return schema_include, schema_exclude
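# A minimal sketch (not part of the original module) of how one constraints
# line is decomposed by parse_table_constraints above, assuming the
# "key:value" comma-separated format documented there. The table and format
# names are hypothetical.
def _example_constraint_line():
  line = 'table_name:alltypes, constraint_type:restrict_to, table_format:text/none,seq/snap'
  table_name, constraint_type, table_formats =\
      [value.split(':')[1].strip() for value in line.split(',', 2)]
  # Yields ('alltypes', 'restrict_to', ['text/none', 'seq/snap'])
  return table_name, constraint_type, table_formats.split(',')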
def parse_table_format_constraint(table_format_constraint):
# TODO: Expand how we parse table format constraints to support syntax such as
# a table format string with a wildcard character. Right now we don't do anything.
return table_format_constraint
def parse_test_file(test_file_name, valid_section_names, skip_unknown_sections=True,
encoding=None):
"""
Parses an Impala test file
Test files have the format:
==== <- Section
---- [Name] <- Named subsection
// some text
---- [Name2] <- Named subsection
...
====
The valid section names are passed in to this function. The encoding to use
when reading the data can be specified with the 'encoding' flag.
"""
with open(test_file_name, 'rb') as test_file:
file_data = test_file.read()
if encoding: file_data = file_data.decode(encoding)
return parse_test_file_text(file_data, valid_section_names,
skip_unknown_sections)
def parse_test_file_text(text, valid_section_names, skip_unknown_sections=True):
sections = list()
section_start_regex = re.compile(r'(?m)^%s' % SECTION_DELIMITER)
match = section_start_regex.search(text)
if match is not None:
# Assume anything before the first section (==== tag) is a header and ignore it
text = text[match.start():]
# Split the test file up into sections. For each section, parse all subsections.
for section in section_start_regex.split(text):
parsed_sections = collections.defaultdict(str)
for sub_section in re.split(r'(?m)^%s' % SUBSECTION_DELIMITER, section[1:]):
# Skip empty subsections
if not sub_section:
continue
lines = sub_section.split('\n')
subsection_name = lines[0].strip()
subsection_comment = None
subsection_info = [s.strip() for s in subsection_name.split(':')]
      if len(subsection_info) == 2:
subsection_name, subsection_comment = subsection_info
if subsection_name not in valid_section_names:
if skip_unknown_sections or not subsection_name:
print sub_section
print 'Unknown section %s' % subsection_name
continue
else:
          raise RuntimeError('Unknown subsection: %s' % subsection_name)
if subsection_name == 'QUERY' and subsection_comment:
parsed_sections['QUERY_NAME'] = subsection_comment
if subsection_name == 'RESULTS' and subsection_comment:
parsed_sections['VERIFIER'] = subsection_comment
      parsed_sections[subsection_name] = '\n'.join(lines[1:-1])
if parsed_sections:
sections.append(parsed_sections)
return sections
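# A minimal sketch (not part of the original module) that exercises
# parse_test_file_text on an in-memory test file, illustrating the ====/----
# format described in parse_test_file. The section contents are hypothetical.
def _example_parse_test_file_text():
  sample = ('====\n'
            '---- QUERY: my_query\n'
            'select 1\n'
            '---- RESULTS\n'
            '1\n'
            '====\n')
  sections = parse_test_file_text(sample, ['QUERY', 'RESULTS'])
  # sections[0] holds {'QUERY_NAME': 'my_query', 'QUERY': 'select 1',
  # 'RESULTS': '1'}: the subsection comment became the query name.
  return sections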
def write_test_file(test_file_name, test_file_sections, encoding=None):
"""
Given a list of test file sections, write out the corresponding test file
This is useful when updating the results of a test.
The file encoding can be specified in the 'encoding' parameter. If not specified
the default system encoding will be used.
"""
with codecs.open(test_file_name, 'w', encoding=encoding) as test_file:
test_file_text = list()
for test_case in test_file_sections:
test_file_text.append(SECTION_DELIMITER)
for section_name, section_value in test_case.items():
# Have to special case query name and verifier because they have annotations
# in the headers
if section_name in ['QUERY_NAME', 'VERIFIER']:
continue
# TODO: We need a more generic way of persisting the old test file.
# Special casing will blow up.
full_section_name = section_name
if section_name == 'QUERY' and test_case.get('QUERY_NAME'):
full_section_name = '%s: %s' % (section_name, test_case['QUERY_NAME'])
if section_name == 'RESULTS' and test_case.get('VERIFIER'):
full_section_name = '%s: %s' % (section_name, test_case['VERIFIER'])
test_file_text.append("%s %s" % (SUBSECTION_DELIMITER, full_section_name))
if test_case[section_name].strip():
test_file_text.append(test_case[section_name])
test_file_text.append(SECTION_DELIMITER)
test_file.write(('\n').join(test_file_text))
import unittest, random, sys, time
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_import as h2i, h2o_exec, h2o_glm, h2o_jobs
import h2o_print as h2p
SCIPY_INSTALLED = True
try:
import scipy as sp
import numpy as np
import sklearn as sk
print "numpy, scipy and sklearn are installed. Will do extra checks"
except ImportError:
print "numpy, scipy or sklearn is not installed. Will just do h2o stuff"
SCIPY_INSTALLED = False
#*********************************************************************************
def do_scipy_glm(self, bucket, csvPathname, L, family='binomial'):
h2p.red_print("Now doing sklearn")
h2p.red_print("\nsee http://scikit-learn.org/0.11/modules/generated/sklearn.linear_model.LogisticRegression.html#sklearn.linear_model.LogisticRegression")
import numpy as np
import scipy as sp
from sklearn.linear_model import LogisticRegression
from numpy import loadtxt
csvPathnameFull = h2i.find_folder_and_filename(bucket, csvPathname, returnFullPath=True)
# make sure it does fp divide
C = 1/(L+0.0)
print "C regularization:", C
dataset = np.loadtxt(
open(csvPathnameFull,'r'),
skiprows=1, # skip the header
delimiter=',',
dtype='float');
print "\ncsv read for training, done"
n_features = len(dataset[0]) - 1;
print "n_features:", n_features
# don't want ID (col 0) or CAPSULE (col 1)
# get CAPSULE
target = [x[1] for x in dataset]
# slice off the first 2
train = np.array ( [x[2:] for x in dataset] )
n_samples, n_features = train.shape
print "n_samples:", n_samples, "n_features:", n_features
print "histogram of target"
print sp.histogram(target,3)
print "len(train):", len(train)
print "len(target):", len(target)
print "dataset shape:", dataset.shape
if family!='binomial':
raise Exception("Only have binomial logistic for scipy")
print "\nTrying l2"
clf2 = LogisticRegression(
C=C,
dual=False,
fit_intercept=True,
intercept_scaling=1,
penalty='l2',
tol=0.0001);
# train the classifier
start = time.time()
clf2.fit(train, target)
print "L2 fit took", time.time() - start, "seconds"
# print "coefficients:", clf2.coef_
cstring = "".join([("%.5e " % c) for c in clf2.coef_[0]])
h2p.green_print("sklearn L2 C", C)
h2p.green_print("sklearn coefficients:", cstring)
h2p.green_print("sklearn intercept:", "%.5e" % clf2.intercept_[0])
h2p.green_print("sklearn score:", clf2.score(train,target))
print "\nTrying l1"
clf1 = LogisticRegression(
C=C,
dual=False,
fit_intercept=True,
intercept_scaling=1,
penalty='l1',
tol=0.0001);
# train the classifier
start = time.time()
clf1.fit(train, target)
print "L1 fit took", time.time() - start, "seconds"
# print "coefficients:", clf1.coef_
cstring = "".join([("%.5e " % c) for c in clf1.coef_[0]])
h2p.green_print("sklearn L1 C", C)
h2p.green_print("sklearn coefficients:", cstring)
h2p.green_print("sklearn intercept:", "%.5e" % clf1.intercept_[0])
h2p.green_print("sklearn score:", clf1.score(train,target))
# attributes are accessed in the normal python way
dx = clf1.__dict__
dx.keys()
## ['loss', 'C', 'dual', 'fit_intercept', 'class_weight_label', 'label_',
## 'penalty', 'multi_class', 'raw_coef_', 'tol', 'class_weight',
## 'intercept_scaling']
#*********************************************************************************
def do_h2o_glm(self, bucket, csvPathname, L, family='binomial'):
h2p.red_print("\nNow doing h2o")
    parseResult = h2i.import_parse(bucket=bucket, path=csvPathname, schema='local', timeoutSecs=180)
# save the resolved pathname for use in the sklearn csv read below
inspect = h2o_cmd.runInspect(None, parseResult['destination_key'])
print inspect
print "\n" + csvPathname, \
" numRows:", "{:,}".format(inspect['numRows']), \
" numCols:", "{:,}".format(inspect['numCols'])
x = 'ID'
y = 'CAPSULE'
family = family
alpha = '0'
lambda_ = L
nfolds = '0'
f = 'prostate'
modelKey = 'GLM_' + f
kwargs = {
'response' : y,
'ignored_cols' : x,
'family' : family,
'lambda' : lambda_,
'alpha' : alpha,
'n_folds' : nfolds, # passes if 0, fails otherwise
'destination_key' : modelKey,
}
timeoutSecs = 60
start = time.time()
glmResult = h2o_cmd.runGLM(parseResult=parseResult, timeoutSecs=timeoutSecs, **kwargs)
# this stuff was left over from when we got the result after polling the jobs list
# okay to do it again
# GLM2: when it redirects to the model view, we no longer have the job_key! (unlike the first response and polling)
(warnings, clist, intercept) = h2o_glm.simpleCheckGLM(self, glmResult, None, **kwargs)
cstring = "".join([("%.5e " % c) for c in clist])
h2p.green_print("h2o alpha ", alpha)
h2p.green_print("h2o lambda ", lambda_)
h2p.green_print("h2o coefficient list:", cstring)
h2p.green_print("h2o intercept", "%.5e " % intercept)
# other stuff in the json response
glm_model = glmResult['glm_model']
_names = glm_model['_names']
coefficients_names = glm_model['coefficients_names']
    # The first submodel is the right one if only one lambda is provided as a parameter above.
submodels = glm_model['submodels'][0]
beta = submodels['beta']
h2p.red_print("beta:", beta)
norm_beta = submodels['norm_beta']
iteration = submodels['iteration']
validation = submodels['validation']
auc = validation['auc']
aic = validation['aic']
null_deviance = validation['null_deviance']
residual_deviance = validation['residual_deviance']
print '_names', _names
print 'coefficients_names', coefficients_names
# did beta get shortened? the simple check confirms names/beta/norm_beta are same length
print 'beta', beta
print 'iteration', iteration
print 'auc', auc
#*********************************************************************************
# the actual test that will run both
#*********************************************************************************
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
h2o.init(1, java_heap_GB=10)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_GLM2_basic_cmp(self):
bucket = 'smalldata'
importFolderPath = "logreg"
csvFilename = 'prostate.csv'
csvPathname = importFolderPath + "/" + csvFilename
# use L for lambda in h2o, C=1/L in sklearn
family = 'binomial'
L = 1e-4
do_h2o_glm(self, bucket, csvPathname, L, family)
if SCIPY_INSTALLED:
do_scipy_glm(self, bucket, csvPathname, L, family)
# since we invert for C, can't use 0 (infinity)
L = 1e-13
        # C in sklearn specifies the inverse strength of the regularization:
        # the smaller C is, the stronger the regularization.
        # We set it to 1/L.
do_h2o_glm(self, bucket, csvPathname, L, family)
if SCIPY_INSTALLED:
do_scipy_glm(self, bucket, csvPathname, L, family)
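# A minimal sketch (not part of the original test) of the lambda <-> C mapping
# described in the comments above: sklearn's C is the reciprocal of h2o's
# lambda, so L = 1e-4 maps to C = 1e4 and L = 1e-13 to C = 1e13.
def regularization_c_from_lambda(L):
    # float-divide so an integer L cannot truncate (same trick as do_scipy_glm)
    return 1 / (L + 0.0)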
if __name__ == '__main__':
h2o.unit_main()
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of handler for split nodes for float columns.
The general idea in batch split finding is that each handler will accumulate its
own statistics on multiple workers. After some steps, the master runs
make_splits() sub-graph of each handler and each handler returns its best split
per partition.
The way we ensure consistency of statistics is by using stamp_tokens for read
and write operations. During each update of the model, a new stamp token is
created. This stamp token makes sure that updates from the previous iterations
are not included in the statistics for this iteration.
Inequality splits for float features are created similarly to the approximate
algorithm described in https://arxiv.org/pdf/1603.02754v3.pdf.
Weighted quantiles of the feature columns are computed in a distributed fashion
using quantile_ops.quantile_accumulator.
After a certain number of steps of parallel accumulation of quantile statistics,
we decide on bucket boundaries. These bucket boundaries are then used for the
next N steps to accumulate gradients and hessians per bucket.
In this implementation, we gather quantile statistics and gradient statistics
concurrently. That means that we don't wait until we have enough quantile
statistics for bucketization before we start gathering gradient stats. Instead
during each step we create quantile stats for the next iteration and use the
previous quantile buckets for gradient stats accumulation.
In make_splits, we do these steps:
1) Get the buckets that were used for creating the gradient stats.
2) Create bucket boundaries for the next N iterations and clear the accumulated
quantile stats.
3) Get the accumulated gradient stats and clear the accumulator. This step can
run in parallel to step 2.
4) For each leaf node in the current tree (partition):
4.1) Get the overall gain computed with gradients and hessians of all
examples that end up in this partition.
4.2) Compute tensors of left and right cumulative sum of gradients, hessians
and gain. The first dimension of these tensors are the bucket
boundaries.
4.3) Find the gains for all bucket boundaries:
split_gains = left_gain + right_gain - overall_gain.
4.4) Find the bucket boundary that has the best gain (argmax(split_gains))
 4.5) For the sparse handler, we also consider the gain for when the examples
      go to the left child and when the examples go to the right child, and
      pick the default direction that yields the most gain.

A toy scalar sketch of steps 4.1-4.4 follows the module constants below.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from tensorflow.contrib.boosted_trees.lib.learner.batch import base_split_handler
from tensorflow.contrib.boosted_trees.python.ops import quantile_ops
from tensorflow.contrib.boosted_trees.python.ops import split_handler_ops
from tensorflow.contrib.boosted_trees.python.ops import stats_accumulator_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
_BIAS_FEATURE_ID = -1
# Pattern to remove all non-alphanumeric characters from a string.
_PATTERN = re.compile(r"[\W_]+")
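# A toy, purely scalar sketch (not part of the original module) of steps
# 4.1-4.4 in the module docstring for a single partition: given per-bucket
# gradient and hessian sums, scan the bucket boundaries and pick the one
# maximizing split_gain = left_gain + right_gain - overall_gain, with
# gain(g, h) = g^2 / (h + l2). The l2 term stands in for the handler's
# l2_regularization; all names here are hypothetical.
def _toy_best_inequality_split(bucket_gradients, bucket_hessians, l2):
  def gain(g, h):
    return g * g / (h + l2)
  total_g = sum(bucket_gradients)
  total_h = sum(bucket_hessians)
  overall_gain = gain(total_g, total_h)
  best_gain, best_bucket = None, None
  left_g = left_h = 0.0
  for bucket in range(len(bucket_gradients) - 1):
    left_g += bucket_gradients[bucket]
    left_h += bucket_hessians[bucket]
    split_gain = (gain(left_g, left_h) +
                  gain(total_g - left_g, total_h - left_h) - overall_gain)
    if best_gain is None or split_gain > best_gain:
      best_gain, best_bucket = split_gain, bucket
  return best_gain, best_bucket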
class InequalitySplitHandler(base_split_handler.BaseSplitHandler):
"""Base class for handlers of inequality splits."""
def __init__(self,
l1_regularization,
l2_regularization,
tree_complexity_regularization,
min_node_weight,
feature_column_group_id,
epsilon,
num_quantiles,
gradient_shape,
hessian_shape,
multiclass_strategy,
init_stamp_token=0,
name=None):
"""Initialize the internal state for this split handler.
Args:
l1_regularization: L1 regularization applied for this split handler.
l2_regularization: L2 regularization applied for this split handler.
tree_complexity_regularization: Tree complexity regularization applied
for this split handler.
min_node_weight: Minimum sum of weights of examples in each partition to
be considered for splitting.
feature_column_group_id: Feature column group index.
epsilon: A float, the error bound for quantile computation.
num_quantiles: An int, the number of buckets to create from the histogram.
gradient_shape: A TensorShape, containing shape of gradients.
hessian_shape: A TensorShape, containing shape of hessians.
multiclass_strategy: Strategy describing how to treat multiclass problems.
      init_stamp_token: A tensor containing a scalar for the initial stamp of
        the stamped objects.
name: An optional handler name.
"""
super(InequalitySplitHandler, self).__init__(
name=name,
l1_regularization=l1_regularization,
l2_regularization=l2_regularization,
tree_complexity_regularization=tree_complexity_regularization,
min_node_weight=min_node_weight,
feature_column_group_id=feature_column_group_id,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=multiclass_strategy)
self._stats_accumulator = stats_accumulator_ops.StatsAccumulator(
init_stamp_token,
gradient_shape,
hessian_shape,
name="StatsAccumulator/{}".format(self._name))
self._quantile_accumulator = quantile_ops.QuantileAccumulator(
init_stamp_token,
epsilon=epsilon,
num_quantiles=num_quantiles,
name="QuantileAccumulator/{}".format(self._name))
class DenseSplitHandler(InequalitySplitHandler):
"""Computes stats and finds the best inequality splits on dense columns."""
def __init__(self,
dense_float_column,
l1_regularization,
l2_regularization,
tree_complexity_regularization,
min_node_weight,
feature_column_group_id,
epsilon,
num_quantiles,
gradient_shape,
hessian_shape,
multiclass_strategy,
init_stamp_token=0,
name=None):
"""Initialize the internal state for this split handler.
Args:
dense_float_column: A `Tensor` column associated with this handler.
l1_regularization: L1 regularization applied for this split handler.
l2_regularization: L2 regularization applied for this split handler.
tree_complexity_regularization: Tree complexity regularization applied
for this split handler.
min_node_weight: Minimum sum of weights of examples in each partition to
be considered for splitting.
feature_column_group_id: Feature column group index.
epsilon: A float, the error bound for quantile computation.
num_quantiles: An int, the number of buckets to create from the histogram.
gradient_shape: A TensorShape, containing shape of gradients.
hessian_shape: A TensorShape, containing shape of hessians.
multiclass_strategy: Strategy describing how to treat multiclass problems.
      init_stamp_token: A tensor containing a scalar for the initial stamp of
        the stamped objects.
name: An optional handler name.
"""
super(DenseSplitHandler, self).__init__(
l1_regularization=l1_regularization,
l2_regularization=l2_regularization,
tree_complexity_regularization=tree_complexity_regularization,
min_node_weight=min_node_weight,
feature_column_group_id=feature_column_group_id,
epsilon=epsilon,
num_quantiles=num_quantiles,
init_stamp_token=init_stamp_token,
name=name,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=multiclass_strategy)
self._dense_float_column = dense_float_column
# Register dense_make_stats_update function as an Op to the graph.
g = ops.get_default_graph()
dense_make_stats_update.add_to_graph(g)
def scheduled_reads(self):
return [self._quantile_accumulator.schedule_get_buckets()]
def update_stats(self, stamp_token, example_partition_ids, gradients,
hessians, empty_gradients, empty_hessians, weights,
is_active, scheduled_reads):
"""Updates the state for dense split handler.
Args:
stamp_token: An int32 scalar tensor containing the current stamp token.
example_partition_ids: A dense tensor, containing an int32 for each
example which is the partition id that the example ends up in.
gradients: A dense tensor of gradients.
hessians: A dense tensor of hessians.
empty_gradients: A dense empty tensor of the same shape (for dimensions >
0) as gradients.
empty_hessians: A dense empty tensor of the same shape (for dimensions >
0) as hessians.
weights: A dense float32 tensor with a weight for each example.
is_active: A boolean tensor that says if this handler is active or not.
One value for the current layer and one value for the next layer.
scheduled_reads: List of scheduled reads for this handler.
Returns:
The op that updates the stats for this handler.
"""
name = _PATTERN.sub("", self._name)
with ops.name_scope(name, "DenseSplitHandler"):
are_buckets_ready, buckets = scheduled_reads[0]
(quantile_values, quantile_weights, example_partition_ids,
feature_ids, gradients, hessians) = dense_make_stats_update(
is_active, are_buckets_ready, self._dense_float_column, buckets,
example_partition_ids, gradients, hessians, weights, empty_gradients,
empty_hessians)
update_quantiles = self._quantile_accumulator.schedule_add_summary(
stamp_token=stamp_token,
column=quantile_values,
example_weights=quantile_weights)
update_stats = self._stats_accumulator.schedule_add(
example_partition_ids, feature_ids, gradients, hessians)
return control_flow_ops.no_op(), [update_quantiles, update_stats]
def make_splits(self, stamp_token, next_stamp_token, class_id):
"""Create the best split using the accumulated stats and flush the state."""
# Get the bucket boundaries
are_splits_ready, buckets = (
self._quantile_accumulator.get_buckets(stamp_token))
# After we receive the boundaries from previous iteration we can flush
# the quantile accumulator.
with ops.control_dependencies([buckets]):
flush_quantiles = self._quantile_accumulator.flush(
stamp_token=stamp_token, next_stamp_token=next_stamp_token)
# Get the aggregated gradients and hessians per <partition_id, feature_id>
# pair.
# In order to distribute the computation on all the PSs we use the PS that
# had the stats accumulator on.
with ops.device(None):
with ops.device(self._stats_accumulator.resource().device):
num_minibatches, partition_ids, bucket_ids, gradients, hessians = (
self._stats_accumulator.flush(stamp_token, next_stamp_token))
# Put quantile and stats accumulator flushing in the dependency path.
are_splits_ready = control_flow_ops.with_dependencies(
[flush_quantiles, partition_ids], are_splits_ready)
partition_ids, gains, split_infos = (
split_handler_ops.build_dense_inequality_splits(
num_minibatches=num_minibatches,
bucket_boundaries=buckets,
partition_ids=partition_ids,
bucket_ids=bucket_ids,
gradients=gradients,
hessians=hessians,
class_id=class_id,
feature_column_group_id=self._feature_column_group_id,
l1_regularization=self._l1_regularization,
l2_regularization=self._l2_regularization,
tree_complexity_regularization=self.
_tree_complexity_regularization,
min_node_weight=self._min_node_weight,
multiclass_strategy=self._multiclass_strategy))
return (are_splits_ready, partition_ids, gains, split_infos)
class SparseSplitHandler(InequalitySplitHandler):
"""Computes stats and finds the best inequality splits on sparse columns."""
def __init__(self,
sparse_float_column,
l1_regularization,
l2_regularization,
tree_complexity_regularization,
min_node_weight,
feature_column_group_id,
epsilon,
num_quantiles,
gradient_shape,
hessian_shape,
multiclass_strategy,
init_stamp_token=0,
name=None):
"""Initialize the internal state for this split handler.
Args:
sparse_float_column: A `SparseTensor` column associated with this handler.
l1_regularization: L1 regularization applied for this split handler.
l2_regularization: L2 regularization applied for this split handler.
tree_complexity_regularization: Tree complexity regularization applied
for this split handler.
min_node_weight: Minimum sum of weights of examples in each partition to
be considered for splitting.
feature_column_group_id: Feature column group index.
epsilon: A float, the error bound for quantile computation.
num_quantiles: An int, the number of buckets to create from the histogram.
gradient_shape: A TensorShape, containing shape of gradients.
hessian_shape: A TensorShape, containing shape of hessians.
multiclass_strategy: Strategy describing how to treat multiclass problems.
      init_stamp_token: A tensor containing a scalar for the initial stamp of
        the stamped objects.
name: An optional handler name.
"""
super(SparseSplitHandler, self).__init__(
l1_regularization=l1_regularization,
l2_regularization=l2_regularization,
tree_complexity_regularization=tree_complexity_regularization,
min_node_weight=min_node_weight,
feature_column_group_id=feature_column_group_id,
epsilon=epsilon,
num_quantiles=num_quantiles,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=multiclass_strategy,
init_stamp_token=init_stamp_token,
name=name)
# Register sparse_make_stats_update function as an Op to the graph.
g = ops.get_default_graph()
sparse_make_stats_update.add_to_graph(g)
self._sparse_float_column = sparse_float_column
def scheduled_reads(self):
return [self._quantile_accumulator.schedule_get_buckets()]
def update_stats(self, stamp_token, example_partition_ids, gradients,
hessians, empty_gradients, empty_hessians, weights,
is_active, scheduled_reads):
"""Updates the state for dense split handler.
Args:
stamp_token: An int32 scalar tensor containing the current stamp token.
example_partition_ids: A dense tensor, containing an int32 for each
example which is the partition id that the example ends up in.
gradients: A dense tensor of gradients.
hessians: A dense tensor of hessians.
empty_gradients: A dense empty tensor of the same shape (for dimensions >
0) as gradients.
empty_hessians: A dense empty tensor of the same shape (for dimensions >
0) as hessians.
weights: A dense float32 tensor with a weight for each example.
is_active: A boolean tensor that says if this handler is active or not.
One value for the current layer and one value for the next layer.
scheduled_reads: List of results from the scheduled reads.
Returns:
The op that updates the stats for this handler.
"""
are_buckets_ready, buckets = scheduled_reads[0]
with ops.name_scope(self._name, "SparseSplitHandler"):
(quantile_indices, quantile_values, quantile_shapes, quantile_weights,
example_partition_ids,
feature_ids, gradients, hessians) = sparse_make_stats_update(
is_active, are_buckets_ready, self._sparse_float_column.indices,
self._sparse_float_column.values,
self._sparse_float_column.dense_shape, buckets,
example_partition_ids, gradients, hessians, weights, empty_gradients,
empty_hessians)
update_quantiles = self._quantile_accumulator.schedule_add_summary(
stamp_token=stamp_token,
column=sparse_tensor.SparseTensor(quantile_indices, quantile_values,
quantile_shapes),
example_weights=quantile_weights)
update_stats = self._stats_accumulator.schedule_add(
example_partition_ids, feature_ids, gradients, hessians)
return (control_flow_ops.no_op(), [update_quantiles, update_stats])
def make_splits(self, stamp_token, next_stamp_token, class_id):
"""Create the best split using the accumulated stats and flush the state."""
# Get the bucket boundaries
are_splits_ready, buckets = (
self._quantile_accumulator.get_buckets(stamp_token))
# After we receive the boundaries from previous iteration we can flush
# the quantile accumulator.
with ops.control_dependencies([buckets]):
flush_quantiles = self._quantile_accumulator.flush(
stamp_token=stamp_token, next_stamp_token=next_stamp_token)
with ops.device(None):
with ops.device(self._stats_accumulator.resource().device):
num_minibatches, partition_ids, bucket_ids, gradients, hessians = (
self._stats_accumulator.flush(stamp_token, next_stamp_token))
# Put quantile and stats accumulator flushing in the dependency path.
are_splits_ready = control_flow_ops.with_dependencies(
[flush_quantiles, partition_ids], are_splits_ready)
partition_ids, gains, split_infos = (
split_handler_ops.build_sparse_inequality_splits(
num_minibatches=num_minibatches,
bucket_boundaries=buckets,
partition_ids=partition_ids,
bucket_ids=bucket_ids,
gradients=gradients,
hessians=hessians,
class_id=class_id,
feature_column_group_id=self._feature_column_group_id,
l1_regularization=self._l1_regularization,
l2_regularization=self._l2_regularization,
tree_complexity_regularization=self.
_tree_complexity_regularization,
min_node_weight=self._min_node_weight,
bias_feature_id=_BIAS_FEATURE_ID,
multiclass_strategy=self._multiclass_strategy))
return (are_splits_ready, partition_ids, gains, split_infos)
@function.Defun(dtypes.bool, dtypes.bool, dtypes.float32, dtypes.float32,
dtypes.int32, dtypes.float32, dtypes.float32, dtypes.float32,
dtypes.float32, dtypes.float32)
def dense_make_stats_update(is_active, are_buckets_ready, float_column,
quantile_buckets, example_partition_ids, gradients,
hessians, weights, empty_gradients, empty_hessians):
"""Updates the state for dense split handler."""
empty_float = constant_op.constant([], dtype=dtypes.float32)
quantile_values, quantile_weights = control_flow_ops.cond(
is_active[1], # For the next layer, this handler is inactive.
lambda: (float_column, weights),
lambda: (empty_float, empty_float))
def ready_inputs_fn():
"""Branch to execute when quantiles are ready."""
quantized_feature = quantile_ops.quantiles([float_column], [],
[quantile_buckets], [], [])
quantized_feature = math_ops.cast(quantized_feature[0], dtypes.int64)
quantized_feature = array_ops.squeeze(quantized_feature)
return (example_partition_ids, quantized_feature, gradients, hessians)
def not_ready_inputs_fn():
return (constant_op.constant([], dtype=dtypes.int32),
constant_op.constant([[]], dtype=dtypes.int64, shape=[1, 2]),
empty_gradients, empty_hessians)
example_partition_ids, feature_ids, gradients, hessians = (
control_flow_ops.cond(
math_ops.logical_and(are_buckets_ready, is_active[0]),
ready_inputs_fn, not_ready_inputs_fn))
return (quantile_values, quantile_weights, example_partition_ids, feature_ids,
gradients, hessians)
@function.Defun(dtypes.bool, dtypes.bool, dtypes.int64, dtypes.float32,
dtypes.int64, dtypes.float32, dtypes.int32, dtypes.float32,
dtypes.float32, dtypes.float32, dtypes.float32, dtypes.float32)
def sparse_make_stats_update(
is_active, are_buckets_ready, sparse_column_indices, sparse_column_values,
sparse_column_shape, quantile_buckets, example_partition_ids, gradients,
hessians, weights, empty_gradients, empty_hessians):
"""Updates the state for this split handler."""
def quantiles_ready():
"""The subgraph for when the quantiles are ready."""
quantized_feature = quantile_ops.quantiles([], [sparse_column_values], [],
[quantile_buckets],
[sparse_column_indices])
quantized_feature = math_ops.cast(quantized_feature[1], dtypes.int64)
quantized_feature = array_ops.squeeze(quantized_feature)
example_indices, _ = array_ops.split(
sparse_column_indices, num_or_size_splits=2, axis=1)
example_indices = array_ops.squeeze(example_indices, [1])
filtered_gradients = array_ops.gather(gradients, example_indices)
filtered_hessians = array_ops.gather(hessians, example_indices)
filtered_partition_ids = array_ops.gather(example_partition_ids,
example_indices)
unique_partitions, mapped_partitions = array_ops.unique(
example_partition_ids)
# Compute aggregate stats for each partition.
per_partition_gradients = math_ops.unsorted_segment_sum(
gradients, mapped_partitions, array_ops.size(unique_partitions))
per_partition_hessians = math_ops.unsorted_segment_sum(
hessians, mapped_partitions, array_ops.size(unique_partitions))
# Prepend a bias feature per partition that accumulates the stats for all
# examples in that partition.
bias_feature_ids = array_ops.fill(
array_ops.shape(unique_partitions), _BIAS_FEATURE_ID)
bias_feature_ids = math_ops.cast(bias_feature_ids, dtypes.int64)
zeros = array_ops.zeros_like(bias_feature_ids)
bias_feature_ids = array_ops.stack([bias_feature_ids, zeros], axis=1)
partition_ids = array_ops.concat(
[unique_partitions, filtered_partition_ids], 0)
filtered_gradients = array_ops.concat(
[per_partition_gradients, filtered_gradients], 0)
filtered_hessians = array_ops.concat(
[per_partition_hessians, filtered_hessians], 0)
bucket_ids = array_ops.concat([bias_feature_ids, quantized_feature], 0)
return partition_ids, bucket_ids, filtered_gradients, filtered_hessians
def quantiles_not_ready():
"""The subgraph for when the quantiles are not ready."""
return (constant_op.constant([], dtype=dtypes.int32),
constant_op.constant([], dtype=dtypes.int64, shape=[1, 2]),
empty_gradients, empty_hessians)
empty_float = constant_op.constant([], dtype=dtypes.float32)
handler_not_active = (constant_op.constant(
[], dtype=dtypes.int64, shape=[0, 2]), empty_float, constant_op.constant(
[0, 1], dtype=dtypes.int64), empty_float)
handler_active = (sparse_column_indices, sparse_column_values,
sparse_column_shape, weights)
quantile_indices, quantile_values, quantile_shape, quantile_weights = (
control_flow_ops.cond(is_active[1], lambda: handler_active,
lambda: handler_not_active))
example_partition_ids, feature_ids, gradients, hessians = (
control_flow_ops.cond(are_buckets_ready, quantiles_ready,
quantiles_not_ready))
return (quantile_indices, quantile_values, quantile_shape, quantile_weights,
example_partition_ids, feature_ids, gradients, hessians)
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from robot.errors import DataError
from .filelogger import FileLogger
from .loggerhelper import AbstractLogger, AbstractLoggerProxy
from .monitor import CommandLineMonitor
from .stdoutlogsplitter import StdoutLogSplitter
class Logger(AbstractLogger):
"""A global logger proxy to which new loggers may be registered.
Whenever something is written to LOGGER in code, all registered loggers are
    notified. Messages are also cached, and cached messages are written to new
loggers when they are registered.
Tools using Robot Framework's internal modules should register their own
loggers at least to get notifications about errors and warnings. A shortcut
    to get errors/warnings into the console is to use 'register_console_logger'.
"""
def __init__(self, register_console_logger=True):
self._loggers = LoggerCollection()
self._message_cache = []
self._console_logger = None
self._started_keywords = 0
self._error_occurred = False
self._error_listener = None
self._prev_log_message_handlers = []
if register_console_logger:
self.register_console_logger()
def disable_message_cache(self):
self._message_cache = None
def register_logger(self, *loggers):
for log in loggers:
logger = self._loggers.register_regular_logger(log)
self._relay_cached_messages_to(logger)
def register_context_changing_logger(self, logger):
log = self._loggers.register_context_changing_logger(logger)
self._relay_cached_messages_to(log)
def _relay_cached_messages_to(self, logger):
if self._message_cache:
for msg in self._message_cache[:]:
logger.message(msg)
def unregister_logger(self, *loggers):
for log in loggers:
self._loggers.unregister_logger(log)
def register_console_logger(self, width=78, colors='AUTO', markers='AUTO',
stdout=None, stderr=None):
logger = CommandLineMonitor(width, colors, markers, stdout, stderr)
if self._console_logger:
self._loggers.unregister_logger(self._console_logger)
self._console_logger = logger
self._loggers.register_regular_logger(logger)
def unregister_console_logger(self):
if not self._console_logger:
return None
logger = self._console_logger
self._loggers.unregister_logger(logger)
self._console_logger = None
return logger
def register_file_logger(self, path=None, level='INFO'):
if not path:
path = os.environ.get('ROBOT_SYSLOG_FILE', 'NONE')
level = os.environ.get('ROBOT_SYSLOG_LEVEL', level)
if path.upper() == 'NONE':
return
try:
logger = FileLogger(path, level)
except DataError as err:
self.error("Opening syslog file '%s' failed: %s" % (path, unicode(err)))
else:
self.register_logger(logger)
def register_error_listener(self, listener):
self._error_listener = listener
if self._error_occurred:
listener()
def message(self, msg):
"""Messages about what the framework is doing, warnings, errors, ..."""
for logger in self._loggers.all_loggers():
logger.message(msg)
if self._message_cache is not None:
self._message_cache.append(msg)
if msg.level == 'ERROR':
self._error_occurred = True
if self._error_listener:
self._error_listener()
def _log_message(self, msg):
"""Log messages written (mainly) by libraries"""
for logger in self._loggers.all_loggers():
logger.log_message(msg)
if msg.level == 'WARN':
self.message(msg)
log_message = message
def log_output(self, output):
for msg in StdoutLogSplitter(output):
self.log_message(msg)
def enable_library_import_logging(self):
self._prev_log_message_handlers.append(self.log_message)
self.log_message = self.message
def disable_library_import_logging(self):
self.log_message = self._prev_log_message_handlers.pop()
def output_file(self, name, path):
"""Finished output, report, log, debug, or xunit file"""
for logger in self._loggers.all_loggers():
logger.output_file(name, path)
def close(self):
for logger in self._loggers.all_loggers():
logger.close()
self._loggers = LoggerCollection()
self._message_cache = []
def start_suite(self, suite):
for logger in self._loggers.starting_loggers():
logger.start_suite(suite)
def end_suite(self, suite):
for logger in self._loggers.ending_loggers():
logger.end_suite(suite)
def start_test(self, test):
for logger in self._loggers.starting_loggers():
logger.start_test(test)
def end_test(self, test):
for logger in self._loggers.ending_loggers():
logger.end_test(test)
def start_keyword(self, keyword):
self._started_keywords += 1
self.log_message = self._log_message
for logger in self._loggers.starting_loggers():
logger.start_keyword(keyword)
def end_keyword(self, keyword):
self._started_keywords -= 1
for logger in self._loggers.ending_loggers():
logger.end_keyword(keyword)
if not self._started_keywords:
self.log_message = self.message
def __iter__(self):
return iter(self._loggers)
class LoggerCollection(object):
def __init__(self):
self._regular_loggers = []
self._context_changing_loggers = []
def register_regular_logger(self, logger):
self._regular_loggers.append(_LoggerProxy(logger))
return self._regular_loggers[-1]
def register_context_changing_logger(self, logger):
self._context_changing_loggers.append(_LoggerProxy(logger))
return self._context_changing_loggers[-1]
def unregister_logger(self, logger):
self._regular_loggers = [proxy for proxy in self._regular_loggers
if proxy.logger is not logger]
self._context_changing_loggers = [proxy for proxy
in self._context_changing_loggers
if proxy.logger is not logger]
def starting_loggers(self):
return self.all_loggers()
def ending_loggers(self):
return self._regular_loggers + self._context_changing_loggers
def all_loggers(self):
return self._context_changing_loggers + self._regular_loggers
def __iter__(self):
return iter(self.all_loggers())
class _LoggerProxy(AbstractLoggerProxy):
_methods = ['message', 'log_message', 'output_file', 'close',
'start_suite', 'end_suite', 'start_test', 'end_test',
'start_keyword', 'end_keyword']
LOGGER = Logger()
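# --- Illustrative usage sketch, not part of the original module. Any object
# exposing (a subset of) the proxied methods can be registered; missing
# methods are assumed to be filled in by the AbstractLoggerProxy machinery.
# Registered loggers here are assumed to inspect only the message's 'level'
# attribute and its string form.
def _logger_usage_example():
    class EchoLogger(object):
        def message(self, msg):
            print('ECHO [%s]: %s' % (msg.level, msg))
    class DemoMessage(object):
        level = 'INFO'
        def __str__(self):
            return 'hello from the demo'
    LOGGER.unregister_console_logger()
    LOGGER.register_logger(EchoLogger())
    LOGGER.message(DemoMessage())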
|
|
#!/usr/bin/env python3
#
# genetic.py
#
import random
from math import pi, sqrt
from simulation import Simulation
MAXIMIZE, MINIMIZE = 11, 22
class Individual(object):
alleles = (0,1)
length = 10
separator = ''
optimization = MINIMIZE
def __init__(self, chromosome=None):
self.chromosome = chromosome or self._makechromosome()
self.score = None # set during evaluation
def _makechromosome(self):
"makes a chromosome from randomly selected alleles."
return [random.choice(self.alleles) for gene in range(self.length)]
def evaluate(self, optimum=None):
"this method MUST be overridden to evaluate individual fitness score."
return self.score
def crossover(self, other):
"override this method to use your preferred crossover method."
return self._twopoint(other)
def mutate(self, gene):
"override this method to use your preferred mutation method."
self._pick(gene)
# sample mutation method
def _pick(self, gene):
"chooses a random allele to replace this gene's allele."
self.chromosome[gene] = random.choice(self.alleles)
# sample crossover method
def _twopoint(self, other):
"creates offspring via two-point crossover between mates."
left, right = self._pickpivots()
def mate(p0, p1):
chromosome = p0.chromosome[:]
chromosome[left:right] = p1.chromosome[left:right]
child = p0.__class__(chromosome)
child._repair(p0, p1)
return child
return mate(self, other), mate(other, self)
# some crossover helpers ...
def _repair(self, parent1, parent2):
"override this method, if necessary, to fix duplicated genes."
pass
def _pickpivots(self):
left = random.randrange(1, self.length-2)
right = random.randrange(left, self.length-1)
return left, right
#
# other methods
#
def __repr__(self):
"returns string representation of self"
chromosome_str = ''
for gene in self.chromosome:
if gene:
chromosome_str += '1'
else:
chromosome_str += '0'
return '<%s chromosome="%s" score=%s>' % \
(self.__class__.__name__,
chromosome_str, self.score)
def copy(self):
twin = self.__class__(self.chromosome[:])
twin.score = self.score
return twin
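# --- Illustrative sketch, not part of the original module: two-point
# crossover keeps the outer segments of one parent and splices in the other
# parent's middle segment between the two pivots.
def _crossover_example():
    random.seed(42)  # seeded only to make the demo reproducible
    p0, p1 = Individual([1] * 10), Individual([0] * 10)
    c0, c1 = p0.crossover(p1)
    # e.g. with pivots (3, 7): c0 -> 1110000111, c1 -> 0001111000
    return c0, c1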
class Environment(object):
def __init__(self, kind, population=None, size=100, maxgenerations=100, \
generation=0, crossover_rate=0.90, mutation_rate=0.02, \
optimum=None):
self.kind = kind
self.size = size
self.optimum = optimum
self.population = population or self._makepopulation()
for individual in self.population:
individual.evaluate(self.optimum)
self.crossover_rate = crossover_rate
self.mutation_rate = mutation_rate
self.maxgenerations = maxgenerations
self.generation = generation
self.report()
def _makepopulation(self):
return [self.kind() for individual in range(self.size)]
def run(self):
try:
while not self._goal():
self.step()
except KeyboardInterrupt:
pass
best = self.best.copy()
s = Simulation(show=True)
while True:
print(s.mySimul([(best.chromosome[0], best.chromosome[1]), best.chromosome[2], best.chromosome[3],
best.chromosome[4], best.chromosome[5], best.chromosome[6], best.chromosome[7], best.chromosome[8]]))
def _goal(self):
return self.generation >= self.maxgenerations or \
self.best.score >= self.optimum
def step(self):
self.population.sort(key=lambda indiv: indiv.score, reverse=True)
self._crossover()
self.generation += 1
self.report()
def _crossover(self):
next_population = [self.best.copy()]
while len(next_population) < self.size:
mate1 = self._select()
if random.random() < self.crossover_rate:
mate2 = self._select()
offspring = mate1.crossover(mate2)
else:
offspring = [mate1.copy()]
for individual in offspring:
self._mutate(individual)
individual.evaluate(self.optimum)
next_population.append(individual)
self.population = next_population[:self.size]
def _select(self):
"override this to use your preferred selection method"
return self._tournament()
def _mutate(self, individual):
for gene in range(individual.length):
if random.random() < self.mutation_rate:
individual.mutate(gene)
#
# sample selection method
#
def _tournament(self, size=8, choosebest=0.90):
competitors = [random.choice(self.population) for i in range(size)]
competitors.sort(key=lambda indiv: indiv.score, reverse=True)
if random.random() < choosebest:
return competitors[0]
else:
return random.choice(competitors[1:])
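    # "Property factory" idiom below: best() returns locals(), i.e.
    # {'doc': ..., 'fget': ...}, which property(**...) unpacks into a
    # read-only property.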
def best():
doc = "individual with best fitness score in population."
def fget(self):
return self.population[0]
return locals()
best = property(**best())
def report(self):
print ("="*70)
print ("generation: ", self.generation)
print ("best: ", self.best)
class MyIndividual(Individual):
alleles = [(550, 570), (270, 290), (20, 80), (10, 80), (10, 30), (0, pi/2), (0, pi/2), (0, pi/2), (0, pi/2)]
length = 9
"""
pos_w -- the initial position
pos_h -- the initial position
ul -- the length of the upper leg
ll -- the length of the lower leg
w -- the width of the robot
lua -- the angle of the left hip
lla -- the angle of the left ankle
rua -- the angle of the right hip
rla -- the angle of the right ankle
"""
def _makechromosome(self):
"makes a chromosome from randomly selected alleles."
return [random.uniform(self.alleles[gene][0], self.alleles[gene][1]) for gene in range(self.length)]
# sample mutation method
def _pick(self, gene):
"chooses a random allele to replace this gene's allele."
self.chromosome[gene] = random.uniform(self.alleles[gene][0], self.alleles[gene][1])
def evaluate(self, optimum=None):
"this method MUST be overridden to evaluate individual fitness score."
s = Simulation(show=False)
iterations, pos, ke = s.mySimul([(self.chromosome[0], self.chromosome[1]), self.chromosome[2], self.chromosome[3],
self.chromosome[4], self.chromosome[5], self.chromosome[6], self.chromosome[7], self.chromosome[8]])
#dist = sqrt(pos[0]**2 + pos[1]**2)
dist = pos[0]
score = (600-dist)/6 # 0 - 100
# score = sqrt(ke) + sqrt(iterations) + score**2
self.score = score
def __repr__(self):
"returns string representation of self"
chromosome_str = ''
for gene in range (len(self.chromosome)):
chromosome_str += ' ' + str(self.chromosome[gene])
return '<%s chromosome="%s" \nscore=%s>' % \
(self.__class__.__name__,
chromosome_str, self.score)
e = Environment(MyIndividual, maxgenerations=300, mutation_rate=0.1, optimum=100)
e.run()
|
|
# The MIT License (MIT)
# Copyright (c) 2016, 2017 by the ESA CCI Toolbox development team and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
This module provides general utility operations that wrap specific ``xarray`` functions.
The intention is to make available the ``xarray`` API as a set of general, domain-independent
utility functions.
All operations in this module are tagged with the ``"utility"`` tag.
"""
import pandas as pd
import xarray as xr
from datetime import timezone
from cate.core.ds import NetworkError, DataAccessError
from cate.core.op import op, op_input, op_return
from cate.core.types import DatasetLike, PointLike, TimeLike, DictLike, Arbitrary, Literal, ValidationError
from cate.util.monitor import Monitor
@op(tags=['utility'])
@op_input('ds_1', data_type=DatasetLike)
@op_input('ds_2', data_type=DatasetLike)
@op_input('ds_3', data_type=DatasetLike)
@op_input('ds_4', data_type=DatasetLike)
@op_input('join', value_set=["outer", "inner", "left", "right", "exact"])
@op_input('compat', value_set=["identical", "equals", "broadcast_equals", "no_conflicts"])
def merge(ds_1: DatasetLike.TYPE,
ds_2: DatasetLike.TYPE,
ds_3: DatasetLike.TYPE = None,
ds_4: DatasetLike.TYPE = None,
join: str = 'outer',
compat: str = 'no_conflicts') -> xr.Dataset:
"""
Merge up to four datasets to produce a new dataset with combined variables from each input dataset.
This is a wrapper for the ``xarray.merge()`` function.
For documentation refer to xarray documentation at
http://xarray.pydata.org/en/stable/generated/xarray.Dataset.merge.html#xarray.Dataset.merge
The *compat* argument indicates how to compare variables of the same name for potential conflicts:
* "broadcast_equals": all values must be equal when variables are broadcast
against each other to ensure common dimensions.
* "equals": all values and dimensions must be the same.
* "identical": all values, dimensions and attributes must be the same.
* "no_conflicts": only values which are not null in both datasets must be equal.
The returned dataset then contains the combination of all non-null values.
:param ds_1: The first input dataset.
:param ds_2: The second input dataset.
:param ds_3: An optional 3rd input dataset.
:param ds_4: An optional 4th input dataset.
:param join: How to combine objects with different indexes.
:param compat: How to compare variables of the same name for potential conflicts.
:return: A new dataset with combined variables from each input dataset.
"""
ds_1 = DatasetLike.convert(ds_1)
ds_2 = DatasetLike.convert(ds_2)
ds_3 = DatasetLike.convert(ds_3)
ds_4 = DatasetLike.convert(ds_4)
datasets = []
for ds in (ds_1, ds_2, ds_3, ds_4):
if ds is not None:
included = False
for ds2 in datasets:
if ds is ds2:
included = True
if not included:
datasets.append(ds)
if len(datasets) == 0:
raise ValidationError('At least two different datasets must be given')
elif len(datasets) == 1:
return datasets[0]
else:
return xr.merge(datasets, compat=compat, join=join)
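# --- Illustrative usage sketch, not part of the original module (it assumes
# the @op decorator leaves the function directly callable):
def _merge_usage_example():
    ds_a = xr.Dataset({'temperature': ('x', [10.0, 11.0])})
    ds_b = xr.Dataset({'pressure': ('x', [1000.0, 990.0])})
    # With the default join/compat settings the result carries both variables.
    return merge(ds_a, ds_b)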
@op(tags=['utility'])
@op_input('ds', data_type=DatasetLike)
@op_input('point', data_type=PointLike, units='degree')
@op_input('time', data_type=TimeLike)
@op_input('indexers', data_type=DictLike)
@op_input('method', value_set=['nearest', 'ffill', 'bfill'])
def sel(ds: DatasetLike.TYPE,
point: PointLike.TYPE = None,
time: TimeLike.TYPE = None,
indexers: DictLike.TYPE = None,
method: str = 'nearest') -> xr.Dataset:
"""
Return a new dataset with each array indexed by tick labels along the specified dimension(s).
This is a wrapper for the ``xarray.sel()`` function.
For documentation refer to xarray documentation at
http://xarray.pydata.org/en/stable/generated/xarray.Dataset.sel.html#xarray.Dataset.sel
:param ds: The dataset from which to select.
:param point: Optional geographic point given by longitude and latitude
:param time: Optional time
:param indexers: Keyword arguments with names matching dimensions and values given by scalars,
slices or arrays of tick labels. For dimensions with multi-index, the indexer may also be
a dict-like object with keys matching index level names.
:param method: Method to use for inexact matches:
* None: only exact matches
* ``pad`` / ``ffill``: propagate last valid index value forward
* ``backfill`` / ``bfill``: propagate next valid index value backward
* ``nearest`` (default): use nearest valid index value
:return: A new Dataset with the same contents as this dataset, except each variable and dimension
is indexed by the appropriate indexers. In general, each variable's data will be a view of the
variable's data in this dataset.
"""
ds = DatasetLike.convert(ds)
point = PointLike.convert(point)
time = TimeLike.convert(time)
indexers = DictLike.convert(indexers)
indexers = dict(indexers or {})
if point is not None:
indexers.setdefault('lon', point.x)
indexers.setdefault('lat', point.y)
if time is not None:
indexers.setdefault('time', time)
# Filter out non-existent coordinates
indexers = {name: value for name, value in indexers.items() if name in ds.coords}
return ds.sel(method=method, **indexers)
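# --- Illustrative usage sketch, not part of the original module. It assumes
# PointLike accepts a "lon, lat" string; dummy_ds is defined further below.
def _sel_usage_example():
    ds = dummy_ds(lon_dim=36, lat_dim=18, time_dim=2)
    # Nearest-neighbour selection of the grid cell closest to lon=20, lat=10.
    return sel(ds, point='20.0, 10.0')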
@op(tags=['utility'])
def from_data_frame(df: pd.DataFrame) -> xr.Dataset:
"""
Convert the given dataframe to an xarray dataset.
This is a wrapper for the ``xarray.from_dataframe()`` function.
For documentation refer to xarray documentation at
http://xarray.pydata.org/en/stable/generated/xarray.Dataset.from_dataframe.html#xarray.Dataset.from_dataframe
:param df: Dataframe to convert
:return: A dataset created from the given dataframe
"""
return xr.Dataset.from_dataframe(df)
@op(tags=['utility'])
@op_input('value', data_type=Arbitrary)
@op_return(data_type=Arbitrary)
def identity(value: Arbitrary.TYPE) -> Arbitrary.TYPE:
"""
Return the given value.
This operation can be useful to create constant resources to be used as input for other operations.
:param value: An arbitrary (Python) value.
"""
return value
@op(tags=['utility'])
@op_input('value', data_type=Literal)
@op_return(data_type=Arbitrary)
def literal(value: Literal.TYPE) -> Arbitrary.TYPE:
"""
Return the given value.
This operation can be useful to create constant resources to be used as input for other operations.
:param value: An arbitrary (Python) literal.
"""
return Literal.convert(value)
@op(tags=['utility'])
def dummy_ds(lon_dim: int = 360,
lat_dim: int = 180,
time_dim: int = 5) -> xr.Dataset:
"""
Create a dummy dataset.
:param lon_dim: Number of grid cells in longitude direction
:param lat_dim: Number of grid cells in latitude direction
:param time_dim: Number of time steps
:return: a dummy dataset
"""
import numpy as np
temperature = 15 + 8 * np.random.randn(time_dim, lat_dim, lon_dim)
precipitation = 10 * np.random.rand(time_dim, lat_dim, lon_dim)
lon_delta = 360. / lon_dim
lat_delta = 180. / lat_dim
lon = np.arange(-180. + 0.5 * lon_delta, 180., lon_delta)
lat = np.arange(-90. + 0.5 * lat_delta, 90., lat_delta)
time = pd.date_range('2014-09-06', periods=time_dim)
return xr.Dataset({'temperature': (['time', 'lat', 'lon'], temperature),
'precipitation': (['time', 'lat', 'lon'], precipitation)},
coords={'lon': lon,
'lat': lat,
'time': time,
'reference_time': pd.Timestamp('2014-09-05', tzinfo=timezone.utc)})
_ERROR_TYPES = {
'Value': ValueError,
'OS': OSError,
'Memory': MemoryError,
'Network': NetworkError,
'Data Access': DataAccessError,
'Validation': ValidationError,
}
@op(tags=['utility'])
@op_input('step_duration', units='seconds')
@op_input('error_type', value_set=['Value', 'OS', 'Memory', 'Network', 'Data Access', 'Validation'])
def no_op(num_steps: int = 20,
step_duration: float = 0.5,
fail_before: bool = False,
fail_after: bool = False,
error_type: str = 'Value',
monitor: Monitor = Monitor.NONE) -> bool:
"""
An operation that does nothing except spend a configurable amount of time.
It may be useful for testing purposes.
:param num_steps: Number of steps to iterate.
:param step_duration: How much time to spend in each step in seconds.
:param fail_before: If the operation should fail before spending time doing nothing (raise a ValidationError).
:param fail_after: If the operation should fail after spending time doing nothing (raise a ValueError).
:param error_type: The type of error to raise.
:param monitor: A progress monitor.
:return: Always True
"""
import time
with monitor.starting('Computing nothing', num_steps):
if fail_before:
error_class = _ERROR_TYPES[error_type]
raise error_class(f'This is a test: intentionally failed with a {error_type} error'
f' before doing nothing {num_steps} times.')
for i in range(num_steps):
time.sleep(step_duration)
monitor.progress(1.0, 'Step %s of %s doing nothing' % (i + 1, num_steps))
if fail_after:
error_class = _ERROR_TYPES[error_type]
raise error_class(f'Intentionally failed with a {error_type} error'
f' after doing nothing {num_steps} times.')
return True
@op(tags=['utility', 'internal'])
@op_input('method', value_set=['backfill', 'bfill', 'pad', 'ffill'])
def pandas_fillna(df: pd.DataFrame,
value: float = None,
method: str = None,
limit: int = None,
**kwargs) -> pd.DataFrame:
"""
Return a new dataframe with NaN values filled according to the given value
or method.
This is a wrapper for the ``pandas.fillna()`` function. For additional
keyword arguments and information refer to pandas documentation at
http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.fillna.html
:param df: The dataframe to fill
:param value: Value to fill
:param method: Method according to which to fill NaN. ffill/pad will
propagate the last valid observation forward to the next valid observation.
backfill/bfill will propagate the next valid observation backward to fill
the gap.
:param limit: Maximum number of NaN values to forward/backward fill.
:return: A dataframe with nan values filled with the given value or according to the given method.
"""
# The following code is needed, because Pandas treats any kw given in kwargs as being set, even if just None.
kwargs = dict(kwargs)
if value:
kwargs.update(value=value)
if method:
kwargs.update(method=method)
if limit:
kwargs.update(limit=limit)
return df.fillna(**kwargs)
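# --- Illustrative usage sketch, not part of the original module:
def _fillna_usage_example():
    import numpy as np
    df = pd.DataFrame({'a': [1.0, np.nan, np.nan, 4.0]})
    # Forward-fill at most one consecutive NaN: [1.0, 1.0, NaN, 4.0]
    return pandas_fillna(df, method='ffill', limit=1)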
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Wrappers around standard crypto, including root and intermediate CAs,
SSH keypairs and x509 certificates.
"""
import base64
import hashlib
import logging
import os
import shutil
import struct
import tempfile
import time
import utils
from nova import vendor
import M2Crypto
from nova import exception
from nova import flags
FLAGS = flags.FLAGS
flags.DEFINE_string('ca_file', 'cacert.pem', 'Filename of root CA')
flags.DEFINE_string('keys_path', utils.abspath('../keys'), 'Where we keep our keys')
flags.DEFINE_string('ca_path', utils.abspath('../CA'), 'Where we keep our root CA')
flags.DEFINE_boolean('use_intermediate_ca', False, 'Should we use intermediate CAs for each project?')
def ca_path(project_id):
if project_id:
return "%s/INTER/%s/cacert.pem" % (FLAGS.ca_path, project_id)
return "%s/cacert.pem" % (FLAGS.ca_path)
def fetch_ca(project_id=None, chain=True):
if not FLAGS.use_intermediate_ca:
project_id = None
buffer = ""
if project_id:
with open(ca_path(project_id),"r") as cafile:
buffer += cafile.read()
if not chain:
return buffer
with open(ca_path(None),"r") as cafile:
buffer += cafile.read()
return buffer
def generate_key_pair(bits=1024):
# 65537 (0x10001, the Fermat prime F4) is the conventional RSA public
# exponent used in the commented-out M2Crypto variant below.
tmpdir = tempfile.mkdtemp()
keyfile = os.path.join(tmpdir, 'temp')
utils.execute('ssh-keygen -q -b %d -N "" -f %s' % (bits, keyfile))
(out, err) = utils.execute('ssh-keygen -q -l -f %s.pub' % (keyfile))
fingerprint = out.split(' ')[1]
private_key = open(keyfile).read()
public_key = open(keyfile + '.pub').read()
shutil.rmtree(tmpdir)
# code below returns public key in pem format
# key = M2Crypto.RSA.gen_key(bits, 65537, callback=lambda: None)
# private_key = key.as_pem(cipher=None)
# bio = M2Crypto.BIO.MemoryBuffer()
# key.save_pub_key_bio(bio)
# public_key = bio.read()
# public_key, err = execute('ssh-keygen -y -f /dev/stdin', private_key)
return (private_key, public_key, fingerprint)
def ssl_pub_to_ssh_pub(ssl_public_key, name='root', suffix='nova'):
rsa_key = M2Crypto.RSA.load_pub_key_bio(M2Crypto.BIO.MemoryBuffer(ssl_public_key))
e, n = rsa_key.pub()
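    # M2Crypto's RSA.pub() appears to return e and n already length-prefixed
    # (SSH mpint wire format), so the plain concatenation below should yield
    # a valid ssh-rsa key blob.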
key_type = 'ssh-rsa'
key_data = struct.pack('>I', len(key_type))
key_data += key_type
key_data += '%s%s' % (e,n)
b64_blob = base64.b64encode(key_data)
return '%s %s %s@%s\n' %(key_type, b64_blob, name, suffix)
def generate_x509_cert(subject, bits=1024):
tmpdir = tempfile.mkdtemp()
keyfile = os.path.abspath(os.path.join(tmpdir, 'temp.key'))
csrfile = os.path.join(tmpdir, 'temp.csr')
logging.debug("openssl genrsa -out %s %s" % (keyfile, bits))
utils.runthis("Generating private key: %s", "openssl genrsa -out %s %s" % (keyfile, bits))
utils.runthis("Generating CSR: %s", "openssl req -new -key %s -out %s -batch -subj %s" % (keyfile, csrfile, subject))
private_key = open(keyfile).read()
csr = open(csrfile).read()
shutil.rmtree(tmpdir)
return (private_key, csr)
def sign_csr(csr_text, intermediate=None):
if not FLAGS.use_intermediate_ca:
intermediate = None
if not intermediate:
return _sign_csr(csr_text, FLAGS.ca_path)
user_ca = "%s/INTER/%s" % (FLAGS.ca_path, intermediate)
if not os.path.exists(user_ca):
start = os.getcwd()
os.chdir(FLAGS.ca_path)
utils.runthis("Generating intermediate CA: %s", "sh geninter.sh %s" % (intermediate))
os.chdir(start)
return _sign_csr(csr_text, user_ca)
def _sign_csr(csr_text, ca_folder):
tmpfolder = tempfile.mkdtemp()
csrfile = open("%s/inbound.csr" % (tmpfolder), "w")
csrfile.write(csr_text)
csrfile.close()
logging.debug("Flags path: %s" % ca_folder)
start = os.getcwd()
# Change working dir to CA
os.chdir(ca_folder)
utils.runthis("Signing cert: %s", "openssl ca -batch -out %s/outbound.crt -config ./openssl.cnf -infiles %s/inbound.csr" % (tmpfolder, tmpfolder))
os.chdir(start)
with open("%s/outbound.crt" % (tmpfolder), "r") as crtfile:
return crtfile.read()
def mkreq(bits, subject="foo", ca=0):
pk = M2Crypto.EVP.PKey()
req = M2Crypto.X509.Request()
rsa = M2Crypto.RSA.gen_key(bits, 65537, callback=lambda: None)
pk.assign_rsa(rsa)
rsa = None # should not be freed here
req.set_pubkey(pk)
req.set_subject(subject)
req.sign(pk,'sha512')
assert req.verify(pk)
pk2 = req.get_pubkey()
assert req.verify(pk2)
return req, pk
def mkcacert(subject='nova', years=1):
req, pk = mkreq(2048, subject, ca=1)
pkey = req.get_pubkey()
sub = req.get_subject()
cert = M2Crypto.X509.X509()
cert.set_serial_number(1)
cert.set_version(2)
cert.set_subject(sub) # FIXME subject is not set in mkreq yet
t = long(time.time()) + time.timezone
now = M2Crypto.ASN1.ASN1_UTCTIME()
now.set_time(t)
nowPlusYear = M2Crypto.ASN1.ASN1_UTCTIME()
nowPlusYear.set_time(t + (years * 60 * 60 * 24 * 365))
cert.set_not_before(now)
cert.set_not_after(nowPlusYear)
issuer = M2Crypto.X509.X509_Name()
issuer.C = "US"
issuer.CN = subject
cert.set_issuer(issuer)
cert.set_pubkey(pkey)
ext = M2Crypto.X509.new_extension('basicConstraints', 'CA:TRUE')
cert.add_ext(ext)
cert.sign(pk, 'sha512')
# print 'cert', dir(cert)
print cert.as_pem()
print pk.get_rsa().as_pem()
return cert, pk, pkey
# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# http://code.google.com/p/boto
def compute_md5(fp):
"""
@type fp: file
@param fp: File pointer to the file to MD5 hash. The file pointer will be
reset to the beginning of the file before the method returns.
@rtype: string
@return: the hex digest version of the MD5 hash
"""
m = hashlib.md5()
fp.seek(0)
s = fp.read(8192)
while s:
m.update(s)
s = fp.read(8192)
hex_md5 = m.hexdigest()
# size = fp.tell()
fp.seek(0)
return hex_md5
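# --- Illustrative usage sketch, not part of the original module:
def _compute_md5_example():
    import io
    fp = io.BytesIO(b'hello world')
    return compute_md5(fp)  # '5eb63bbbe01eeed093cb22bb8f5acdc3'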
|
|
# Copyright (C) 2010, 2011 Ali Tofigh
#
# This file is part of PhylTr, a package for phylogenetic analysis
# using duplications and transfers.
#
# PhylTr is released under the terms of the license contained in the
# file LICENSE.
"""Utility functions useful when working with the PhylTr-package
Description will follow later...
"""
import random as _random
import types as _types
_LEAF_MARK = 'X'
_alphabet = 'abcdefghijklmnopqrstuvwxyz'
_rng = _random.Random()
_Z_table = [0, 1, 1]
def _isqrt(n):
xn = 1
xn1 = (xn + n/xn)/2
while abs(xn1 - xn) > 1:
xn = xn1
xn1 = (xn + n/xn)/2
while xn1*xn1 > n:
xn1 -= 1
return xn1
def _W(x, i, j):
assert 0 <= i <= j <= x-1, (i, j, x)
return x*(x+1)/2 - (x-i)*(x-i + 1)/2 + j - i
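# Illustrative check, not part of the original module: _W enumerates the
# pairs (i, j) with 0 <= i <= j <= x-1 in row-major order.
def _W_example():
    # For x = 3: (0,0)->0, (0,1)->1, (0,2)->2, (1,1)->3, (1,2)->4, (2,2)->5
    return [_W(3, i, j) for i in range(3) for j in range(i, 3)]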
def _inv_W(w, x):
assert type(x) == _types.IntType or type(x) == _types.LongType
assert x >= 1
import math
r = (2*x - 1)**2 - 8*(w-x+1)
isqrt_r = _isqrt(r)
s = (2*x + 1)**2 - 8*w
isqrt_s = _isqrt(s)
i1 = 2*x - 1 - isqrt_r
if isqrt_r*isqrt_r != r:
i1 -= 1
i1 /= 2
i2 = 2*x + 1 - isqrt_s
if isqrt_s*isqrt_s != s:
i2 -= 1
i2 /= 2
i = max(i1, i2)
j = w - x*(x+1)/2 + (x-i)*(x-i + 1)/2 + i
return i, j
def _compute_Z_table(n):
assert type(n) == _types.IntType or type(n) == _types.LongType
for k in range(len(_Z_table), n+1):
Z_tmp = 0
i = 1
while 2*i < k:
Z_tmp += _Z_table[i] * _Z_table[k-i]
i += 1
if i*2 == k:
Z_tmp += _Z_table[i]*(_Z_table[i] + 1) / 2
_Z_table.append(Z_tmp)
def _Z(n):
assert type(n) == _types.IntType or type(n) == _types.LongType
if n >= len(_Z_table):
_compute_Z_table(n)
return _Z_table[n]
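# Illustrative check, not part of the original module: _Z(n) counts the
# distinct shapes of rooted full binary trees with n leaves, i.e. the
# Wedderburn-Etherington numbers.
def _Z_example():
    return [_Z(n) for n in range(1, 8)]  # [1, 1, 1, 2, 3, 6, 11]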
def _inv_rank(rank, n):
assert rank <= _Z(n), (n, rank, _Z(n))
if n == 1:
return _LEAF_MARK
a = 0
p = 0
while p < rank:
a += 1
p += _Z(a)*_Z(n-a)
p -= _Z(a)*_Z(n-a)
V = rank - p - 1
if 2*a == n:
left, right = _inv_W(V, _Z(a))
else:
left = V / _Z(n-a)
right = V % _Z(n-a)
# print "rank:", rank, "\tn:", n, "\ta:", a, "\tp:", p, "\tV:", V, "\tleft:", left, "\tright:", right
return "(" + _inv_rank(left + 1, a) + ", " + _inv_rank(right + 1, n-a) + ")"
def _get_random_tree(n):
"""Returns a rooted full binary tree of size n"""
assert (type(n) == _types.IntType or type(n) == _types.LongType) and n >= 2
rank = _rng.randint(1, _Z(n))
return _inv_rank(rank, n)
def create_random_input(species, genes):
def label(n):
assert type(n) == _types.IntType or type(n) == _types.LongType
size = len(_alphabet)
l = ""
while n >= 0:
nr = n % size
l += _alphabet[nr]
n /= size
n -= 1
return l[-1::-1]
if type(species) != _types.IntType and type(species) != _types.LongType:
raise TypeError, "Expected an integer, got a " + str(type(species))
if type(genes) != _types.IntType and type(genes) != _types.LongType:
raise TypeError, "Expected an integer, got a " + str(type(species))
if species > genes:
raise ValueError, "The number of species exceeds the number of genes"
if species < 2 or genes < 2:
raise ValueError, "genes and species must be at least 2"
gene_tree = _get_random_tree(genes)
species_tree = _get_random_tree(species)
species_parts = species_tree.split(_LEAF_MARK)
gene_parts = gene_tree.split(_LEAF_MARK)
labels = []
for i in range(species):
labels.append(label(i))
new_species_parts = [species_parts[0]]
for i in range(species):
new_species_parts.append(labels[i])
new_species_parts.append(species_parts[i+1])
excess_labels = []
for i in range(genes-species):
excess_labels.append(_random.choice(labels))
labels += excess_labels
labels.sort()
gene_labels = []
sigma = []
i = 0
while i < len(labels):
j = 1
gene_labels.append(labels[i] + str(j))
sigma.append((labels[i] + str(j), labels[i]))
while i+1 < len(labels) and labels[i] == labels[i+1]:
i += 1
j += 1
gene_labels.append(labels[i] + str(j))
sigma.append((labels[i] + str(j), labels[i]))
i += 1
_random.shuffle(gene_labels)
new_gene_parts = [gene_parts[0]]
for i in range(genes):
new_gene_parts.append(gene_labels[i])
new_gene_parts.append(gene_parts[i+1])
return "".join(new_species_parts), "".join(new_gene_parts), sigma
def gcd(a,b):
"""Return greatest common divisor using Euclid's Algorithm."""
while b:
a, b = b, a % b
return a
def lcm(a, b):
return (a * b) / gcd(a, b)
class Rational:
def __init__(self, numerator, denominator):
common = gcd(numerator, denominator)
self.numerator = numerator / common
self.denominator = denominator / common
def __cmp__(self, other):
return self.numerator * other.denominator - self.denominator * other.numerator
def __hash__(self):
return int(hash(self.numerator) * hash(self.denominator))
def __repr__(self):
return str(self.numerator) + '/' + str(self.denominator)
def __str__(self):
return repr(self)
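# Illustrative sketch, not part of the original module: Rational reduces to
# lowest terms on construction via gcd().
def _rational_example():
    r = Rational(6, 8)
    return repr(r)  # '3/4'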
|
|
import six
import grab.spider.base
from grab import Grab
from grab.spider import Spider, Task, SpiderMisuseError, NoTaskHandler
from grab.spider import inline_task
from test.util import (BaseGrabTestCase, build_grab, build_spider,
multiprocess_mode)
from grab.spider.error import SpiderError
class SimpleSpider(Spider):
base_url = 'http://google.com'
def task_baz(self, grab, task):
self.SAVED_ITEM = grab.response.body
class TestSpider(BaseGrabTestCase):
def setUp(self):
self.server.reset()
def test_task_priority(self):
# Automatic random priority
grab.spider.base.RANDOM_TASK_PRIORITY_RANGE = (10, 20)
bot = build_spider(SimpleSpider, priority_mode='random')
bot.setup_queue()
task = Task('baz', url='xxx')
self.assertEqual(task.priority, None)
bot.add_task(task)
self.assertTrue(10 <= task.priority <= 20)
# Automatic constant priority
grab.spider.base.DEFAULT_TASK_PRIORITY = 33
bot = build_spider(SimpleSpider, priority_mode='const')
bot.setup_queue()
task = Task('baz', url='xxx')
self.assertEqual(task.priority, None)
bot.add_task(task)
self.assertEqual(33, task.priority)
# Automatic priority does not override explicitly set priority
grab.spider.base.DEFAULT_TASK_PRIORITY = 33
bot = build_spider(SimpleSpider, priority_mode='const')
bot.setup_queue()
task = Task('baz', url='xxx', priority=1)
self.assertEqual(1, task.priority)
bot.add_task(task)
self.assertEqual(1, task.priority)
self.assertRaises(SpiderMisuseError,
lambda: SimpleSpider(priority_mode='foo'))
def test_task_url(self):
bot = build_spider(SimpleSpider, )
bot.setup_queue()
task = Task('baz', url='xxx')
self.assertEqual('xxx', task.url)
bot.add_task(task)
self.assertEqual('http://google.com/xxx', task.url)
self.assertEqual(None, task.grab_config)
g = Grab(url='yyy')
task = Task('baz', grab=g)
bot.add_task(task)
self.assertEqual('http://google.com/yyy', task.url)
self.assertEqual('http://google.com/yyy', task.grab_config['url'])
def test_task_clone(self):
bot = build_spider(SimpleSpider, )
bot.setup_queue()
task = Task('baz', url='xxx')
bot.add_task(task.clone())
# Pass grab to clone
task = Task('baz', url='xxx')
g = Grab()
g.setup(url='zzz')
bot.add_task(task.clone(grab=g))
# Pass grab_config to clone
task = Task('baz', url='xxx')
g = Grab()
g.setup(url='zzz')
bot.add_task(task.clone(grab_config=g.config))
def test_task_clone_with_url_param(self):
task = Task('baz', url='xxx')
task.clone(url='http://yandex.ru/')
def test_task_useragent(self):
bot = build_spider(SimpleSpider, )
bot.setup_queue()
g = Grab()
g.setup(url=self.server.get_url())
g.setup(user_agent='Foo')
task = Task('baz', grab=g)
bot.add_task(task.clone())
bot.run()
self.assertEqual(self.server.request['headers']['User-Agent'], 'Foo')
def test_task_nohandler_error(self):
class TestSpider(Spider):
pass
bot = build_spider(TestSpider, )
bot.setup_queue()
bot.add_task(Task('page', url=self.server.get_url()))
self.assertRaises(NoTaskHandler, bot.run)
def test_task_raw(self):
class TestSpider(Spider):
def task_page(self, grab, task):
self.stat.collect('codes', grab.response.code)
self.server.response['code'] = 502
bot = build_spider(TestSpider, network_try_limit=1)
bot.setup_queue()
bot.add_task(Task('page', url=self.server.get_url()))
bot.add_task(Task('page', url=self.server.get_url()))
bot.run()
self.assertEqual(0, len(bot.stat.collections['codes']))
bot = build_spider(TestSpider, network_try_limit=1)
bot.setup_queue()
bot.add_task(Task('page', url=self.server.get_url(), raw=True))
bot.add_task(Task('page', url=self.server.get_url(), raw=True))
bot.run()
self.assertEqual(2, len(bot.stat.collections['codes']))
@multiprocess_mode(False)
def test_task_callback(self):
class TestSpider(Spider):
def task_page(self, grab, task):
self.meta['tokens'].append('0_handler')
class FuncWithState(object):
def __init__(self, tokens):
self.tokens = tokens
def __call__(self, grab, task):
self.tokens.append('1_func')
tokens = []
func = FuncWithState(tokens)
bot = build_spider(TestSpider, )
bot.meta['tokens'] = tokens
bot.setup_queue()
# classic handler
bot.add_task(Task('page', url=self.server.get_url()))
# callback option overrides the classic handler
bot.add_task(Task('page', url=self.server.get_url(), callback=func))
# callback and null task name
bot.add_task(Task(name=None, url=self.server.get_url(), callback=func))
# callback and default task name
bot.add_task(Task(url=self.server.get_url(), callback=func))
bot.run()
self.assertEqual(['0_handler', '1_func', '1_func', '1_func'],
sorted(tokens))
@multiprocess_mode(False)
def test_inline_task(self):
def callback(self):
self.write(self.request.uri)
self.finish()
self.server.response['get.callback'] = callback
server = self.server
class TestSpider(Spider):
def add_response(self, grab):
self.stat.collect('responses', grab.doc.unicode_body())
def task_generator(self):
url = server.get_url('/?foo=start')
yield Task('inline', url=url)
def subroutine_task(self, grab):
for x in six.moves.range(2):
url = server.get_url('/?foo=subtask%s' % x)
grab.setup(url=url)
grab = yield Task(grab=grab)
self.add_response(grab)
self.stat.collect('calls', 'subinline%s' % x)
@inline_task
def task_inline(self, grab, task):
self.add_response(grab)
self.stat.collect('calls', 'generator')
for x in six.moves.range(3):
url = server.get_url('/?foo=%s' % x)
grab.setup(url=url)
grab = yield Task(grab=grab)
self.add_response(grab)
self.stat.collect('calls', 'inline%s' % x)
grab = yield self.subroutine_task(grab)
# In this case the grab body will be the same
# as in the subroutine task: /?foo=subtask1
self.add_response(grab)
url = server.get_url('/?foo=yield')
self.add_task(Task('yield', url=url))
def task_yield(self, grab, task):
self.add_response(grab)
self.stat.collect('calls', 'yield')
url = server.get_url('/?foo=end')
yield Task('end', url=url)
def task_end(self, grab, task):
self.add_response(grab)
self.stat.collect('calls', 'end')
bot = build_spider(TestSpider, )
bot.run()
self.assertEqual(['/?foo=start',
'/?foo=0',
'/?foo=subtask0', '/?foo=subtask1', '/?foo=subtask1',
'/?foo=1',
'/?foo=subtask0', '/?foo=subtask1', '/?foo=subtask1',
'/?foo=2',
'/?foo=subtask0', '/?foo=subtask1', '/?foo=subtask1',
'/?foo=yield', '/?foo=end'],
bot.stat.collections['responses'])
self.assertEqual(['generator',
'inline0',
'subinline0', 'subinline1',
'inline1',
'subinline0', 'subinline1',
'inline2',
'subinline0', 'subinline1',
'yield', 'end'],
bot.stat.collections['calls'])
def test_task_url_and_grab_options(self):
class TestSpider(Spider):
def setup(self):
self.done = False
def task_page(self, grab, task):
self.done = True
bot = build_spider(TestSpider, )
bot.setup_queue()
g = Grab()
g.setup(url=self.server.get_url())
self.assertRaises(SpiderMisuseError, Task,
'page', grab=g, url=self.server.get_url())
def test_task_invalid_name(self):
self.assertRaises(SpiderMisuseError, Task,
'generator', url='http://ya.ru/')
def test_task_constructor_invalid_args(self):
# no url, no grab, no grab_config
self.assertRaises(SpiderMisuseError, Task, 'foo')
# both url and grab_config
self.assertRaises(SpiderMisuseError, Task, 'foo',
url=1, grab_config=1)
# both grab and grab_config
self.assertRaises(SpiderMisuseError, Task, 'foo',
grab=1, grab_config=1)
def test_task_clone_invalid_args(self):
task = Task('foo', url='http://ya.ru/')
# both url and grab
self.assertRaises(SpiderMisuseError, task.clone,
url=1, grab=1)
# both url and grab_config
self.assertRaises(SpiderMisuseError, task.clone,
url=1, grab_config=1)
# both grab_config and grab
self.assertRaises(SpiderMisuseError, task.clone,
grab=1, grab_config=1)
def test_task_clone_grab_config_and_url(self):
g = build_grab()
g.setup(url='http://foo.com/')
task = Task('foo', grab=g)
task2 = task.clone(url='http://bar.com/')
self.assertEqual(task2.url, 'http://bar.com/')
self.assertEqual(task2.grab_config['url'], 'http://bar.com/')
def test_task_clone_kwargs(self):
g = build_grab()
g.setup(url='http://foo.com/')
task = Task('foo', grab=g, cache_timeout=1)
task2 = task.clone(cache_timeout=2)
self.assertEqual(2, task2.cache_timeout)
def test_task_comparison(self):
t1 = Task('foo', url='http://foo.com/', priority=1)
t2 = Task('foo', url='http://foo.com/', priority=2)
t3 = Task('foo', url='http://foo.com/')
# If both tasks have priorities then the tasks
# are compared by their priorities
self.assertTrue(t1 < t2)
# If either of the compared tasks lacks a priority
# then the tasks are considered equal
self.assertTrue(t1 == t3)
self.assertTrue(t3 == t3)
def test_task_get_fallback_handler(self):
class TestSpider(Spider):
def zz(self, task):
pass
def task_bar_fallback(self, task):
pass
t1 = Task('foo', url='http://foo.com/', fallback_name='zz')
t2 = Task('bar', url='http://foo.com/')
t3 = Task(url='http://foo.com/')
bot = build_spider(TestSpider, )
self.assertEqual(t1.get_fallback_handler(bot), bot.zz)
self.assertEqual(t2.get_fallback_handler(bot), bot.task_bar_fallback)
self.assertEqual(t3.get_fallback_handler(bot), None)
def test_update_grab_instance(self):
class TestSpider(Spider):
def update_grab_instance(self, grab):
grab.setup(timeout=77)
def task_generator(self):
yield Task('page', url=self.meta['server'].get_url())
yield Task('page', grab=Grab(url=self.meta['server'].get_url(),
timeout=1))
def task_page(self, grab, task):
self.stat.collect('points', grab.config['timeout'])
bot = build_spider(TestSpider, meta={'server': self.server})
bot.setup_queue()
bot.add_task(Task('page', url=self.server.get_url()))
bot.add_task(Task('page', grab=Grab(url=self.server.get_url(),
timeout=1)))
bot.run()
self.assertEqual(set([77]), set(bot.stat.collections['points']))
def test_create_grab_instance(self):
class TestSpider(Spider):
def create_grab_instance(self, **kwargs):
grab = super(TestSpider, self).create_grab_instance(**kwargs)
grab.setup(timeout=77)
return grab
def task_generator(self):
yield Task('page', url=self.meta['server'].get_url())
yield Task('page', grab=Grab(url=self.meta['server'].get_url(),
timeout=76))
def task_page(self, grab, task):
self.stat.collect('points', grab.config['timeout'])
bot = build_spider(TestSpider, meta={'server': self.server})
bot.setup_queue()
bot.add_task(Task('page', url=self.server.get_url()))
bot.add_task(Task('page', grab=Grab(url=self.server.get_url(),
timeout=75)))
bot.run()
self.assertEqual(set([77, 76, 75]),
set(bot.stat.collections['points']))
def test_add_task_invalid_url_no_error(self):
class TestSpider(Spider):
pass
bot = build_spider(TestSpider, )
bot.setup_queue()
bot.add_task(Task('page', url='zz://zz'))
self.assertEqual(0, bot.task_queue.size())
bot.add_task(Task('page', url='zz://zz'), raise_error=False)
self.assertEqual(0, bot.task_queue.size())
bot.add_task(Task('page', url='http://example.com/'))
self.assertEqual(1, bot.task_queue.size())
def test_add_task_invalid_url_raise_error(self):
class TestSpider(Spider):
pass
bot = build_spider(TestSpider, )
bot.setup_queue()
self.assertRaises(SpiderError, bot.add_task,
Task('page', url='zz://zz'), raise_error=True)
self.assertEqual(0, bot.task_queue.size())
bot.add_task(Task('page', url='http://example.com/'))
self.assertEqual(1, bot.task_queue.size())
def test_multiple_internal_worker_error(self):
class TestSpider(Spider):
def process_network_result_with_handler_mp(*args, **kwargs):
1/0
def task_page(self):
pass
bot = build_spider(TestSpider, )
bot.setup_queue()
for x in range(5):
bot.add_task(Task('page', url='http://ya.ru/'))
bot.run()
self.assertTrue(1 < bot.stat.counters['parser-pipeline-restore'])
|
|
from sympy.utilities.pytest import XFAIL, raises
from sympy import (
symbols, lambdify, sqrt, sin, cos, tan, pi, acos, acosh, Rational,
Float, Matrix, Lambda, Piecewise, exp, Integral, oo, I, Abs, Function,
true, false, And, Or, Not, ITE, Min, Max)
from sympy.printing.lambdarepr import LambdaPrinter
import mpmath
from sympy.utilities.lambdify import implemented_function
from sympy.utilities.pytest import skip
from sympy.utilities.decorator import conserve_mpmath_dps
from sympy.external import import_module
import math
import sympy
MutableDenseMatrix = Matrix
numpy = import_module('numpy')
numexpr = import_module('numexpr')
w, x, y, z = symbols('w,x,y,z')
#================== Test different arguments =======================
def test_no_args():
f = lambdify([], 1)
raises(TypeError, lambda: f(-1))
assert f() == 1
def test_single_arg():
f = lambdify(x, 2*x)
assert f(1) == 2
def test_list_args():
f = lambdify([x, y], x + y)
assert f(1, 2) == 3
def test_str_args():
f = lambdify('x,y,z', 'z,y,x')
assert f(3, 2, 1) == (1, 2, 3)
assert f(1.0, 2.0, 3.0) == (3.0, 2.0, 1.0)
# make sure correct number of args required
raises(TypeError, lambda: f(0))
def test_own_namespace():
myfunc = lambda x: 1
f = lambdify(x, sin(x), {"sin": myfunc})
assert f(0.1) == 1
assert f(100) == 1
def test_own_module():
f = lambdify(x, sin(x), math)
assert f(0) == 0.0
def test_bad_args():
# no vargs given
raises(TypeError, lambda: lambdify(1))
# same with vector exprs
raises(TypeError, lambda: lambdify([1, 2]))
def test_atoms():
# Non-Symbol atoms should not be pulled out from the expression namespace
f = lambdify(x, pi + x, {"pi": 3.14})
assert f(0) == 3.14
f = lambdify(x, I + x, {"I": 1j})
assert f(1) == 1 + 1j
#================== Test different modules =========================
# High-precision output of sin(0.2) is used to detect unwanted loss of precision
@conserve_mpmath_dps
def test_sympy_lambda():
mpmath.mp.dps = 50
sin02 = mpmath.mpf("0.19866933079506121545941262711838975037020672954020")
f = lambdify(x, sin(x), "sympy")
assert f(x) == sin(x)
prec = 1e-15
assert -prec < f(Rational(1, 5)).evalf() - Float(str(sin02)) < prec
# arctan is in numpy module and should not be available
raises(NameError, lambda: lambdify(x, arctan(x), "sympy"))
@conserve_mpmath_dps
def test_math_lambda():
mpmath.mp.dps = 50
sin02 = mpmath.mpf("0.19866933079506121545941262711838975037020672954020")
f = lambdify(x, sin(x), "math")
prec = 1e-15
assert -prec < f(0.2) - sin02 < prec
raises(TypeError, lambda: f(x))
# if this succeeds, it can't be a python math function
@conserve_mpmath_dps
def test_mpmath_lambda():
mpmath.mp.dps = 50
sin02 = mpmath.mpf("0.19866933079506121545941262711838975037020672954020")
f = lambdify(x, sin(x), "mpmath")
prec = 1e-49 # mpmath precision is around 50 decimal places
assert -prec < f(mpmath.mpf("0.2")) - sin02 < prec
raises(TypeError, lambda: f(x))
# if this succeeds, it can't be a mpmath function
@conserve_mpmath_dps
@XFAIL
def test_number_precision():
mpmath.mp.dps = 50
sin02 = mpmath.mpf("0.19866933079506121545941262711838975037020672954020")
f = lambdify(x, sin02, "mpmath")
prec = 1e-49 # mpmath precision is around 50 decimal places
assert -prec < f(0) - sin02 < prec
#================== Test Translations ==============================
# We can only check that all translated functions are valid. Whether the
# translation tables are complete has to be checked by hand.
def test_math_transl():
from sympy.utilities.lambdify import MATH_TRANSLATIONS
for sym, mat in MATH_TRANSLATIONS.items():
assert sym in sympy.__dict__
assert mat in math.__dict__
def test_mpmath_transl():
from sympy.utilities.lambdify import MPMATH_TRANSLATIONS
for sym, mat in MPMATH_TRANSLATIONS.items():
assert sym in sympy.__dict__ or sym == 'Matrix'
assert mat in mpmath.__dict__
def test_numpy_transl():
if not numpy:
skip("numpy not installed.")
from sympy.utilities.lambdify import NUMPY_TRANSLATIONS
for sym, nump in NUMPY_TRANSLATIONS.items():
assert sym in sympy.__dict__
assert nump in numpy.__dict__
def test_numpy_translation_abs():
if not numpy:
skip("numpy not installed.")
f = lambdify(x, Abs(x), "numpy")
assert f(-1) == 1
assert f(1) == 1
def test_numexpr_printer():
if not numexpr:
skip("numexpr not installed.")
# if translation/printing is done incorrectly then evaluating
# a lambdified numexpr expression will throw an exception
from sympy.printing.lambdarepr import NumExprPrinter
from sympy import S
blacklist = ('where', 'complex', 'contains')
arg_tuple = (x, y, z) # some functions take more than one argument
for sym in NumExprPrinter._numexpr_functions.keys():
if sym in blacklist:
continue
ssym = S(sym)
if hasattr(ssym, '_nargs'):
nargs = ssym._nargs[0]
else:
nargs = 1
args = arg_tuple[:nargs]
f = lambdify(args, ssym(*args), modules='numexpr')
assert f(*(1, )*nargs) is not None
def test_issue_9334():
if not numexpr:
skip("numexpr not installed.")
if not numpy:
skip("numpy not installed.")
expr = sympy.S('b*a - sqrt(a**2)')
a, b = sorted(expr.free_symbols, key=lambda s: s.name)
func_numexpr = lambdify((a,b), expr, modules=[numexpr], dummify=False)
foo, bar = numpy.random.random((2, 4))
func_numexpr(foo, bar)
#================== Test some functions ============================
def test_exponentiation():
f = lambdify(x, x**2)
assert f(-1) == 1
assert f(0) == 0
assert f(1) == 1
assert f(-2) == 4
assert f(2) == 4
assert f(2.5) == 6.25
def test_sqrt():
f = lambdify(x, sqrt(x))
assert f(0) == 0.0
assert f(1) == 1.0
assert f(4) == 2.0
assert abs(f(2) - 1.414) < 0.001
assert f(6.25) == 2.5
def test_trig():
f = lambdify([x], [cos(x), sin(x)])
d = f(pi)
prec = 1e-11
assert -prec < d[0] + 1 < prec
assert -prec < d[1] < prec
d = f(3.14159)
prec = 1e-5
assert -prec < d[0] + 1 < prec
assert -prec < d[1] < prec
#================== Test vectors ===================================
def test_vector_simple():
f = lambdify((x, y, z), (z, y, x))
assert f(3, 2, 1) == (1, 2, 3)
assert f(1.0, 2.0, 3.0) == (3.0, 2.0, 1.0)
# make sure correct number of args required
raises(TypeError, lambda: f(0))
def test_vector_discontinuous():
f = lambdify(x, (-1/x, 1/x))
raises(ZeroDivisionError, lambda: f(0))
assert f(1) == (-1.0, 1.0)
assert f(2) == (-0.5, 0.5)
assert f(-2) == (0.5, -0.5)
def test_trig_symbolic():
f = lambdify([x], [cos(x), sin(x)])
d = f(pi)
assert abs(d[0] + 1) < 0.0001
assert abs(d[1] - 0) < 0.0001
def test_trig_float():
f = lambdify([x], [cos(x), sin(x)])
d = f(3.14159)
assert abs(d[0] + 1) < 0.0001
assert abs(d[1] - 0) < 0.0001
def test_docs():
f = lambdify(x, x**2)
assert f(2) == 4
f = lambdify([x, y, z], [z, y, x])
assert f(1, 2, 3) == [3, 2, 1]
f = lambdify(x, sqrt(x))
assert f(4) == 2.0
f = lambdify((x, y), sin(x*y)**2)
assert f(0, 5) == 0
def test_math():
f = lambdify((x, y), sin(x), modules="math")
assert f(0, 5) == 0
def test_sin():
f = lambdify(x, sin(x)**2)
assert isinstance(f(2), float)
f = lambdify(x, sin(x)**2, modules="math")
assert isinstance(f(2), float)
def test_matrix():
A = Matrix([[x, x*y], [sin(z) + 4, x**z]])
sol = Matrix([[1, 2], [sin(3) + 4, 1]])
f = lambdify((x, y, z), A, modules="sympy")
assert f(1, 2, 3) == sol
f = lambdify((x, y, z), (A, [A]), modules="sympy")
assert f(1, 2, 3) == (sol, [sol])
J = Matrix((x, x + y)).jacobian((x, y))
v = Matrix((x, y))
sol = Matrix([[1, 0], [1, 1]])
assert lambdify(v, J, modules='sympy')(1, 2) == sol
assert lambdify(v.T, J, modules='sympy')(1, 2) == sol
def test_numpy_matrix():
if not numpy:
skip("numpy not installed.")
A = Matrix([[x, x*y], [sin(z) + 4, x**z]])
sol_arr = numpy.array([[1, 2], [numpy.sin(3) + 4, 1]])
# Lambdify the matrix first, to ensure that arrays are returned by default
f = lambdify((x, y, z), A, ['numpy'])
numpy.testing.assert_allclose(f(1, 2, 3), sol_arr)
#Check that the types are arrays and matrices
assert isinstance(f(1, 2, 3), numpy.ndarray)
def test_numpy_transpose():
if not numpy:
skip("numpy not installed.")
A = Matrix([[1, x], [0, 1]])
f = lambdify((x), A.T, modules="numpy")
numpy.testing.assert_array_equal(f(2), numpy.array([[1, 0], [2, 1]]))
def test_numpy_inverse():
if not numpy:
skip("numpy not installed.")
A = Matrix([[1, x], [0, 1]])
f = lambdify((x), A**-1, modules="numpy")
numpy.testing.assert_array_equal(f(2), numpy.array([[1, -2], [0, 1]]))
def test_numpy_old_matrix():
if not numpy:
skip("numpy not installed.")
A = Matrix([[x, x*y], [sin(z) + 4, x**z]])
sol_arr = numpy.array([[1, 2], [numpy.sin(3) + 4, 1]])
f = lambdify((x, y, z), A, [{'ImmutableMatrix': numpy.matrix}, 'numpy'])
numpy.testing.assert_allclose(f(1, 2, 3), sol_arr)
assert isinstance(f(1, 2, 3), numpy.matrix)
def test_numpy_piecewise():
if not numpy:
skip("numpy not installed.")
pieces = Piecewise((x, x < 3), (x**2, x > 5), (0, True))
f = lambdify(x, pieces, modules="numpy")
numpy.testing.assert_array_equal(f(numpy.arange(10)),
numpy.array([0, 1, 2, 0, 0, 0, 36, 49, 64, 81]))
# If we evaluate somewhere all conditions are False, we should get back NaN
nodef_func = lambdify(x, Piecewise((x, x > 0), (-x, x < 0)))
numpy.testing.assert_array_equal(nodef_func(numpy.array([-1, 0, 1])),
numpy.array([1, numpy.nan, 1]))
def test_numpy_logical_ops():
if not numpy:
skip("numpy not installed.")
and_func = lambdify((x, y), And(x, y), modules="numpy")
or_func = lambdify((x, y), Or(x, y), modules="numpy")
not_func = lambdify((x), Not(x), modules="numpy")
arr1 = numpy.array([True, True])
arr2 = numpy.array([False, True])
numpy.testing.assert_array_equal(and_func(arr1, arr2), numpy.array([False, True]))
numpy.testing.assert_array_equal(or_func(arr1, arr2), numpy.array([True, True]))
numpy.testing.assert_array_equal(not_func(arr2), numpy.array([True, False]))
def test_numpy_matmul():
if not numpy:
skip("numpy not installed.")
xmat = Matrix([[x, y], [z, 1+z]])
ymat = Matrix([[x**2], [Abs(x)]])
mat_func = lambdify((x, y, z), xmat*ymat, modules="numpy")
numpy.testing.assert_array_equal(mat_func(0.5, 3, 4), numpy.array([[1.625], [3.5]]))
numpy.testing.assert_array_equal(mat_func(-0.5, 3, 4), numpy.array([[1.375], [3.5]]))
# Multiple matrices chained together in multiplication
f = lambdify((x, y, z), xmat*xmat*xmat, modules="numpy")
numpy.testing.assert_array_equal(f(0.5, 3, 4), numpy.array([[72.125, 119.25],
[159, 251]]))
def test_numpy_numexpr():
if not numpy:
skip("numpy not installed.")
if not numexpr:
skip("numexpr not installed.")
a, b, c = numpy.random.randn(3, 128, 128)
# ensure that numpy and numexpr return same value for complicated expression
expr = sin(x) + cos(y) + tan(z)**2 + Abs(z-y)*acos(sin(y*z)) + \
Abs(y-z)*acosh(2+exp(y-x))- sqrt(x**2+I*y**2)
npfunc = lambdify((x, y, z), expr, modules='numpy')
nefunc = lambdify((x, y, z), expr, modules='numexpr')
assert numpy.allclose(npfunc(a, b, c), nefunc(a, b, c))
def test_numexpr_userfunctions():
if not numpy:
skip("numpy not installed.")
if not numexpr:
skip("numexpr not installed.")
a, b = numpy.random.randn(2, 10)
uf = type('uf', (Function, ),
{'eval' : classmethod(lambda x, y : y**2+1)})
func = lambdify(x, 1-uf(x), modules='numexpr')
assert numpy.allclose(func(a), -(a**2))
uf = implemented_function(Function('uf'), lambda x, y : 2*x*y+1)
func = lambdify((x, y), uf(x, y), modules='numexpr')
assert numpy.allclose(func(a, b), 2*a*b+1)
def test_integral():
f = Lambda(x, exp(-x**2))
l = lambdify(x, Integral(f(x), (x, -oo, oo)), modules="sympy")
assert l(x) == Integral(exp(-x**2), (x, -oo, oo))
#================== Test symbolic ==================================
def test_sym_single_arg():
f = lambdify(x, x * y)
assert f(z) == z * y
def test_sym_list_args():
f = lambdify([x, y], x + y + z)
assert f(1, 2) == 3 + z
def test_sym_integral():
f = Lambda(x, exp(-x**2))
l = lambdify(x, Integral(f(x), (x, -oo, oo)), modules="sympy")
assert l(y).doit() == sqrt(pi)
def test_namespace_order():
# lambdify had a bug, such that module dictionaries or cached module
# dictionaries would pull earlier namespaces into themselves.
# Because the module dictionaries form the namespace of the
# generated lambda, this meant that the behavior of a previously
# generated lambda function could change as a result of later calls
# to lambdify.
n1 = {'f': lambda x: 'first f'}
n2 = {'f': lambda x: 'second f',
'g': lambda x: 'function g'}
f = sympy.Function('f')
g = sympy.Function('g')
if1 = lambdify(x, f(x), modules=(n1, "sympy"))
assert if1(1) == 'first f'
if2 = lambdify(x, g(x), modules=(n2, "sympy"))
# previously gave 'second f'
assert if1(1) == 'first f'
def test_imps():
# Here we check if the default returned functions are anonymous - in
# the sense that we can have more than one function with the same name
f = implemented_function('f', lambda x: 2*x)
g = implemented_function('f', lambda x: math.sqrt(x))
l1 = lambdify(x, f(x))
l2 = lambdify(x, g(x))
assert str(f(x)) == str(g(x))
assert l1(3) == 6
assert l2(3) == math.sqrt(3)
# check that we can pass in a Function as input
func = sympy.Function('myfunc')
assert not hasattr(func, '_imp_')
my_f = implemented_function(func, lambda x: 2*x)
assert hasattr(func, '_imp_')
# Error for functions with same name and different implementation
f2 = implemented_function("f", lambda x: x + 101)
raises(ValueError, lambda: lambdify(x, f(f2(x))))
def test_imps_wrong_args():
raises(ValueError, lambda: implemented_function(sin, lambda x: x))
def test_lambdify_imps():
# Test lambdify with implemented functions
# first test basic (sympy) lambdify
f = sympy.cos
assert lambdify(x, f(x))(0) == 1
assert lambdify(x, 1 + f(x))(0) == 2
assert lambdify((x, y), y + f(x))(0, 1) == 2
# make an implemented function and test
f = implemented_function("f", lambda x: x + 100)
assert lambdify(x, f(x))(0) == 100
assert lambdify(x, 1 + f(x))(0) == 101
assert lambdify((x, y), y + f(x))(0, 1) == 101
# Can also handle tuples, lists, dicts as expressions
lam = lambdify(x, (f(x), x))
assert lam(3) == (103, 3)
lam = lambdify(x, [f(x), x])
assert lam(3) == [103, 3]
lam = lambdify(x, [f(x), (f(x), x)])
assert lam(3) == [103, (103, 3)]
lam = lambdify(x, {f(x): x})
assert lam(3) == {103: 3}
lam = lambdify(x, {f(x): x})
assert lam(3) == {103: 3}
lam = lambdify(x, {x: f(x)})
assert lam(3) == {3: 103}
    # Check that imp is preferred to other namespaces by default
d = {'f': lambda x: x + 99}
lam = lambdify(x, f(x), d)
assert lam(3) == 103
# Unless flag passed
lam = lambdify(x, f(x), d, use_imps=False)
assert lam(3) == 102
def test_dummification():
t = symbols('t')
F = Function('F')
G = Function('G')
#"\alpha" is not a valid python variable name
#lambdify should sub in a dummy for it, and return
#without a syntax error
alpha = symbols(r'\alpha')
some_expr = 2 * F(t)**2 / G(t)
lam = lambdify((F(t), G(t)), some_expr)
assert lam(3, 9) == 2
lam = lambdify(sin(t), 2 * sin(t)**2)
assert lam(F(t)) == 2 * F(t)**2
    # Test that \alpha was properly dummified
lam = lambdify((alpha, t), 2*alpha + t)
assert lam(2, 1) == 5
raises(SyntaxError, lambda: lambdify(F(t) * G(t), F(t) * G(t) + 5))
raises(SyntaxError, lambda: lambdify(2 * F(t), 2 * F(t) + 5))
raises(SyntaxError, lambda: lambdify(2 * F(t), 4 * F(t) + 5))
def test_python_keywords():
# Test for issue 7452. The automatic dummification should ensure use of
# Python reserved keywords as symbol names will create valid lambda
# functions. This is an additional regression test.
python_if = symbols('if')
expr = python_if / 2
f = lambdify(python_if, expr)
assert f(4.0) == 2.0
def test_lambdify_docstring():
func = lambdify((w, x, y, z), w + x + y + z)
assert func.__doc__ == (
"Created with lambdify. Signature:\n\n"
"func(w, x, y, z)\n\n"
"Expression:\n\n"
"w + x + y + z")
syms = symbols('a1:26')
func = lambdify(syms, sum(syms))
assert func.__doc__ == (
"Created with lambdify. Signature:\n\n"
"func(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15,\n"
" a16, a17, a18, a19, a20, a21, a22, a23, a24, a25)\n\n"
"Expression:\n\n"
"a1 + a10 + a11 + a12 + a13 + a14 + a15 + a16 + a17 + a18 + a19 + a2 + a20 +...")
#================== Test special printers ==========================
def test_special_printers():
class IntervalPrinter(LambdaPrinter):
"""Use ``lambda`` printer but print numbers as ``mpi`` intervals. """
def _print_Integer(self, expr):
return "mpi('%s')" % super(IntervalPrinter, self)._print_Integer(expr)
def _print_Rational(self, expr):
return "mpi('%s')" % super(IntervalPrinter, self)._print_Rational(expr)
def intervalrepr(expr):
return IntervalPrinter().doprint(expr)
expr = sympy.sqrt(sympy.sqrt(2) + sympy.sqrt(3)) + sympy.S(1)/2
func0 = lambdify((), expr, modules="mpmath", printer=intervalrepr)
func1 = lambdify((), expr, modules="mpmath", printer=IntervalPrinter)
func2 = lambdify((), expr, modules="mpmath", printer=IntervalPrinter())
mpi = type(mpmath.mpi(1, 2))
assert isinstance(func0(), mpi)
assert isinstance(func1(), mpi)
assert isinstance(func2(), mpi)
def test_true_false():
    # We want an exact `is` comparison here, not just ==
assert lambdify([], true)() is True
assert lambdify([], false)() is False
def test_issue_2790():
assert lambdify((x, (y, z)), x + y)(1, (2, 4)) == 3
assert lambdify((x, (y, (w, z))), w + x + y + z)(1, (2, (3, 4))) == 10
assert lambdify(x, x + 1, dummify=False)(1) == 2
def test_ITE():
assert lambdify((x, y, z), ITE(x, y, z))(True, 5, 3) == 5
assert lambdify((x, y, z), ITE(x, y, z))(False, 5, 3) == 3
def test_Min_Max():
# see gh-10375
assert lambdify((x, y, z), Min(x, y, z))(1, 2, 3) == 1
assert lambdify((x, y, z), Max(x, y, z))(1, 2, 3) == 3
|
|
import sys
try:
from gurobipy import GRB, quicksum, Var
except ImportError:
    pass  # gurobipy is optional; the exact solver is unavailable without it
from Heuristics import RealGPS
from Models import FixedWaitingTimeModel, TEGModel
from Problem import Problem, SolvinType
from Solver import Solver
from optimizedGPS.structure.DriversStructure import DriversStructure
from optimizedGPS.structure.Graph import Graph
class ConstantModelAlgorithm(Problem):
"""
We set to every edge a constant congestion function.
As seen in the related study, the model is solvable in polynomial time.
We display here such an algorithm.
"""
def __init__(self, graph, drivers_graph, **kwargs):
self.model = FixedWaitingTimeModel(graph, drivers_graph, **kwargs)
for driver in self.model.drivers_graph.get_all_drivers():
for edge in self.get_graph().edges():
self.model.set_waiting_time(driver, edge, self.get_graph().get_congestion_function(*edge)(0))
kwargs["solving_type"] = SolvinType.HEURISTIC
super(ConstantModelAlgorithm, self).__init__(**kwargs)
def get_graph(self):
return self.model.get_graph()
def get_drivers_graph(self):
return self.model.get_drivers_graph()
def solve_with_heuristic(self):
for i in range(10):
self.model.solve()
better_solution = self.value > self.model.value
for driver in self.model.drivers_graph.get_all_drivers():
path = self.model.get_optimal_driver_path(driver)
waiting_times = self.model.get_optimal_driver_waiting_times(driver)
for edge in self.get_graph().iter_edges_in_path(path):
self.model.set_waiting_time(driver, edge, waiting_times[edge])
if better_solution:
self.set_optimal_path_to_driver(driver, path)
if better_solution:
self.value = self.model.value
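# Hypothetical usage sketch (graph and drivers_graph come from the
# optimizedGPS.structure Graph and DriversGraph classes):
#   algo = ConstantModelAlgorithm(graph, drivers_graph)
#   algo.solve_with_heuristic()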
class TEGColumnGenerationAlgorithm(Problem):
"""
Column generation algorithm using as master problem the TEGModel.
"""
PATH_COLUMN_GENERATION = "path"
UNIQUE_COLUMN_GENERATION = "unique"
def __init__(self, graph, drivers_graph, drivers_structure=None, heuristic=None, column_generation_type=None,
**kwargs):
super(TEGColumnGenerationAlgorithm, self).__init__(graph, drivers_graph, drivers_structure=drivers_structure,
**kwargs)
if column_generation_type in [None, self.UNIQUE_COLUMN_GENERATION]:
self.column_generation_type = self.UNIQUE_COLUMN_GENERATION
elif column_generation_type == self.PATH_COLUMN_GENERATION:
self.column_generation_type = self.PATH_COLUMN_GENERATION
else:
raise TypeError("colum generation type should be one of the class column generation possibilities")
self.heuristic = heuristic if heuristic is not None else RealGPS(graph, drivers_graph)
self.master = Solver(
self.graph, drivers_graph, TEGModel, drivers_structure=self.get_initial_structures(), binary=False)
def get_initial_structures(self):
"""
1. Solve the heuristic
2. Build structure taking the found optimal solution:
- for each driver and each visited edge, add as starting (resp. ending) time the unique driver's starting
(resp. ending) on the given edge
"""
self.heuristic.solve()
# Build the initial structure
drivers_structure = DriversStructure(self.graph, self.drivers_graph)
for driver, opt_solution in self.heuristic.iter_complete_optimal_solution():
i, edge = 0, None
edges = set()
while edge is None or edge[1] != driver.end:
edge, starting_time = opt_solution[i]
edges.add(edge)
drivers_structure.add_starting_times(driver, edge, starting_time)
try:
drivers_structure.add_ending_times(driver, edge, opt_solution[i + 1][1])
except (KeyError, IndexError):
pass
i += 1
drivers_structure.add_ending_times(
driver, edge, driver.time + self.heuristic.get_driver_driving_time(driver))
drivers_structure.set_unreachable_edge_to_driver(
driver, *filter(lambda e: e not in edges, self.graph.edges_iter()))
return drivers_structure
def get_TEGgraph(self):
return self.master.algorithm.TEGgraph
def set_optimal_solution(self):
self.opt_solution = {}
for driver, path in self.master.iter_optimal_solution():
self.set_optimal_path_to_driver(driver, path)
def get_paths_as_next_columns(self):
"""
We minimize the reduced cost (see master) over a specified set of paths for each driver.
"""
# TODO: add a limit to the paths iteration
best_driver, best_path, best_value = None, None, sys.maxint
teg = self.get_TEGgraph()
for driver in self.drivers_graph.get_all_drivers():
traffic = self.master.get_optimal_traffic()
n_org = 0
for extended_path in self.graph.get_sorted_paths_with_traffic(
driver.start, driver.end, driver.time, traffic, delta=10):
if n_org == self.graph.number_of_edges():
break
path = tuple(map(lambda e: e[0], extended_path))
n = 0
for path_ in teg.iter_time_paths_from_path(path, starting_time=driver.time):
if n == self.graph.number_of_edges():
break
value = 0
for edge_ in Graph.iter_edges_in_path(path_):
edge = teg.get_original_edge(edge_)
start = teg.get_node_layer(edge_[0])
end = teg.get_node_layer(edge_[1])
if not self.master.algorithm.has_variable(driver, edge, start, end):
value += self.master.algorithm.get_reduced_cost(driver, edge, start, end) or 0
if value < best_value:
best_driver, best_path = driver, path_
best_value = value
n += 1
n_org += 1
try:
return map(
lambda e_: (
best_driver, teg.get_original_edge(e_), teg.get_node_layer(e_[0]), teg.get_node_layer(e_[1])),
Graph.iter_edges_in_path(best_path)
)
        except Exception:  # best_path is None: no improving column was found
            return []
def get_unique_column(self):
best_driver, best_edge, best_start, best_end, reduced_cost = None, None, None, None, sys.maxint
for driver in self.drivers_graph.get_all_drivers():
for edge in self.graph.edges_iter():
for start, end in self.drivers_structure.iter_time_intervals(driver, edge):
if not self.master.algorithm.has_variable(driver, edge, start, end):
rc = self.master.algorithm.get_reduced_cost(driver, edge, start, end)
if rc < reduced_cost:
best_driver, best_edge, best_start, best_end, reduced_cost = driver, edge, start, end, rc
res = (best_driver, best_edge, best_start, best_end)
if all(map(lambda e: e is not None, res)):
return [res]
def get_next_columns(self):
if self.column_generation_type == self.PATH_COLUMN_GENERATION:
return self.get_paths_as_next_columns()
elif self.column_generation_type == self.UNIQUE_COLUMN_GENERATION:
return self.get_unique_column()
def add_column_to_master(self, column):
driver, edge, start, end = column
self.master.drivers_structure.set_reachable_edge_to_driver(driver, edge)
self.master.drivers_structure.add_starting_times(driver, edge, start)
self.master.drivers_structure.add_ending_times(driver, edge, end)
update = self.master.algorithm.generate_variables(driver, edge, [(start, end)])
self.master.algorithm.generate_constraints(driver, edge, [(start, end)])
return update
def solve_master_as_integer(self):
"""
Solve the master problem with integer variables
"""
graph = self.master.graph
drivers_graph = self.master.drivers_graph
drivers_structure = self.master.drivers_structure
self.master = Solver(graph, drivers_graph, TEGModel, drivers_structure=drivers_structure, binary=True)
self.master.solve()
def solve_with_solver(self):
"""
Classical column generation algorithm:
1. Solve the master problem
2. Solve the reduced problem taking the dual solution of the master problem.
This returns us the best next column to add
3. If optimality, stop, else add the next column and restart from 1.
"""
while True:
self.master.solve()
next_columns = self.get_next_columns()
if not next_columns:
break # Optimality reached
update = False
for column in next_columns:
update = self.add_column_to_master(column)
if update is False:
break
self.solve_master_as_integer()
self.set_optimal_solution()
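# ---------------------------------------------------------------------------
# Minimal, self-contained sketch of the generic column generation loop that
# solve_with_solver implements above. The stub master is illustrative only and
# is not part of optimizedGPS.
class _StubMaster(object):
    def __init__(self):
        self.columns = []

    def solve(self):
        pass  # a real master would re-optimize the restricted problem here

    def get_next_columns(self):
        # pretend the pricing problem finds an improving column 3 times
        return [len(self.columns)] if len(self.columns) < 3 else []


def _column_generation_sketch(master):
    while True:
        master.solve()
        next_columns = master.get_next_columns()
        if not next_columns:
            break  # no column with negative reduced cost: optimality reached
        master.columns.extend(next_columns)
    return master.columns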
|
|
__author__ = 'tylin'
__version__ = '2.0'
# Interface for accessing the Microsoft COCO dataset.
# Microsoft COCO is a large image dataset designed for object detection,
# segmentation, and caption generation. pycocotools is a Python API that
# assists in loading, parsing and visualizing the annotations in COCO.
# Please visit http://mscoco.org/ for more information on COCO, including
# for the data, paper, and tutorials. The exact format of the annotations
# is also described on the COCO website. For example usage of the pycocotools
# please see pycocotools_demo.ipynb. In addition to this API, please download both
# the COCO images and annotations in order to run the demo.
# An alternative to using the API is to load the annotations directly
# into a Python dictionary.
# Using the API provides additional utility functions. Note that this API
# supports both *instance* and *caption* annotations. In the case of
# captions not all functions are defined (e.g. categories are undefined).
# The following API functions are defined:
# COCO - COCO api class that loads COCO annotation file and prepares data structures.
# decodeMask - Decode binary mask M encoded via run-length encoding.
# encodeMask - Encode binary mask M using run-length encoding.
# getAnnIds - Get ann ids that satisfy given filter conditions.
# getCatIds - Get cat ids that satisfy given filter conditions.
# getImgIds - Get img ids that satisfy given filter conditions.
# loadAnns - Load anns with the specified ids.
# loadCats - Load cats with the specified ids.
# loadImgs - Load imgs with the specified ids.
# annToMask - Convert segmentation in an annotation to binary mask.
# showAnns - Display the specified annotations.
# loadRes - Load algorithm results and create API for accessing them.
# download - Download COCO images from mscoco.org server.
# Throughout the API "ann"=annotation, "cat"=category, and "img"=image.
# Help on each function can be accessed by: "help COCO>function".
# See also COCO>decodeMask,
# COCO>encodeMask, COCO>getAnnIds, COCO>getCatIds,
# COCO>getImgIds, COCO>loadAnns, COCO>loadCats,
# COCO>loadImgs, COCO>annToMask, COCO>showAnns
# Microsoft COCO Toolbox. version 2.0
# Data, paper, and tutorials available at: http://mscoco.org/
# Code written by Piotr Dollar and Tsung-Yi Lin, 2014.
# Licensed under the Simplified BSD License [see bsd.txt]
import json
import time
import matplotlib.pyplot as plt
from matplotlib.collections import PatchCollection
from matplotlib.patches import Polygon
import numpy as np
import copy
import itertools
from libs.datasets.pycocotools import mask as maskUtils # change importing
import os
from collections import defaultdict
import sys
PYTHON_VERSION = sys.version_info[0]
if PYTHON_VERSION == 2:
from urllib import urlretrieve
elif PYTHON_VERSION == 3:
from urllib.request import urlretrieve
class COCO:
def __init__(self, annotation_file=None):
"""
Constructor of Microsoft COCO helper class for reading and visualizing annotations.
:param annotation_file (str): location of annotation file
:param image_folder (str): location to the folder that hosts images.
:return:
"""
# load dataset
self.dataset,self.anns,self.cats,self.imgs = dict(),dict(),dict(),dict()
self.imgToAnns, self.catToImgs = defaultdict(list), defaultdict(list)
        if annotation_file is not None:
print('loading annotations into memory...')
tic = time.time()
dataset = json.load(open(annotation_file, 'r'))
assert type(dataset)==dict, 'annotation file format {} not supported'.format(type(dataset))
print('Done (t={:0.2f}s)'.format(time.time()- tic))
self.dataset = dataset
self.createIndex()
def createIndex(self):
# create index
print('creating index...')
anns, cats, imgs = {}, {}, {}
imgToAnns,catToImgs = defaultdict(list),defaultdict(list)
if 'annotations' in self.dataset:
for ann in self.dataset['annotations']:
imgToAnns[ann['image_id']].append(ann)
anns[ann['id']] = ann
if 'images' in self.dataset:
for img in self.dataset['images']:
imgs[img['id']] = img
if 'categories' in self.dataset:
for cat in self.dataset['categories']:
cats[cat['id']] = cat
if 'annotations' in self.dataset and 'categories' in self.dataset:
for ann in self.dataset['annotations']:
catToImgs[ann['category_id']].append(ann['image_id'])
print('index created!')
# create class members
self.anns = anns
self.imgToAnns = imgToAnns
self.catToImgs = catToImgs
self.imgs = imgs
self.cats = cats
def info(self):
"""
Print information about the annotation file.
:return:
"""
for key, value in self.dataset['info'].items():
print('{}: {}'.format(key, value))
def getAnnIds(self, imgIds=[], catIds=[], areaRng=[], iscrowd=None):
"""
Get ann ids that satisfy given filter conditions. default skips that filter
:param imgIds (int array) : get anns for given imgs
catIds (int array) : get anns for given cats
areaRng (float array) : get anns for given area range (e.g. [0 inf])
iscrowd (boolean) : get anns for given crowd label (False or True)
:return: ids (int array) : integer array of ann ids
"""
imgIds = imgIds if type(imgIds) == list else [imgIds]
catIds = catIds if type(catIds) == list else [catIds]
if len(imgIds) == len(catIds) == len(areaRng) == 0:
anns = self.dataset['annotations']
else:
if not len(imgIds) == 0:
lists = [self.imgToAnns[imgId] for imgId in imgIds if imgId in self.imgToAnns]
anns = list(itertools.chain.from_iterable(lists))
else:
anns = self.dataset['annotations']
anns = anns if len(catIds) == 0 else [ann for ann in anns if ann['category_id'] in catIds]
anns = anns if len(areaRng) == 0 else [ann for ann in anns if ann['area'] > areaRng[0] and ann['area'] < areaRng[1]]
        if iscrowd is not None:
ids = [ann['id'] for ann in anns if ann['iscrowd'] == iscrowd]
else:
ids = [ann['id'] for ann in anns]
return ids
def getCatIds(self, catNms=[], supNms=[], catIds=[]):
"""
        Get cat ids that satisfy given filter conditions. default skips that filter.
:param catNms (str array) : get cats for given cat names
:param supNms (str array) : get cats for given supercategory names
:param catIds (int array) : get cats for given cat ids
:return: ids (int array) : integer array of cat ids
"""
catNms = catNms if type(catNms) == list else [catNms]
supNms = supNms if type(supNms) == list else [supNms]
catIds = catIds if type(catIds) == list else [catIds]
if len(catNms) == len(supNms) == len(catIds) == 0:
cats = self.dataset['categories']
else:
cats = self.dataset['categories']
cats = cats if len(catNms) == 0 else [cat for cat in cats if cat['name'] in catNms]
cats = cats if len(supNms) == 0 else [cat for cat in cats if cat['supercategory'] in supNms]
cats = cats if len(catIds) == 0 else [cat for cat in cats if cat['id'] in catIds]
ids = [cat['id'] for cat in cats]
return ids
def getImgIds(self, imgIds=[], catIds=[]):
'''
Get img ids that satisfy given filter conditions.
:param imgIds (int array) : get imgs for given ids
:param catIds (int array) : get imgs with all given cats
:return: ids (int array) : integer array of img ids
'''
imgIds = imgIds if type(imgIds) == list else [imgIds]
catIds = catIds if type(catIds) == list else [catIds]
if len(imgIds) == len(catIds) == 0:
ids = self.imgs.keys()
else:
ids = set(imgIds)
for i, catId in enumerate(catIds):
if i == 0 and len(ids) == 0:
ids = set(self.catToImgs[catId])
else:
ids &= set(self.catToImgs[catId])
return list(ids)
def loadAnns(self, ids=[]):
"""
Load anns with the specified ids.
:param ids (int array) : integer ids specifying anns
:return: anns (object array) : loaded ann objects
"""
if type(ids) == list:
return [self.anns[id] for id in ids]
elif type(ids) == int:
return [self.anns[ids]]
def loadCats(self, ids=[]):
"""
Load cats with the specified ids.
:param ids (int array) : integer ids specifying cats
:return: cats (object array) : loaded cat objects
"""
if type(ids) == list:
return [self.cats[id] for id in ids]
elif type(ids) == int:
return [self.cats[ids]]
def loadImgs(self, ids=[]):
"""
        Load imgs with the specified ids.
:param ids (int array) : integer ids specifying img
:return: imgs (object array) : loaded img objects
"""
if type(ids) == list:
return [self.imgs[id] for id in ids]
elif type(ids) == int:
return [self.imgs[ids]]
def showAnns(self, anns):
"""
Display the specified annotations.
:param anns (array of object): annotations to display
:return: None
"""
if len(anns) == 0:
return 0
if 'segmentation' in anns[0] or 'keypoints' in anns[0]:
datasetType = 'instances'
elif 'caption' in anns[0]:
datasetType = 'captions'
else:
raise Exception('datasetType not supported')
if datasetType == 'instances':
ax = plt.gca()
ax.set_autoscale_on(False)
polygons = []
color = []
for ann in anns:
c = (np.random.random((1, 3))*0.6+0.4).tolist()[0]
if 'segmentation' in ann:
if type(ann['segmentation']) == list:
# polygon
for seg in ann['segmentation']:
poly = np.array(seg).reshape((int(len(seg)/2), 2))
polygons.append(Polygon(poly))
color.append(c)
else:
# mask
t = self.imgs[ann['image_id']]
if type(ann['segmentation']['counts']) == list:
rle = maskUtils.frPyObjects([ann['segmentation']], t['height'], t['width'])
else:
rle = [ann['segmentation']]
m = maskUtils.decode(rle)
img = np.ones( (m.shape[0], m.shape[1], 3) )
if ann['iscrowd'] == 1:
color_mask = np.array([2.0,166.0,101.0])/255
if ann['iscrowd'] == 0:
color_mask = np.random.random((1, 3)).tolist()[0]
for i in range(3):
img[:,:,i] = color_mask[i]
ax.imshow(np.dstack( (img, m*0.5) ))
if 'keypoints' in ann and type(ann['keypoints']) == list:
# turn skeleton into zero-based index
sks = np.array(self.loadCats(ann['category_id'])[0]['skeleton'])-1
kp = np.array(ann['keypoints'])
x = kp[0::3]
y = kp[1::3]
v = kp[2::3]
for sk in sks:
if np.all(v[sk]>0):
plt.plot(x[sk],y[sk], linewidth=3, color=c)
plt.plot(x[v>0], y[v>0],'o',markersize=8, markerfacecolor=c, markeredgecolor='k',markeredgewidth=2)
plt.plot(x[v>1], y[v>1],'o',markersize=8, markerfacecolor=c, markeredgecolor=c, markeredgewidth=2)
p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.4)
ax.add_collection(p)
p = PatchCollection(polygons, facecolor='none', edgecolors=color, linewidths=2)
ax.add_collection(p)
elif datasetType == 'captions':
for ann in anns:
print(ann['caption'])
def loadRes(self, resFile):
"""
Load result file and return a result api object.
:param resFile (str) : file name of result file
:return: res (obj) : result api object
"""
res = COCO()
res.dataset['images'] = [img for img in self.dataset['images']]
print('Loading and preparing results...')
tic = time.time()
        if type(resFile) == str or (PYTHON_VERSION == 2 and type(resFile) == unicode):
anns = json.load(open(resFile))
elif type(resFile) == np.ndarray:
anns = self.loadNumpyAnnotations(resFile)
else:
anns = resFile
        assert type(anns) == list, 'results is not an array of objects'
annsImgIds = [ann['image_id'] for ann in anns]
assert set(annsImgIds) == (set(annsImgIds) & set(self.getImgIds())), \
'Results do not correspond to current coco set'
if 'caption' in anns[0]:
imgIds = set([img['id'] for img in res.dataset['images']]) & set([ann['image_id'] for ann in anns])
res.dataset['images'] = [img for img in res.dataset['images'] if img['id'] in imgIds]
for id, ann in enumerate(anns):
ann['id'] = id+1
elif 'bbox' in anns[0] and not anns[0]['bbox'] == []:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
bb = ann['bbox']
x1, x2, y1, y2 = [bb[0], bb[0]+bb[2], bb[1], bb[1]+bb[3]]
if not 'segmentation' in ann:
ann['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]]
ann['area'] = bb[2]*bb[3]
ann['id'] = id+1
ann['iscrowd'] = 0
elif 'segmentation' in anns[0]:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
# now only support compressed RLE format as segmentation results
ann['area'] = maskUtils.area(ann['segmentation'])
if not 'bbox' in ann:
ann['bbox'] = maskUtils.toBbox(ann['segmentation'])
ann['id'] = id+1
ann['iscrowd'] = 0
elif 'keypoints' in anns[0]:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
s = ann['keypoints']
x = s[0::3]
y = s[1::3]
x0,x1,y0,y1 = np.min(x), np.max(x), np.min(y), np.max(y)
ann['area'] = (x1-x0)*(y1-y0)
ann['id'] = id + 1
ann['bbox'] = [x0,y0,x1-x0,y1-y0]
print('DONE (t={:0.2f}s)'.format(time.time()- tic))
res.dataset['annotations'] = anns
res.createIndex()
return res
def download(self, tarDir = None, imgIds = [] ):
'''
Download COCO images from mscoco.org server.
        :param tarDir (str): target directory for the downloaded images
imgIds (list): images to be downloaded
:return:
'''
if tarDir is None:
print('Please specify target directory')
return -1
if len(imgIds) == 0:
imgs = self.imgs.values()
else:
imgs = self.loadImgs(imgIds)
N = len(imgs)
if not os.path.exists(tarDir):
os.makedirs(tarDir)
for i, img in enumerate(imgs):
tic = time.time()
fname = os.path.join(tarDir, img['file_name'])
if not os.path.exists(fname):
urlretrieve(img['coco_url'], fname)
print('downloaded {}/{} images (t={:0.1f}s)'.format(i, N, time.time()- tic))
def loadNumpyAnnotations(self, data):
"""
Convert result data from a numpy array [Nx7] where each row contains {imageID,x1,y1,w,h,score,class}
:param data (numpy.ndarray)
:return: annotations (python nested list)
"""
print('Converting ndarray to lists...')
assert(type(data) == np.ndarray)
print(data.shape)
assert(data.shape[1] == 7)
N = data.shape[0]
ann = []
for i in range(N):
if i % 1000000 == 0:
print('{}/{}'.format(i,N))
ann += [{
'image_id' : int(data[i, 0]),
'bbox' : [ data[i, 1], data[i, 2], data[i, 3], data[i, 4] ],
'score' : data[i, 5],
'category_id': int(data[i, 6]),
}]
return ann
def annToRLE(self, ann):
"""
        Convert annotation which can be polygons or uncompressed RLE to RLE.
        :return: rle (run-length encoding of the segmentation mask)
"""
t = self.imgs[ann['image_id']]
h, w = t['height'], t['width']
segm = ann['segmentation']
if type(segm) == list:
# polygon -- a single object might consist of multiple parts
# we merge all parts into one mask rle code
rles = maskUtils.frPyObjects(segm, h, w)
rle = maskUtils.merge(rles)
elif type(segm['counts']) == list:
# uncompressed RLE
rle = maskUtils.frPyObjects(segm, h, w)
else:
# rle
rle = ann['segmentation']
return rle
def annToMask(self, ann):
"""
Convert annotation which can be polygons, uncompressed RLE, or RLE to binary mask.
:return: binary mask (numpy 2D array)
"""
rle = self.annToRLE(ann)
m = maskUtils.decode(rle)
return m
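# ---------------------------------------------------------------------------
# Minimal usage sketch of the API above. The annotation path is hypothetical;
# point it at a real COCO annotation file before running.
if __name__ == '__main__':
    coco = COCO('annotations/instances_val2017.json')  # hypothetical path
    catIds = coco.getCatIds(catNms=['person'])
    imgIds = coco.getImgIds(catIds=catIds)
    annIds = coco.getAnnIds(imgIds=imgIds[:1], catIds=catIds, iscrowd=None)
    print('loaded {} annotations'.format(len(coco.loadAnns(annIds))))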
|
|
import cuttsum.events
import cuttsum.corpora
from cuttsum.pipeline import InputStreamResource
from cuttsum.classifiers import NuggetRegressor
import cuttsum.judgements
import pandas as pd
import numpy as np
from datetime import datetime
from cuttsum.misc import event2semsim
from sklearn.cluster import AffinityPropagation
from sklearn.metrics.pairwise import cosine_similarity
import os
def epoch(dt):
return int((dt - datetime(1970, 1, 1)).total_seconds())
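# e.g. epoch(datetime(2015, 1, 1)) == 1420070400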
matches_df = cuttsum.judgements.get_merged_dataframe()
def get_input_stream(event, gold_probs, extractor="goose", thresh=.8, delay=None, topk=20):
max_nuggets = 3
corpus = cuttsum.corpora.get_raw_corpus(event)
res = InputStreamResource()
df = pd.concat(
res.get_dataframes(event, corpus, extractor, thresh, delay, topk))
selector = (df["n conf"] == 1) & (df["nugget probs"].apply(len) == 0)
df.loc[selector, "nugget probs"] = df.loc[selector, "nuggets"].apply(lambda x: {n:1 for n in x})
df["true probs"] = df["nugget probs"].apply(lambda x: [val for key, val in x.items()] +[0])
df["true probs"] = df["true probs"].apply(lambda x: np.max(x))
df.loc[(df["n conf"] == 1) & (df["nuggets"].apply(len) == 0), "true probs"] = 0
if gold_probs is True:
df["probs"] = df["true probs"]
else:
df["probs"] = NuggetRegressor().predict(event, df)
df["nuggets"] = df["nugget probs"].apply(
lambda x: set([key for key, val in x.items() if val > .9]))
nid2time = {}
nids = set(matches_df[matches_df["query id"] == event.query_id]["nugget id"].tolist())
for nid in nids:
ts = matches_df[matches_df["nugget id"] == nid]["update id"].apply(lambda x: int(x.split("-")[0])).tolist()
ts.sort()
nid2time[nid] = ts[0]
fltr_nuggets = []
for name, row in df.iterrows():
fltr_nuggets.append(
set([nug for nug in row["nuggets"] if nid2time[nug] <= row["timestamp"]]))
#print df[["nuggets", "timestamp"]].apply(lambda y: print y[0]) # datetime.utcfromtimestamp(int(y["timestamp"])))
#print nids
df["nuggets"] = fltr_nuggets
df["nuggets"] = df["nuggets"].apply(lambda x: x if len(x) <= max_nuggets else set([]))
from cuttsum.pipeline import DedupedArticlesResource
ded = DedupedArticlesResource()
stats_df = ded.get_stats_df(event, corpus, extractor, thresh)
stats_df["stream ids"] = stats_df["stream ids"].apply(lambda x: set(eval(x)))
sid2match = {}
for _, row in stats_df.iterrows():
for sid in row["stream ids"]:
sid2match[sid] = row["match"]
all_ts = []
all_docs = []
new_docs = []
for (sid, ts), doc in df.groupby(["stream id", "timestamp"]):
# print sub_doc
if len(all_ts) > 0:
assert ts >= all_ts[-1]
all_ts.append(ts)
if sid2match[sid] is True:
new_docs.append(doc)
all_docs.append(doc)
df = pd.concat(new_docs)
print len(all_docs), len(new_docs)
return df
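# Minimal self-contained sketch of the clustering step used in main() below:
# affinity propagation over a precomputed negative cosine-distance matrix.
#   import numpy as np
#   from sklearn.cluster import AffinityPropagation
#   from sklearn.metrics.pairwise import cosine_similarity
#   X = np.random.rand(6, 4)
#   K = -(1 - cosine_similarity(X))
#   ap = AffinityPropagation(affinity="precomputed").fit(K)
#   labels, exemplars = ap.labels_, ap.cluster_centers_indices_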
def main(output_dir, sim_threshold, bucket_size):
if not os.path.exists(output_dir):
os.makedirs(output_dir)
dev_qids = set([19, 23, 27, 34, 35,] + [7,24])
summary_data = []
K_data = []
for event in cuttsum.events.get_events():
if event.query_num in dev_qids: continue
print event
semsim = event2semsim(event)
istream = get_input_stream(event, False, extractor="goose",
thresh=.8, delay=None, topk=20)
prev_time = 0
cache = None
clusters = []
max_h = len(event.list_event_hours()) - 1
for h, hour in enumerate(event.list_event_hours()):
if h % bucket_size != 0 and h != max_h:
continue
current_time = epoch(hour)
input_sents = istream[
(istream["timestamp"] < current_time) & \
(istream["timestamp"] >= prev_time)]
len_select = input_sents["lemmas stopped"].apply(len) > 10
input_sents = input_sents[len_select]
if len(input_sents) <= 1: continue
stems = input_sents["stems"].apply(lambda x: ' '.join(x)).tolist()
X = semsim.transform(stems)
K = -(1 - cosine_similarity(X))
K_ma = np.ma.masked_array(K, np.eye(K.shape[0]))
Kmin = np.ma.min(K_ma)
Kmax = np.ma.max(K_ma)
            median = np.ma.median(K_ma)[0]
            K_data.append([Kmin, Kmax, median])
            print "SYS TIME:", hour, "# SENTS:", K.shape[0],
            print "min/median/max pref: {}/{}/{}".format(
                Kmin, median, Kmax)
#
ap = AffinityPropagation(affinity="precomputed",
verbose=True, max_iter=1000)
ap.fit(K)
labels = ap.labels_
            if ap.cluster_centers_indices_ is not None:
for c in ap.cluster_centers_indices_:
                    if cache is None:
cache = X[c]
updates_df = \
input_sents.reset_index(drop=True).iloc[c]
updates_df["query id"] = event.query_num
updates_df["system timestamp"] = current_time
summary_data.append(
updates_df[
["query id", "stream id", "sent id",
"system timestamp", "sent text"]
].to_frame().T
)
else:
Ksum = cosine_similarity(cache, X[c])
if Ksum.max() < sim_threshold:
cache = np.vstack([cache, X[c]])
updates_df = \
input_sents.reset_index(drop=True).iloc[c]
updates_df["query id"] = event.query_num
updates_df["system timestamp"] = current_time
summary_data.append(
updates_df[
["query id", "stream id", "sent id",
"system timestamp", "sent text"]
].to_frame().T
)
prev_time = current_time
df = pd.DataFrame(K_data, columns=["min", "max", "median"])
print df
print df.mean()
print df.std()
print df.max()
df = pd.concat(summary_data)
df["conf"] = .5
df["team id"] = "AP"
df["run id"] = "sim{}_bs{}".format(
sim_threshold, bucket_size)
print df
of = os.path.join(output_dir, "ap." + "sim{}_bs{}.tsv".format(
sim_threshold, bucket_size))
cols = ["query id", "team id", "run id", "stream id", "sent id",
"system timestamp", "conf"]
df[cols].to_csv(of, sep="\t", header=False, index=False)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(u"--output-dir", type=str,
required=True, help="directory to write results.")
parser.add_argument(
u"--sim-cutoff", type=float, required=True)
parser.add_argument(
u"--bucket-size", type=float, required=True)
args = parser.parse_args()
main(args.output_dir, args.sim_cutoff, args.bucket_size)
|
|
"""Convenience functions for running common Picard utilities.
"""
import os
import collections
import pysam
from bcbio.utils import file_exists
from bcbio.distributed.transaction import file_transaction, tx_tmpdir
def picard_rnaseq_metrics(picard, align_bam, ref, ribo="null", out_file=None):
""" Collect RNASeq metrics for a bam file """
base, ext = os.path.splitext(align_bam)
if out_file is None:
out_file = "%s.metrics" % (base)
if not file_exists(out_file):
with tx_tmpdir(picard._config) as tmp_dir:
with file_transaction(picard._config, out_file) as tx_out_file:
opts = [("INPUT", align_bam),
("OUTPUT", tx_out_file),
("TMP_DIR", tmp_dir),
("REF_FLAT", ref),
("STRAND_SPECIFICITY", "NONE"),
("ASSUME_SORTED", "True"),
("RIBOSOMAL_INTERVALS", ribo)]
picard.run("CollectRnaSeqMetrics", opts)
return out_file
def picard_insert_metrics(picard, align_bam, out_file=None):
""" Collect insert size metrics for a bam file """
base, ext = os.path.splitext(align_bam)
if out_file is None:
out_file = "%s-insert-metrics.txt" % (base)
histogram = "%s-insert-histogram.pdf" % (base)
if not file_exists(out_file):
with tx_tmpdir(picard._config) as tmp_dir:
with file_transaction(picard._config, out_file) as tx_out_file:
opts = [("INPUT", align_bam),
("OUTPUT", tx_out_file),
("HISTOGRAM_FILE", histogram),
("TMP_DIR", tmp_dir)]
picard.run("CollectInsertSizeMetrics", opts)
return out_file
def picard_sort(picard, align_bam, sort_order="coordinate",
out_file=None, compression_level=None, pipe=False):
"""Sort a BAM file by coordinates.
"""
base, ext = os.path.splitext(align_bam)
if out_file is None:
out_file = "%s-sort%s" % (base, ext)
if not file_exists(out_file):
with tx_tmpdir(picard._config) as tmp_dir:
with file_transaction(picard._config, out_file) as tx_out_file:
opts = [("INPUT", align_bam),
("OUTPUT", out_file if pipe else tx_out_file),
("TMP_DIR", tmp_dir),
("SORT_ORDER", sort_order)]
if compression_level:
opts.append(("COMPRESSION_LEVEL", compression_level))
picard.run("SortSam", opts, pipe=pipe)
return out_file
def picard_merge(picard, in_files, out_file=None,
merge_seq_dicts=False):
"""Merge multiple BAM files together with Picard.
"""
if out_file is None:
out_file = "%smerge.bam" % os.path.commonprefix(in_files)
if not file_exists(out_file):
with tx_tmpdir(picard._config) as tmp_dir:
with file_transaction(picard._config, out_file) as tx_out_file:
opts = [("OUTPUT", tx_out_file),
("SORT_ORDER", "coordinate"),
("MERGE_SEQUENCE_DICTIONARIES",
"true" if merge_seq_dicts else "false"),
("USE_THREADING", "true"),
("TMP_DIR", tmp_dir)]
for in_file in in_files:
opts.append(("INPUT", in_file))
picard.run("MergeSamFiles", opts)
return out_file
def picard_index(picard, in_bam):
index_file = "%s.bai" % in_bam
alt_index_file = "%s.bai" % os.path.splitext(in_bam)[0]
if not file_exists(index_file) and not file_exists(alt_index_file):
with file_transaction(picard._config, index_file) as tx_index_file:
opts = [("INPUT", in_bam),
("OUTPUT", tx_index_file)]
picard.run("BuildBamIndex", opts)
return index_file if file_exists(index_file) else alt_index_file
def picard_reorder(picard, in_bam, ref_file, out_file):
"""Reorder BAM file to match reference file ordering.
"""
if not file_exists(out_file):
with tx_tmpdir(picard._config) as tmp_dir:
with file_transaction(picard._config, out_file) as tx_out_file:
opts = [("INPUT", in_bam),
("OUTPUT", tx_out_file),
("REFERENCE", ref_file),
("ALLOW_INCOMPLETE_DICT_CONCORDANCE", "true"),
("TMP_DIR", tmp_dir)]
picard.run("ReorderSam", opts)
return out_file
def picard_fix_rgs(picard, in_bam, names):
"""Add read group information to BAM files and coordinate sort.
"""
out_file = "%s-fixrgs.bam" % os.path.splitext(in_bam)[0]
if not file_exists(out_file):
with tx_tmpdir(picard._config) as tmp_dir:
with file_transaction(picard._config, out_file) as tx_out_file:
opts = [("INPUT", in_bam),
("OUTPUT", tx_out_file),
("SORT_ORDER", "coordinate"),
("RGID", names["rg"]),
("RGLB", names.get("lb", "unknown")),
("RGPL", names["pl"]),
("RGPU", names["pu"]),
("RGSM", names["sample"]),
("TMP_DIR", tmp_dir)]
picard.run("AddOrReplaceReadGroups", opts)
return out_file
def picard_downsample(picard, in_bam, ds_pct, random_seed=None):
out_file = "%s-downsample%s" % os.path.splitext(in_bam)
if not file_exists(out_file):
with tx_tmpdir(picard._config) as tmp_dir:
with file_transaction(picard._config, out_file) as tx_out_file:
opts = [("INPUT", in_bam),
("OUTPUT", tx_out_file),
("PROBABILITY", "%.3f" % ds_pct),
("TMP_DIR", tmp_dir)]
if random_seed:
opts += [("RANDOM_SEED", str(random_seed))]
picard.run("DownsampleSam", opts)
return out_file
def picard_index_ref(picard, ref_file):
"""Provide a Picard style dict index file for a reference genome.
"""
dict_file = "%s.dict" % os.path.splitext(ref_file)[0]
if not file_exists(dict_file):
with file_transaction(picard._config, dict_file) as tx_dict_file:
opts = [("REFERENCE", ref_file),
("OUTPUT", tx_dict_file)]
picard.run("CreateSequenceDictionary", opts)
return dict_file
def picard_fastq_to_bam(picard, fastq_one, fastq_two, out_dir, names, order="queryname"):
"""Convert fastq file(s) to BAM, adding sample, run group and platform information.
"""
out_bam = os.path.join(out_dir, "%s-fastq.bam" %
os.path.splitext(os.path.basename(fastq_one))[0])
if not file_exists(out_bam):
with tx_tmpdir(picard._config) as tmp_dir:
with file_transaction(picard._config, out_bam) as tx_out_bam:
opts = [("FASTQ", fastq_one),
("READ_GROUP_NAME", names["rg"]),
("SAMPLE_NAME", names["sample"]),
("PLATFORM_UNIT", names["pu"]),
("PLATFORM", names["pl"]),
("TMP_DIR", tmp_dir),
("OUTPUT", tx_out_bam),
("SORT_ORDER", order)]
if fastq_two:
opts.append(("FASTQ2", fastq_two))
picard.run("FastqToSam", opts)
return out_bam
def picard_bam_to_fastq(picard, in_bam, fastq_one, fastq_two=None):
"""Convert BAM file to fastq.
"""
if not file_exists(fastq_one):
with tx_tmpdir(picard._config) as tmp_dir:
with file_transaction(picard._config, fastq_one) as tx_out1:
opts = [("INPUT", in_bam),
("FASTQ", tx_out1),
("TMP_DIR", tmp_dir)]
if fastq_two is not None:
opts += [("SECOND_END_FASTQ", fastq_two)]
picard.run("SamToFastq", opts)
return (fastq_one, fastq_two)
def picard_sam_to_bam(picard, align_sam, fastq_bam, ref_file,
is_paired=False):
"""Convert SAM to BAM, including unmapped reads from fastq BAM file.
"""
to_retain = ["XS", "XG", "XM", "XN", "XO", "YT"]
if align_sam.endswith(".sam"):
out_bam = "%s.bam" % os.path.splitext(align_sam)[0]
elif align_sam.endswith("-align.bam"):
out_bam = "%s.bam" % align_sam.replace("-align.bam", "")
else:
raise NotImplementedError("Input format not recognized")
if not file_exists(out_bam):
with tx_tmpdir(picard._config) as tmp_dir:
with file_transaction(picard._config, out_bam) as tx_out_bam:
opts = [("UNMAPPED", fastq_bam),
("ALIGNED", align_sam),
("OUTPUT", tx_out_bam),
("REFERENCE_SEQUENCE", ref_file),
("TMP_DIR", tmp_dir),
("PAIRED_RUN", ("true" if is_paired else "false")),
]
opts += [("ATTRIBUTES_TO_RETAIN", x) for x in to_retain]
picard.run("MergeBamAlignment", opts)
return out_bam
def picard_formatconverter(picard, align_sam):
"""Convert aligned SAM file to BAM format.
"""
out_bam = "%s.bam" % os.path.splitext(align_sam)[0]
if not file_exists(out_bam):
with tx_tmpdir(picard._config) as tmp_dir:
with file_transaction(picard._config, out_bam) as tx_out_bam:
opts = [("INPUT", align_sam),
("OUTPUT", tx_out_bam),
("TMP_DIR", tmp_dir)]
picard.run("SamFormatConverter", opts)
return out_bam
def picard_mark_duplicates(picard, align_bam, remove_dups=False):
base, ext = os.path.splitext(align_bam)
base = base.replace(".", "-")
dup_bam = "%s-dup%s" % (base, ext)
dup_metrics = "%s-dup.dup_metrics" % base
if not file_exists(dup_bam):
with tx_tmpdir(picard._config) as tmp_dir:
with file_transaction(picard._config, dup_bam, dup_metrics) as (tx_dup_bam, tx_dup_metrics):
opts = [("INPUT", align_bam),
("OUTPUT", tx_dup_bam),
("TMP_DIR", tmp_dir),
("REMOVE_DUPLICATES", "true" if remove_dups else "false"),
("METRICS_FILE", tx_dup_metrics)]
if picard.get_picard_version("MarkDuplicates") >= 1.82:
opts += [("PROGRAM_RECORD_ID", "null")]
picard.run("MarkDuplicates", opts, memscale={"direction": "decrease", "magnitude": 2})
return dup_bam, dup_metrics
def picard_fixmate(picard, align_bam):
"""Run Picard's FixMateInformation generating an aligned output file.
"""
base, ext = os.path.splitext(align_bam)
out_file = "%s-sort%s" % (base, ext)
if not file_exists(out_file):
with tx_tmpdir(picard._config) as tmp_dir:
with file_transaction(picard._config, out_file) as tx_out_file:
opts = [("INPUT", align_bam),
("OUTPUT", tx_out_file),
("TMP_DIR", tmp_dir),
("SORT_ORDER", "coordinate")]
picard.run("FixMateInformation", opts)
return out_file
def picard_idxstats(picard, align_bam):
"""Retrieve alignment stats from picard using BamIndexStats.
"""
opts = [("INPUT", align_bam)]
stdout = picard.run("BamIndexStats", opts, get_stdout=True)
out = []
AlignInfo = collections.namedtuple("AlignInfo", ["contig", "length", "aligned", "unaligned"])
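    # BamIndexStats output lines typically look like (format may vary by
    # Picard version):
    #   chr1 length= 249250621 Aligned= 29 Unaligned= 0
    # plus a final "NoCoordinateCount= N" line, handled by the 2-part branch.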
for line in stdout.split("\n"):
if line:
parts = line.split()
if len(parts) == 2:
_, unaligned = parts
out.append(AlignInfo("nocontig", 0, 0, int(unaligned)))
elif len(parts) == 7:
contig, _, length, _, aligned, _, unaligned = parts
out.append(AlignInfo(contig, int(length), int(aligned), int(unaligned)))
else:
raise ValueError("Unexpected output from BamIndexStats: %s" % line)
return out
def bed2interval(align_file, bed, out_file=None):
"""Converts a bed file to an interval file for use with some of the
    Picard tools by grabbing the header from the alignment file, reordering
the bed file columns and gluing them together.
align_file can be in BAM or SAM format.
bed needs to be in bed12 format:
http://genome.ucsc.edu/FAQ/FAQformat.html#format1.5
"""
base, ext = os.path.splitext(align_file)
if out_file is None:
out_file = base + ".interval"
with pysam.Samfile(align_file, "r" if ext.endswith(".sam") else "rb") as in_bam:
header = in_bam.text
def reorder_line(line):
splitline = line.strip().split("\t")
reordered = "\t".join([splitline[0], str(int(splitline[1]) + 1), splitline[2],
splitline[5], splitline[3]])
return reordered + "\n"
with file_transaction(out_file) as tx_out_file:
with open(bed) as bed_handle:
with open(tx_out_file, "w") as out_handle:
out_handle.write(header)
for line in bed_handle:
out_handle.write(reorder_line(line))
return out_file
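# For example, reorder_line rewrites the bed line "chr1\t9\t19\tname\t0\t+"
# as "chr1\t10\t19\t+\tname" (1-based start, then strand and name), which is
# appended below the SAM header taken from align_file.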
|
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import importlib
import re
from flask import request, url_for, g
from flask_restful import Resource, abort
from marshmallow.exceptions import ValidationError
from sqlalchemy.exc import IntegrityError
from twisted.logger import Logger
log = Logger()
from .authenticator import HMACAuthenticator
from .db import db
class RecordAPI(Resource):
"""Base class to expose a db.Model rendered by a JSONAPISchema through
a REST API implementing GET, DELETE and PATCH.
To create the REST API for a model, subclass the RecordAPI and
define the following class members:
- model_class = the class of the database model (subclass of db.Model)
- schema_class = the class of the schema (subclass of JSONAPISchema)
then register the resource using flask_restful Api.add_resource.
To define the LIST and POST operations, subclass RecordListAPI.
"""
@classmethod
def register_resource(cls, flask_restful_api):
flask_restful_api.add_resource(cls, '/' + cls.schema_class.Meta.type_ + '/<item_id>')
@classmethod
def scoped_endpoint(cls):
return '.' + cls.endpoint
def _get_item(self, item_id, operation, check_existence):
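        # Returns (item, exists): item is None when the record is missing or
        # filtered out by access control; exists disambiguates 404 (absent)
        # from 403 (present but not visible to the current user).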
query = self.model_class.query
if self.access_control is not None:
query = self.access_control.alter_query(operation, query,
g.user,
self.resource_name,
self.model_class)
item = query.filter_by(id=item_id).first()
exists = None
if not item and check_existence:
            exists = self.model_class.query.get(item_id) is not None
return item, exists
def get(self, item_id):
item, exists = self._get_item(item_id, 'read', check_existence=True)
if not item:
if exists is None:
abort(500)
elif not exists:
abort(404)
else:
abort(403)
else:
return self.schema_class().dump(item).data
def delete(self, item_id):
item, exists = self._get_item(item_id, 'delete', check_existence=True)
if not item:
if exists is None:
abort(500)
elif not exists:
abort(404)
else:
abort(403)
else:
msg = item.checkDeletePrecondition()
if msg:
abort(400, errors=[msg])
else:
db.session.delete(item)
db.session.commit()
return '', 204
def patch(self, item_id):
if request.mimetype != 'application/vnd.api+json':
return '', 415, {'Accept-Patch': 'application/vnd.api+json'}
input_data = request.get_json(force=True) or {}
schema = self.schema_class()
try:
data, _ = schema.load(input_data, partial=True)
except ValidationError as err:
abort(415, errors=[err.args[0]])
except Exception as err:
abort(422, errors=[err.args[0]])
item, exists = self._get_item(item_id, 'update', check_existence=True)
if not item:
            # According to RFC 5789, we may create the resource, but we do not.
if exists is None:
abort(500)
elif not exists:
abort(404)
else:
abort(403)
else:
for key in data:
setattr(item, key, data[key])
msg = item.checkUpdatePrecondition()
if msg:
abort(400, errors=[msg])
else:
db.session.commit()
return '', 204, {'Content-Location': url_for(self.scoped_endpoint(), item_id=item_id)}
class RecordListAPI(Resource):
"""Base class to expose a db.Model rendered by a JSONAPISchema through
a REST API implementing LIST and POST.
To create the REST API, subclass the RecordListAPI and define the
following class members:
- model_class = the class of the database model (subclass of db.Model)
- schema_class = the class of the schema (subclass of JSONAPISchema)
- record_api = the class of the corresponding RecordAPI subclass.
Register the resource using register_resource.
To define the GET (single record), DELETE and PATCH operations,
subclass RecordAPI.
"""
@classmethod
def register_resource(cls, flask_restful_api):
flask_restful_api.add_resource(cls, '/' + cls.schema_class.Meta.type_)
def get(self):
query = self.model_class.query
if self.access_control is not None:
query = self.access_control.alter_query('read', query,
g.user,
self.resource_name,
self.model_class)
# Filter using query parameters
        for args, value in request.args.items():
c = getattr(self.model_class, self.schema_class._declared_fields[args].attribute)
if value == '':
value = None
query = query.filter(c == value)
items = query.all()
return self.schema_class().dump(items, many=True).data
def post(self):
if request.mimetype != 'application/vnd.api+json':
return '', 415, {'Accept-Patch': 'application/vnd.api+json'}
input_data = request.get_json(force=True) or {}
schema = self.schema_class()
try:
data, _ = schema.load(input_data)
except ValidationError as err:
abort(415, errors=[err.args[0]])
except Exception as err:
abort(422, errors=[err.args[0]])
new_item = self.model_class(**data)
db.session.add(new_item)
db.session.commit()
return (self.schema_class().dump(new_item, many=False).data,
200, {'Content-Location': url_for(self.record_api.scoped_endpoint(), item_id=new_item.id)})
def make_api(flask_restful_app, name, model_class, schema_class,
api_authorizers=None, record_authorizer=None):
"""Helper function to build an API for a schema.
record_authorizer: instance of a woodbox.access_control.record.RecordAccessControl.
api_authorizers: list of decorators, for example the authorize()
member of an woodbox.access_control.api.Acl.
"""
# TODO: we could have a schema_class per role
    if api_authorizers is None:
        method_decorators = []
    else:
        method_decorators = list(api_authorizers)  # copy so we do not mutate the caller's list
    method_decorators.append(HMACAuthenticator.authenticate)
# Create a subclass of Record[List]API and register the resource with the app.
t = type(str(name+'API'), (RecordAPI,),
{'method_decorators': method_decorators,
'resource_name': name,
'model_class': model_class,
'schema_class': schema_class,
'access_control': record_authorizer
})
t.register_resource(flask_restful_app)
t = type(str(name+'ListAPI'), (RecordListAPI,),
{'method_decorators': method_decorators,
'resource_name': name,
'model_class': model_class,
'schema_class': schema_class,
'access_control': record_authorizer,
'record_api': t
})
t.register_resource(flask_restful_app)
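# Minimal usage sketch (UserModel, UserSchema and acl are hypothetical names):
#   from flask_restful import Api
#   api = Api(app)
#   make_api(api, 'user', UserModel, UserSchema,
#            api_authorizers=[acl.authorize],
#            record_authorizer=my_record_access_control)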
|
|
#!/usr/bin/env python
###############################################################################
# Name: PlateButtonDemo.py #
# Purpose: PlateButton Test and Demo File #
# Author: Cody Precord <cprecord@editra.org> #
# Copyright: (c) 2007 Cody Precord <staff@editra.org> #
# Licence: wxWindows Licence #
###############################################################################
#-----------------------------------------------------------------------------#
# Imports
import os
import webbrowser
import wx
import wx.lib.scrolledpanel as scrolled
try:
import wx.lib.platebtn as platebtn
except ImportError:
import platebtn
#-----------------------------------------------------------------------------#
class TestPanel(scrolled.ScrolledPanel):
def __init__(self, parent, log):
self.log = log
scrolled.ScrolledPanel.__init__(self, parent, size=(400, 400))
# Layout
self.__DoLayout()
self.SetupScrolling()
# Event Handlers
self.Bind(wx.EVT_BUTTON, self.OnButton)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnToggleButton)
self.Bind(wx.EVT_MENU, self.OnMenu)
def __DoLayout(self):
"""Layout the panel"""
# Make three different panels of buttons with different backgrounds
# to test transparency and appearance of buttons under different use
# cases
p1 = wx.Panel(self)
p2 = GradientPanel(self)
p3 = wx.Panel(self)
p3.SetBackgroundColour(wx.BLUE)
self.__LayoutPanel(p1, "Default Background:")
self.__LayoutPanel(p2, "Gradient Background:", exstyle=True)
self.__LayoutPanel(p3, "Solid Background:")
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.AddMany([(p1, 0, wx.EXPAND), (p2, 0, wx.EXPAND),
(p3, 0, wx.EXPAND)])
hsizer = wx.BoxSizer(wx.HORIZONTAL)
hsizer.Add(sizer, 1, wx.EXPAND)
self.SetSizer(hsizer)
self.SetAutoLayout(True)
def __LayoutPanel(self, panel, label, exstyle=False):
"""Puts a set of controls in the panel
@param panel: panel to layout
        @param label: panel's title
@param exstyle: Set the PB_STYLE_NOBG or not
"""
# Bitmaps (32x32) and (16x16)
devil = Devil.GetBitmap() # 32x32
monkey = Monkey.GetBitmap() # 32x32
address = Address.GetBitmap() # 16x16
folder = Home.GetBitmap()
bookmark = Book.GetBitmap() # 16x16
vsizer = wx.BoxSizer(wx.VERTICAL)
hsizer1 = wx.BoxSizer(wx.HORIZONTAL)
hsizer1.Add((15, 15))
hsizer2 = wx.BoxSizer(wx.HORIZONTAL)
hsizer2.Add((15, 15))
hsizer3 = wx.BoxSizer(wx.HORIZONTAL)
hsizer3.Add((15, 15))
hsizer4 = wx.BoxSizer(wx.HORIZONTAL)
hsizer4.Add((15, 15))
# Button Styles
default = platebtn.PB_STYLE_DEFAULT
square = platebtn.PB_STYLE_SQUARE
sqgrad = platebtn.PB_STYLE_SQUARE | platebtn.PB_STYLE_GRADIENT
gradient = platebtn.PB_STYLE_GRADIENT
droparrow = platebtn.PB_STYLE_DROPARROW
toggle = default | platebtn.PB_STYLE_TOGGLE
# Create a number of different PlateButtons
        # Each button is created in the loop below using the data in this
        # list's tuples:
        # (bmp, label, Style, Variant, Menu, Color, Enable)
btype = [(None, "Normal PlateButton", default, None, None, None, True),
(devil, "Normal w/Bitmap", default, None, None, None, True),
(devil, "Disabled", default, None, None, None, False),
(None, "Normal w/Menu", default, None, True, None, True),
(folder, "Home Folder", default, None, True, None, True),
# Row 2
(None, "Square PlateButton", square, None, None, None, True),
(address, "Square/Bitmap", square, None, None, None, True),
(monkey, "Square/Gradient", sqgrad, None, None, None, True),
(address, "Square/Small", square, wx.WINDOW_VARIANT_SMALL, True, None, True),
(address, "Small Bitmap", default, wx.WINDOW_VARIANT_SMALL, None, wx.Colour(33, 33, 33), True),
# Row 3
(devil, "Custom Color", default, None, None, wx.RED, True),
(monkey, "Gradient Highlight", gradient, None, None, None, True),
(monkey, "Custom Gradient", gradient, None, None, wx.Colour(245, 55, 245), True),
(None, "Drop Arrow", droparrow, None, None, None, True),
(devil, "", default, None, None, None, True),
(bookmark, "", default, None, True, None, True),
(monkey, "", square, None, None, None, True),
# Row 4
(None, "Toggle PlateButton", toggle, None, None, None, True),
(devil, "Toggle w/Bitmap", toggle, None, None, None, True),
(None, "Toggle w/Menu", toggle, None, True, None, True),
]
# Make and layout three rows of buttons in the panel
for btn in btype:
if exstyle:
                # With this style flag set the button can appear transparent
                # on top of a background that is not solid in color, such as
                # the gradient panel in this demo.
                #
                # Note: This flag only has an effect on wxMSW and should only
                #       be set when the background is not a solid color. On
                #       wxMac it is a no-op, as this type of transparency is
                #       achieved without any help needed. On wxGtk it doesn't
                #       hurt to set it, but unfortunately it doesn't help at all.
bstyle = btn[2] | platebtn.PB_STYLE_NOBG
else:
bstyle = btn[2]
if btype.index(btn) < 5:
tsizer = hsizer1
elif btype.index(btn) < 10:
tsizer = hsizer2
elif btype.index(btn) < 17:
tsizer = hsizer3
else:
tsizer = hsizer4
tbtn = platebtn.PlateButton(panel, wx.ID_ANY, btn[1], btn[0], style=bstyle)
# Set a custom window size variant?
if btn[3] is not None:
tbtn.SetWindowVariant(btn[3])
# Make a menu for the button?
if btn[4] is not None:
menu = wx.Menu()
if btn[0] is not None and btn[0] == folder:
for fname in os.listdir(wx.GetHomeDir()):
if not fname.startswith('.'):
menu.Append(wx.NewId(), fname)
elif btn[0] is not None and btn[0] == bookmark:
for url in ['http://wxpython.org', 'http://slashdot.org',
'http://editra.org', 'http://xkcd.com']:
menu.Append(wx.NewId(), url, "Open %s in your browser" % url)
else:
menu.Append(wx.NewId(), "Menu Item 1")
menu.Append(wx.NewId(), "Menu Item 2")
menu.Append(wx.NewId(), "Menu Item 3")
tbtn.SetMenu(menu)
# Set a custom colour?
if btn[5] is not None:
tbtn.SetPressColor(btn[5])
if btn[2] == droparrow:
tbtn.Bind(platebtn.EVT_PLATEBTN_DROPARROW_PRESSED, self.OnDropArrowPressed)
# Enable/Disable button state
tbtn.Enable(btn[6])
tsizer.AddMany([(tbtn, 0, wx.ALIGN_CENTER), ((10, 10))])
txt_sz = wx.BoxSizer(wx.HORIZONTAL)
txt_sz.AddMany([((5, 5)), (wx.StaticText(panel, label=label), 0, wx.ALIGN_LEFT)])
vsizer.AddMany([((10, 10)),
(txt_sz, 0, wx.ALIGN_LEFT),
((10, 10)), (hsizer1, 0, wx.EXPAND), ((10, 10)),
(hsizer2, 0, wx.EXPAND), ((10, 10)),
(hsizer3, 0, wx.EXPAND), ((10, 10)),
(hsizer4, 0, wx.EXPAND), ((10, 10))])
panel.SetSizer(vsizer)
def OnDropArrowPressed(self, evt):
self.log.write("DROPARROW PRESSED")
def OnButton(self, evt):
self.log.write("BUTTON CLICKED: Id: %d, Label: %s" % \
(evt.GetId(), evt.GetEventObject().LabelText))
def OnToggleButton(self, evt):
self.log.write("TOGGLE BUTTON CLICKED: Id: %d, Label: %s, Pressed: %s" % \
(evt.GetId(), evt.GetEventObject().LabelText,
evt.GetEventObject().IsPressed()))
def OnChildFocus(self, evt):
"""Override ScrolledPanel.OnChildFocus to prevent erratic
scrolling on wxMac.
"""
if wx.Platform != '__WXMAC__':
evt.Skip()
child = evt.GetWindow()
self.ScrollChildIntoView(child)
def OnMenu(self, evt):
"""Events from button menus"""
self.log.write("MENU SELECTED: %d" % evt.GetId())
e_obj = evt.GetEventObject()
mitem = e_obj.FindItemById(evt.GetId())
        if mitem is not None:  # FindItemById returns None when the id is not found
label = mitem.GetItemLabel()
if label.startswith('http://'):
webbrowser.open(label, True)
#-----------------------------------------------------------------------------#
class GradientPanel(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent)
self.Bind(wx.EVT_PAINT, self.OnPaint)
def OnPaint(self, evt):
dc = wx.PaintDC(self)
gc = wx.GraphicsContext.Create(dc)
col1 = wx.SystemSettings.GetColour(wx.SYS_COLOUR_3DSHADOW)
col2 = platebtn.AdjustColour(col1, -90)
col1 = platebtn.AdjustColour(col1, 90)
rect = self.GetClientRect()
grad = gc.CreateLinearGradientBrush(0, 1, 0, rect.height - 1, col2, col1)
pen_col = tuple([min(190, x) for x in platebtn.AdjustColour(col1, -60)])
gc.SetPen(gc.CreatePen(wx.Pen(pen_col, 1)))
gc.SetBrush(grad)
gc.DrawRectangle(0, 1, rect.width - 0.5, rect.height - 0.5)
evt.Skip()
#----------------------------------------------------------------------
def runTest(frame, nb, log):
win = TestPanel(nb, log)
return win
class TestLog:
def __init__(self):
pass
def write(self, msg):
print(msg)
#----------------------------------------------------------------------
overview = platebtn.__doc__
#----------------------------------------------------------------------
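# Minimal standalone sketch (assumes only wx and platebtn; not part of the
# original demo):
#   app = wx.App(False)
#   frame = wx.Frame(None, title="PlateButton")
#   platebtn.PlateButton(frame, wx.ID_ANY, "Hello", style=platebtn.PB_STYLE_DEFAULT)
#   frame.Show()
#   app.MainLoop()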
# Icon Data
# All icons from the Tango Icon Set
from wx.lib.embeddedimage import PyEmbeddedImage
Book = PyEmbeddedImage(
b"iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABHNCSVQICAgIfAhkiAAAAkdJ"
b"REFUOI2VkslrU1EUh787vJeXNNpMGA01DlECrd0UWrD+A06oIIoLwY24FwQRNIu6EBeKy4Jb"
b"XRS6dEK0WrC2XSgFFZq0RUoHmg4WYslrxuuiGpJWKv4295zLOd/vnssR/FbX8ZvXQTzAGPPn"
b"ziAMgJAie//ifKL7ZC6pjOwylJc01WGiHUsaoLe3tz+bzZ6fmJwisxQTd25d3iAIwMDde09i"
b"L76W3GhbjL37jyLtfaysvP0QXf52VQF0dnb2pVIpdvj9DAx/JxiN8Dk9x+TMMm6hxMT4NDeu"
b"HaG97RB281ksZz9Se+OyNDqvqVMul6NcyrOyukp2cRUAR1cpl/LkfpYpV8voiotQkmJ+Bo+U"
b"OVkPqFQqVErrFAsuBXeNortGad2lXHT5Ml5gZk5QyQ9g8q+ZnR6magqfGgAAQtlobWPZHpTl"
b"QWkLIS32xOKEdp/ArbYjrBijYxaW+2OsASCEQFteLMeH7fjxeJuwHC/a8jI3O0c6M4URMbA7"
b"mF9sQhwYXNebXyClQkqF0hYASllIpWlpaSGZTIIQCKExRgDQALBtuxbvCvmpVmsrgeNVm702"
b"DOsTn88HgDHw8c0IIwOjtb1q8jUjhNge4PF4AHj/aoh4bCcH4wHevRzCGMPCwgLpdHoLoGEE"
b"x3EAuHLhGJfOdSOlpP/ZCI+fDtLa2koikdgeEIlEeHj7NEopMpkMQgjaDwd5lDpDMBgE2DJG"
b"AyAQCBAIBBBC1Arrz7/9QQMgFAptKfiXNEA4HH7e09Nz6n8aw+FwH8AvVXjA30OIWrcAAAAA"
b"SUVORK5CYII=")
Address = PyEmbeddedImage(
b"iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABHNCSVQICAgIfAhkiAAAAmtJ"
b"REFUOI19kstLlFEYxn/nfN+ZizPOeBnHS1OklmQSFVjRH9CiTUKbIKRl4V8QuFBwIUTtgqBF"
b"u3BfuW0XtAhsYWk6BqOZl7mgc/+c73JafKmZ1gNnc+D9ned5ziu0fqievep/Hg83HpXqAVa2"
b"2inXgmgE/9HUzPToJICYfvn0xe0r22MXL9wEM0WusEQ6N8DmbjMaiZQ+aB9XtRqUNmd5cH2c"
b"eMfIY1NJPTbQP4QZuQUiRDJhkC/OsZrrI7haQdX38JRBpStBPRGnsy1CCYh3jFDMvXliWrbC"
b"A/AskBLX3sauS2KrWQo9Z2hEmzCtPeKZTVxlQFuEpmDjIIu5stlGrtxGB+9xXYHjZimmY+wk"
b"ezAaNsn5FbQQ7JxLEfuRhd5OwgH7EGC7BmU9jFXcIp9dIpG8RimTYW8wSmIhQymVJLpVwAkF"
b"UDULgI/fLGy1zNe5JiRAQIWxvG62iuepub14tkQLgXRctGGAEATLNZxw4Nh3SAAh/I61MPyr"
b"SAhVtbBam/3XA4rY2jal050AeEQPIxzl+SCvt4tY+ifFs9044SCIozsRyijuzsDlxZoP0GjQ"
b"kGyP+046WijtOcTWs6hqHTegyA/1nbhRvgMNSpm0tESxbIfO1gixcBAGTh34ih/x+BegUq3z"
b"bvYDptR42j+OoxFSEFAGQggE4LoemfW811Ury++LiwD3TIAv8yvcGIwcrO1JklJSEy0sFxpy"
b"rfcSw7/NmAC7lSp9yWZSqZ5jg+KPAhfSGyd2MPUpnZ9w6jVvI9+QoZDCsZ0jgfcRjutx52q7"
b"fvu5cEAVgLw//noSmPin/xM0Mz0qAH4B9vTxRRZgeg8AAAAASUVORK5CYII=")
Home = PyEmbeddedImage(
b"iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAAABHNCSVQICAgIfAhkiAAABqNJ"
b"REFUWIXVl2uMlOUVx3/P817mtjN77Sxy2WFZroLQBbEtyYaAbSG1Ng1JJVZrQ6Jp1ARMI0m/"
b"mX7wi58MprGXBARTUW6JtiZuJSUFSXBrEQxWQcCVhWVhWYady84777zP8/TDXNhdhmvih57k"
b"Td6Z97zn/M///M+ZZ+D/2d6R8sW/CjG6E2bcawxxLy/tAhfL2pFsa1s7taEhcay/f7Ck1A+f"
b"gC+/dQA7IBm2rA/ndnXN7Zw6NVwcGSGTy/HpwMAlPwh+9gT03U08eTfOb8LSqOt++uDSpYs6"
b"Z84MIyUGiLgu3R0d7SHHef8t+NG3AmCXlL9KxGL/6OnpmdqWTEqkBMsCITBAyHHoTqXaQq77"
b"9jtSPn6ncW/bgpdA3m9Zr7Umk79ctnx5kyUlKAVao69dQ6fTFJRCAQYItObEwMBVz/d/v17r"
b"LbeLb92yamhMWtb+rnnz1i5+6KGEtCruQqBHRjDpNCWlCFkWyhiUMSAEbYlEJON53/+51ond"
b"xvzznhjYDnOjtt3bvWLF9PZUykZrUAoTBOhz5whGRzmeHuHi5SssS6WYEo0ypjWeMRhAA2eH"
b"hjKZfH7PL5R6WpQJusHqauAty3okEYt91PPoozPb58yxsSyQEqMU+uxZxq5e5eORyzS+sIkH"
b"jx7lm8YYZ3JZolISrmgCY+hsb0+0JBKP7bbtdw+AfUcM7LLtlxKtrZu+t3ZtsxOJlKvWGpPJ"
b"oE6e5Go2y+eFHKk/vE7kgcUopRBaM/S7F7GOHmdhPEERyCiFoVz2UDpdGE6n/5NWas1vYKwu"
b"gG0Qjkm5d8a8eT2LVq+Oi/Fiu3QJdeoU53I5LkTCdL2xHdnaShAESCmRUqK1ZuSPr5PbuZPu"
b"xiaUEFxTCg0YY7iWzRYvXLlyMq/Uyg1wrZrXAngTpkcs6/CSnp5lc1aujFbHCyHQ/f0Ep07x"
b"eS5H7v4FzN6+AxmPo5QiEonQ0tJCLBZDKUV46TKcjhl80dvLFNshatt4xqABNxSyo6FQqzc2"
b"9thPjNn9LuRqDLwt5WDPunVTmrq6yoxojQkC1Gef4Q0McCybpfmpp2h/7jmUUmiticVihMPh"
b"Ce0rFAoUCgW8Eyc48+yzPBAK0RAKMawUpYo4i56nv7l48YJSatXjcEYC2FIWm2bPFlgWWBZG"
b"KYK+PjJff01fJsP0V15hyvPPY4zBsiwaGxtxXRetde1SSuG6Lg0NDcSWLGH+3r2ckJIrhQJJ"
b"28atiNMJhWTntGkzLMv618QpqGw2k88THDzI0PnzHNOahXv20LhqFcYYbNsmGo1WSJqYvHov"
b"hCASiRCeNo1F773HuWSS/nyepG0TqYCQjoMtpblhDM2lS5QOHODU5cucu+8+unt7Cc+aBYDj"
b"ODiOMyHx5OTjL8dxcBobWbBrF/nly/lvNst3LIu4lEhxffhqANTp0xQPHeLI4CDB6tUs3rcP"
b"GY9jjEFKiRDijpJPZkM6DrNefZXwk0/y8cgIzVLSLK/XbQMEWoeHjhzhZDqdHVu/Prb05Zel"
b"1hpMeXkZYzBm4iKrfhZCYNs2QogbfKp+SimmbdzIwdOnS8H+/cH8RCJSVKqhBqCo9epPBgYO"
b"d27Y8Nt9zc3bHrYsgiBg//4PyY/la4kmWzQSY82aNWzZ8hq+X7zhOYDjhnhh00Z832d49uyg"
b"8/DhH38yOPj3ktYragB+DV9gTAtbt7J58+ZtNVWHXL46c3JCQCFEDcyC+QsxxuD7RVTqEVzH"
b"wrElSmn8kqZYCuDCBxhjKJVKSCkL6y5e/AhoukED463aw44ZHdi2XROg67q1e8dxSHWk0FpX"
b"gIEUYAlRFtk4wqptMHV6VPcHwhiD1ppUKoVt13UBoK2trQZgzAso+oqxYsBovkgmX6RQVPwg"
b"Wi6osrb9uwJg2XatElG5kYxhi358Mx8ENQD//nKoPsrodUaB0uTHt2xBNbGgQqkAR3zFT1f8"
b"DUtkgfrKr2dKKYBg8vc3ZUApBcZMVL8An++y+9CiWmvKS8dlVdPxuoltp7yyS6USQgjvrgDY"
b"jkO5+Ikj6DhOGY8Az/N45pmn645pNVaxWKzuktsyEKpWpbVGClk38HhAvu/j+9e1Va8lUkpK"
b"pRJBEGigjfJ5IJgMwAVagcDzPDsej0+Y+ZtZPB6/5fMqoOHhYXzfV8CUSt7LgB4PwAbc0dHR"
b"N7Zu3brBGHPLEzPAn//0l9u5jDevr69vJxCmXKyAiWdCQZmBViBRcbr5Erg78ymfvgrAKHAe"
b"8CYDqJoDRCpI7+nPax0zlHeAV7lqQvkfoK5c5SC5ZcEAAAAASUVORK5CYII=")
Monkey = PyEmbeddedImage(
b"iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAAABHNCSVQICAgIfAhkiAAACDhJ"
b"REFUWIXtlmuMXVUVgL+99zn33HvnPmbmDtOZYabTKXSGmdAWi6JAi6U1GgMN2qAIgYgaMAKJ"
b"4g8jRhN+GGNMTESDxogQEhtjIiAYqT+0pfZhkdBSSqedFqbTofN+3Lnv89zbH/dOGNtiUjXR"
b"H65kZWc/17fWXmftA/+X/7KIy1m8eSg1JNB3xIS+UxvRE2nRDKCkWZLCvOsb+VuDfPHAcHn4"
b"Pwqwub9pg23rJ1PJ1KYN67qsdb1dsZZMikQiAUCtWiVfLHNmfNI/dnoyqNYqR/1APnzgdOXN"
b"fxtg6zWJh51E4gcfv3F9/Jq+LlmdG6OanyCoLmGCGgBWLImVzJJs7SbZtoaTo+/q3QePu4Hn"
b"feOVU7Un/2WAW4fi32zONn/77ttubgoWzpEfPw7aRwmQDRWAAbSpK5ZDa+91iHQHu36/v1Ks"
b"VL67d9j9/mUD3DLQtL4pLvffv3N7dv7sEZbKFdxIYjAkbEkyLJBRPlJAKYpRUllqgUYiiCtN"
b"W7aZ5tVD/PK5PYWax5a/jFSOXxbAtsHE8O23XDvoBEvMlYps2fwxBoc2oKwY09MT7NmzG7M0"
b"Xj+geTXbtt1GZ+eVhFHIqVNvsX/vbjraWikHNn84dPrknpO1ofcFuGkgl46p6o+M4W6MUFLy"
b"11Tc/sB9OzZnRk68xu07PktXRwdRdRETBQhlo+0MT+96FoAv3ns/0i9iQg+hLKzUFUzNzfPS"
b"C7sYWv9hnn5+T7EamKNacyPCRELwaz9Kfu3QyEJJAfR3ij1dzeoTH+qLJVpTUpU9VvddmYvH"
b"TIVMppkbNl1PsDSFCV1M6GN8Fxm5XDO0if6rB0iEBbRbwEQhJvTRboFsayeTM9OU8guEBqfm"
b"easHu2xroMOyQ82gFwTbzs4GT6tb+pOfbM+Ihwa7nNTpWcnQVWuZz1dF96pWjF9k44braYkL"
b"tFfChAEm8jGRj/arWGEVJ6rU56IAowOMDiGKAIN0UoyOjqBiTcwXamLT+iFOnS/Ql1NW1Quz"
b"uSb7mIzHeWxjj5U9u6j41hd2sPPmIXo7WjCA0YZkwkG7JUzgYoxmMu/xzJ/PMlUCYcUQlsNU"
b"GZ7Ze47JQoSQFsZotFsimUyitQZgoDvHzi3r+c5D93K+aLGxx8rG4zwmo4iBQAuG1vaQiQlM"
b"FNDelqNcqSGkwPUCUDYylkI6KR78yX4e3/U3HnjiFaSTRjppHnjiFR7/1as8+ON9SKe+TtpJ"
b"/AikUpSqLu25FrRfI2HBxsF1hFoQRQxIQGhtsASY0EfGkvT39TBb9InFU0zNzmI15ZBOEzLW"
b"xNhMAYCxmUIdIJ5mbHp5rFgHjTVhpduZmJrESaSYWqyyrrcHE3iY0MdSCq0NgJCWEmdiSnBi"
b"9Dyu6yKUw5qeKzk3tUgm28bJkWHCeBvCSSOdFI9+bivppMOj92xHOmmUk+LRe7bXx+6+te59"
b"PINOdXLixBuk0i1MzJfp6+4AKQmCgDdPnsFWAkuJM2LrYOKOzmb17Np2JztTVmy/YSMvHThB"
b"SyZGT4sg197NHZ+6F1OeRUgLoWyEskEqhJAs10JjNOiokYwRKtvN8889Q356lNHpGnPFgJ2b"
b"+zk8PE7Ccjk/7xamlqLPq7H5cGRV1v6MJGrtbtbW0bcnWSi4bLo6RyqVZvPWT/PG0ddZKFao"
b"+JpIKFAOltOEFUsirBghinItIF+sMDEzz9vnJjg5cpLrb7iVmakxMnHD6fEFphcXySU95pYC"
b"b3LJjOwdrn5dAHxkKNOaJPxZoNmpJOq6vmaRa4I77/oyuUySwA+YWaoys1hkdnGJ+XyRhaUi"
b"rusSaY1tW7Rks7TnWljV1kpXWwuduQxO3CFf9fnNsz9kruBzbLxiIk1kS56vYn3l8HBx0QI4"
b"PFxcBO766EexzGxitiWdaPH9Ei1tHRg3j+3EWd3dQm/fVUgrjrDjCMupX4eQGKProQ89TOCi"
b"Q7fRerRc0Y0XBLRmmjCmvCRX1dr37CNcLsVyZV3et49QQ0JKCcYglIWQCiFV/c6lArWcBw7S"
b"Tta/BDuJUHUgGnuQCqksEAKMQSqJhsS+FcYvAgBQMFeuuVjKIj8/jbATYExDaagBdN1zHdYT"
b"EH3RGmknWZybwLJsKjUfBXMX2rsIAMyL0wuVINRwaP9urGRr3Qsd1o1F/nuh9spor4T2yvVK"
b"GXr1eR0gAJVu49De36E1TOUrAZgXL+HwP0rPKuutUiX8Uk97OlFYmsVSkp7+TRD69TrPcjSi"
b"ej/034MKPUzkIaSFne3gtYMvc+z1/WgR4/hYsRhJ7hufC4v/FGB8Liz25iy9WPZu7GhNxd4d"
b"O8X83AS9/ZtwUs1gDEZHjUep8TjpsJEzNirZjGckf3zhKY4c/hNGORx5J192Pf29/afc3Rfa"
b"u9QPiQ3ENw8kfpFO2DvW96aTNh5KWVw1cC3XXreZ9q41NKVzWI4DCMIwoFpcZG56nLeOHuDM"
b"8BGiKCTA4fi5UrVY9V8+eNp9BHABr9FeEiDe0AQQ/+Ba55F0wnqoO5ewenJxy0QutpIIKYl0"
b"VE+2xilKKrTRhKFGqjjn8144PlcNC5XgqSNj/s8bRl2g1mirgLkQILFC40CiM6uuXtNuf9VW"
b"8qbWtBO1pa1EOm7hOArV2B0Z8PyIshsxVwzcxZKn/DB6dXQ2+OlsUY+uMLoMUAUql4qAABwg"
b"2QBwlttMk7qip9nakkmKLUqxTiJaNSJW32QCg1kMQ/NOocrB8/nwUKkWzTfCvazLxsuN/vvm"
b"wLIoILbiWmKAtUKXX6LlM+rFAUIgAoIVXruA35i/yOPLEXGB4WVdzga9AsRc6oD/Ofk7fswD"
b"nMQUbKYAAAAASUVORK5CYII=")
Devil = PyEmbeddedImage(
b"iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAAABHNCSVQICAgIfAhkiAAABrpJ"
b"REFUWIXtlluMlVcVx3/7u57LzLkMc5gbw2UovFRupQ1INdRkkkrIPDRpLSFREqk8oFKrPhmR"
b"oEkfNLFJ39SQkrZGJGRMarCpVWmlLa2OCjOtpVBmgJnOfc79zHf/tg/nO5PTEQEbE1/6T/45"
b"315n7b3+e+21sjd8iv8zRPPgtKKc9aV8UArxfT0Mf/4lcI+BocMXNME+BT4XQi6UtCqCigJz"
b"oeQNH055cO44uKfB8BTlGwKOqvDW42G49+4FmGrx4Z9uSt98J+988JupwmzFe6mi8NjKroS6"
b"bmOqNbcqKWKtOnpMxbMCrIrH3ERNXr9SrsxOLwatIYMrs8bAvY91Z7q3ZIyz37xU2h/KzO0E"
b"qM2DR6QwWztzu9ZoG81W22ipFQr39XQl4jv2dJlpLKHnC4iZBeTEHCyUMGoW6bQm+j7TbspJ"
b"J55NZ+974KEHkh2dveqNkXln+r35Hw9K+fpdZ+AFSKmKMvX5desSLYZB1XG4MH6d7dtBjYNq"
b"gtDqs2QAoQuhDUFNMjQs2L2uj5iuU3Vdzo+OLi5K2fkEVG4nQGse3IDWFVJyZWGOvkwbw9OT"
b"rO4FrQW0JKgxgdCbBDgQGBIUQU8nDH00zqbObq7lFyiDnIcUdxCgND4kCB3ObtycM4uexd8n"
b"b7Kyw6NrLWgtAq1VoKVBzwqMrEDPgJ6K/ktCzxrIZFyGJm5Q8izWb8zGdDgrl2V5OZZqwIB9"
b"3e3xL9+7tT3eVsjT2SVJrRR4cfj6JcmTb4f88SPYuUHQ2S5wEHz1lZAnL4Scm4dtGUFvAlYY"
b"kJYh2b52pVhyEr+zg7E/wbu3zcAx0DR4ZuuWlSnn0hRIiVDr5/3sqKQ3BdcOaRy4X/Dt34fo"
b"GcFP/hqyOiu4ckBl/3rB0ashiibq85A478+zeWNbSoNnji076mYIgB9Bf097/Mxnt3aknXeu"
b"o2cEepZ6qrMCLQtmZNMyAi0OXgGcgsQvSrwC2HlJUASvIHELEq8Ise1dXLicL02VnEePwh+i"
b"o44jxBmggpRPKwAm7Ovtbkn5ExVkWPdCggxBhhIR1ItOehBa4JchdCT4kT0ARYKUEtmYK8Gf"
b"rtHTnkiZsE+CKoX4IfAEMA4EwEgjNbuzKxLCvzgTLSiRvkD6IN16uwW2RGgCGUhQIptVb8PQ"
b"q1N61OcE9eX9gk3bPW0C2O3BTl3KUQEnpZQGoAmQGkAIuVhMZcSGMNBRanGCqXKUik+OlJak"
b"V1cIIVeA6Tg8DpwU4FJnvTgCSGuGigxCNgwOkuzoIJHLMTo6yrZt2zBNE9M0UdV604yMjLBp"
b"06aPBQvDkKGhIfr6+rBtm9nz57l++DCGJggg3QHXJiA7Df2dUT1A1AUqlLxFD+l56D09qKkU"
b"ALqu33Jnmnbrom72N7q68F0Hz/ZRoQSQhyNVeHYCdn1MgAJzds1Da0niTU7eMdDdCPALBTRF"
b"wbIDFJgD2AyFCnytDL/9EDYsCQBeX5i3ZFxXsC9fvuWCdyOg2W5duYKphCyUHAksXUjb4M0S"
b"/KoEJ5cEOHBqYqZWzrVr5J9//n+SgfkXXySb0pgs2GUHTjX7VeFEFXa9AesVAB9eWyg5lpbQ"
b"8D+8SnVo6BNloOFfHR7GHRtFM1UKNc/y4bVmvzJkK0ANQgXgOPg+PPXutWJ59eoEY0eO4C0s"
b"/MdAjW64lQCvVOKfBw+yqk3lvclq2YenjoMPcBrUX8BABV4ow5sPw9jSbfg9+PVsxR0r2H6Q"
b"M1yG9+4lnJ39rzIgy2X+0t9Pyi2Td8Nw0vKtSbj/u/CzH8Cr12CmDC+VYbYK+6DpOhYgyzBw"
b"8UapoKQM2pVFRvbs4caJE8gwvKOAm6dO8daOHbRU5tCTGv+YqSnXocOC75Tg0Dz0z8L4NHzr"
b"Kuw8BBNR3CUYQOwg7LhHcGZrbyqZM1V1fMZHJpKsO3CAnoEBkmvXEiYSqJZFbXycqZdfZuy5"
b"5wjyC/SkBbO+5OJMTV6GiSpMSphwYXgO3v4bfABYgB3RbQhQgHiDD0FfP5zMpYzOzd2tMcX2"
b"KRY9bHRc18N1HHTTwNB1YoFLulVDmiqX5hbdmZqX/yU8fbW+w0YwaxkbtlpzBmJNImJJaPkK"
b"7F8FhzNJXV2TMuIrErowNAVdUXD9ANcLmK/58mbVtYuWL0dgcBBe9WCxaZfWLb4t6k81f/lz"
b"SQcSgBkJMtPQ8kV4cC3saYEtCmQExCXYAZSK8P5l+PM5uGSBA3gRGxeO00QLqEW/cnkNNENE"
b"NdEQYkTitIhqdGwiYvQKIKR+z/sR3aYdu5Ht3wLdLRoBlSY2oyGgwYaoT3Fb/At4CANJRbmY"
b"kwAAAABJRU5ErkJggg==")
#-----------------------------------------------------------------------------#
if __name__ == '__main__':
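    # Run inside the wxPython demo framework when the "run" module is
    # importable; otherwise fall back to a small standalone frame.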
try:
import sys
import run
except ImportError:
app = wx.App(False)
frame = wx.Frame(None, title="PlateButton Test")
sizer = wx.BoxSizer(wx.HORIZONTAL)
sizer.Add(TestPanel(frame, TestLog()), 1, wx.EXPAND)
frame.CreateStatusBar()
frame.SetSizer(sizer)
frame.SetInitialSize()
frame.Show()
app.MainLoop()
else:
run.main(['', os.path.basename(sys.argv[0])] + sys.argv[1:])
|
|
# -*- coding: utf-8 -*-
"""
This module contains the Branch class (one branch of the tree) and the Nodes class
"""
import numpy as np
from multiprocessing.dummy import Pool as ThreadPool
from scipy.spatial import cKDTree
pool = ThreadPool(16)
class Branch:
"""Class that contains a branch of the fractal tree
Args:
mesh: an object of the mesh class, where the fractal tree will grow
init_node (int): initial node to grow the branch. This is an index that refers to a node in the nodes.nodes array.
        init_dir (array): initial direction to grow the branch. In general, it refers to the direction of the last segment of the mother branch.
init_tri (int): the index of triangle of the mesh where the init_node sits.
l (float): total length of the branch
angle (float): angle (rad) with respect to the init_dir in the plane of the init_tri triangle
        w (float): repulsivity parameter. Controls how much the branches repel each other.
nodes: the object of the class nodes that contains all the nodes of the existing branches.
brother_nodes (list): the nodes of the brother and mother branches, to be excluded from the collision detection between branches.
Nsegments (int): number of segments to divide the branch.
Attributes:
child (list): contains the indexes of the child branches. It is not assigned when created.
dir (array): vector direction of the last segment of the branch.
nodes (list): contains the node indices of the branch. The node coordinates can be retrieved using nodes.nodes[i]
triangles (list): contains the indices of the triangles from the mesh where every node of the branch lies.
tri (int): triangle index where last node sits.
        growing (bool): False if the branch collides with another branch or grows out of the surface; True otherwise.
"""
def __init__(self,mesh,init_node,init_dir,init_tri,l,angle,w,nodes,brother_nodes,Nsegments):
# self.nnodes=0
self.child = [0,0]
self.dir = np.array([0.0,0.0,0.0])
self.nodes=[]
self.triangles=[]
# self.normal=np.array([0.0,0.0,0.0])
self.queue=[]
self.growing=True
shared_node=-1
init_normal=mesh.normals[init_tri]
nodes.update_collision_tree(brother_nodes)
# global_nnodes=len(nodes.nodes)
# R=np.array([[np.cos(angle),-np.sin(angle)],[ np.sin(angle), np.cos(angle)]])
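        # Rotate init_dir by `angle` within the plane of the triangle:
        # `inplane` is the in-plane direction perpendicular to init_dir.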
inplane=-np.cross(init_dir,init_normal)
dir=np.cos(angle)*init_dir+np.sin(angle)*inplane
dir=dir/np.linalg.norm(dir)
self.nodes.append(init_node)
self.queue.append(nodes.nodes[init_node])
self.triangles.append(init_tri)
grad=nodes.gradient(self.queue[0])
dir=(dir+w*grad)/np.linalg.norm(dir+w*grad)
# print nodes.nodes[init_node]+dir*l/Nsegments
for i in range(1,Nsegments):
intriangle=self.add_node_to_queue(mesh,self.queue[i-1],dir*l/Nsegments)
#print 'intriangle',intriangle
if not intriangle:
print('Point not in triangle',i)
# print self.queue[i-1]+dir*l/50.
self.growing=False
break
collision=nodes.collision(self.queue[i])
if collision[1]<l/5.:
print("Collision",i, collision)
self.growing=False
self.queue.pop()
self.triangles.pop()
shared_node=collision[0]
break
grad=nodes.gradient(self.queue[i])
normal=mesh.normals[self.triangles[i],:]
#Project the gradient to the surface
grad=grad-(np.dot(grad,normal))*normal
dir=(dir+w*grad)/np.linalg.norm(dir+w*grad)
nodes_id=nodes.add_nodes(self.queue[1:])
        self.nodes.extend(nodes_id)
if not self.growing:
nodes.end_nodes.append(self.nodes[-1])
self.dir=dir
# #print self.triangles
self.tri=self.triangles[-1]
#Uncomment the following lines for a closed network
        # if shared_node != -1:
# self.nodes.append(shared_node)
def add_node_to_queue(self,mesh,init_node,dir):
"""Functions that projects a node in the mesh surface and it to the queue is it lies in the surface.
Args:
mesh: an object of the mesh class, where the fractal tree will grow
init_node (array): vector that contains the coordinates of the last node added in the branch.
dir (array): vector that contains the direction from the init_node to the node to project.
        Returns:
            success (bool): True if the new node lies inside a triangle of the mesh.
"""
# print 'node trying to project', init_node+dir
point, triangle=mesh.project_new_point(init_node+dir)
# print 'Projected point', point, 'dist', np.linalg.norm(point-init_node)
if triangle>=0:
self.queue.append(point)
self.triangles.append(triangle)
success=True
else:
# print point, triangle
success=False
#print 'Success? ',success
return success
class Nodes:
"""A class containing the nodes of the branches plus some fuctions to compute distance related quantities.
Args:
init_node (array): an array with the coordinates of the initial node of the first branch.
Attributes:
nodes (list): list of arrays containing the coordinates of the nodes
last_node (int): last added node.
end_nodes (list): a list containing the indices of all end nodes (nodes that are not connected) of the tree.
tree (scipy.spatial.cKDTree): a k-d tree to compute the distance from any point to the closest node in the tree. It is updated once a branch is finished.
collision_tree (scipy.spatial.cKDTree): a k-d tree to compute the distance from any point to the closest node in the tree, except from the brother and mother branches. It is used to check collision between branches.
"""
def __init__(self,init_node):
self.nodes=[]
self.nodes.append(init_node)
self.last_node=0
self.end_nodes=[]
self.tree=cKDTree(self.nodes)
def add_nodes(self,queue):
"""This function stores a list of nodes of a branch and returns the node indices. It also updates the tree to compute distances.
Args:
queue (list): a list of arrays containing the coordinates of the nodes of one branch.
Returns:
nodes_id (list): the indices of the added nodes.
"""
nodes_id=[]
for point in queue:
self.nodes.append(point)
self.last_node+=1
nodes_id.append(self.last_node)
self.tree=cKDTree(self.nodes)
return nodes_id
def distance_from_point(self,point):
"""This function returns the distance from any point to the closest node in the tree.
Args:
point (array): the coordinates of the point to calculate the distance from.
Returns:
d (float): the distance between point and the closest node in the tree.
"""
d,node=self.tree.query(point)
# distance=pool.map(lambda a: np.linalg.norm(a-point),self.nodes.values())
return d
def distance_from_node(self,node):
"""This function returns the distance from any node to the closest node in the tree.
Args:
node (int): the index of the node to calculate the distance from.
Returns:
d (float): the distance between specified node and the closest node in the tree.
"""
d, node = self.tree.query(self.nodes[node])
# distance=pool.map(lambda a: np.linalg.norm(a-self.nodes[node]),self.nodes.values())
return d
def update_collision_tree(self,nodes_to_exclude):
"""This function updates the collision_tree excluding a list of nodes from all the nodes in the tree. If all the existing nodes are excluded, one distant node is added.
Args:
nodes_to_exclude (list): contains the nodes to exclude from the tree. Usually it should be the mother and the brother branch nodes.
Returns:
none
"""
nodes=set(range(len(self.nodes)))
nodes=nodes.difference(nodes_to_exclude)
nodes_to_consider=[self.nodes[x] for x in nodes]
self.nodes_to_consider_keys=[x for x in nodes]
if len(nodes_to_consider)==0:
nodes_to_consider=[np.array([-100000000000.0,-100000000000.0,-100000000000.0])]
self.nodes_to_consider_keys=[100000000]
print("no nodes to consider")
self.collision_tree=cKDTree(nodes_to_consider)
def collision(self,point):
"""This function returns the distance between one point and the closest node in the tree and the index of the closest node using the collision_tree.
Args:
point (array): the coordinates of the point to calculate the distance from.
Returns:
            collision (tuple): (index of the closest node, distance to the closest node)
"""
d,node=self.collision_tree.query(point)
collision=(self.nodes_to_consider_keys[node],d)
return collision
def gradient(self,point):
"""This function returns the gradient of the distance from the existing points of the tree from any point. It uses a central finite difference approximation.
Args:
point (array): the coordinates of the point to calculate the gradient of the distance from.
Returns:
grad (array): (x,y,z) components of gradient of the distance.
"""
delta=0.01
dx=np.array([delta,0,0])
dy=np.array([0.0,delta,0.0])
dz=np.array([0.0,0.0,delta])
distx_m=self.distance_from_point(point-dx)
distx_p=self.distance_from_point(point+dx)
disty_m=self.distance_from_point(point-dy)
disty_p=self.distance_from_point(point+dy)
distz_m=self.distance_from_point(point-dz)
distz_p=self.distance_from_point(point+dz)
grad=np.array([(distx_p-distx_m)/(2*delta),(disty_p-disty_m)/(2*delta),(distz_p-distz_m)/(2*delta)])
return grad
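# A minimal, self-contained sketch of the distance-field API above (Branch is
# not exercised here because it needs a mesh object exposing `normals` and
# `project_new_point`). The commented values are approximate.
if __name__ == '__main__':
    demo_nodes = Nodes(np.array([0.0, 0.0, 0.0]))
    demo_nodes.update_collision_tree([])  # exclude no nodes
    print(demo_nodes.distance_from_point(np.array([1.0, 0.0, 0.0])))  # 1.0
    print(demo_nodes.gradient(np.array([1.0, 0.0, 0.0])))             # ~[1. 0. 0.]
    print(demo_nodes.collision(np.array([0.5, 0.0, 0.0])))            # (0, 0.5)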
|
|
import datetime
from django.core.exceptions import PermissionDenied
from django.db.models import Q, Subquery
from django.utils.encoding import force_str
from django.utils.translation import gettext_lazy as _
from django.views.generic.base import TemplateResponseMixin
from django.views.generic.list import BaseListView
from wagtail.admin.filters import (
LockedPagesReportFilterSet, SiteHistoryReportFilterSet, WorkflowReportFilterSet,
WorkflowTasksReportFilterSet)
from wagtail.admin.views.mixins import SpreadsheetExportMixin
from wagtail.core.models import (
Page, PageLogEntry, TaskState, UserPagePermissionsProxy, WorkflowState)
class ReportView(SpreadsheetExportMixin, TemplateResponseMixin, BaseListView):
header_icon = ""
page_kwarg = "p"
template_name = "wagtailadmin/reports/base_report.html"
title = ""
paginate_by = 50
filterset_class = None
def filter_queryset(self, queryset):
filters = None
if self.filterset_class:
filters = self.filterset_class(self.request.GET, queryset=queryset, request=self.request)
queryset = filters.qs
return filters, queryset
def dispatch(self, request, *args, **kwargs):
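        # Spreadsheet export is requested through the "export" query
        # parameter, e.g. ?export=csv or ?export=xlsx; FORMATS is provided by
        # SpreadsheetExportMixin.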
self.is_export = self.request.GET.get("export") in self.FORMATS
if self.is_export:
self.paginate_by = None
return self.as_spreadsheet(self.filter_queryset(self.get_queryset())[1], self.request.GET.get("export"))
return super().dispatch(request, *args, **kwargs)
def get_context_data(self, *args, object_list=None, **kwargs):
queryset = object_list if object_list is not None else self.object_list
filters, queryset = self.filter_queryset(queryset)
context = super().get_context_data(*args, object_list=queryset, **kwargs)
context["title"] = self.title
context["header_icon"] = self.header_icon
context["filters"] = filters
return context
class PageReportView(ReportView):
template_name = "wagtailadmin/reports/base_page_report.html"
export_headings = {
"latest_revision_created_at": _("Updated"),
"status_string": _("Status"),
"content_type.model_class._meta.verbose_name.title": _("Type"),
}
list_export = [
"title",
"latest_revision_created_at",
"status_string",
"content_type.model_class._meta.verbose_name.title",
]
class LockedPagesView(PageReportView):
template_name = "wagtailadmin/reports/locked_pages.html"
title = _("Locked pages")
header_icon = "lock"
list_export = PageReportView.list_export + [
"locked_at",
"locked_by",
]
filterset_class = LockedPagesReportFilterSet
def get_filename(self):
return "locked-pages-report-{}".format(
datetime.datetime.today().strftime("%Y-%m-%d")
)
def get_queryset(self):
pages = (
UserPagePermissionsProxy(self.request.user).editable_pages()
| Page.objects.filter(locked_by=self.request.user)
).filter(locked=True)
self.queryset = pages
return super().get_queryset()
def dispatch(self, request, *args, **kwargs):
if not UserPagePermissionsProxy(request.user).can_remove_locks():
raise PermissionDenied
return super().dispatch(request, *args, **kwargs)
class WorkflowView(ReportView):
template_name = 'wagtailadmin/reports/workflow.html'
title = _('Workflows')
header_icon = 'tasks'
filterset_class = WorkflowReportFilterSet
export_headings = {
"page.id": _("Page ID"),
"page.content_type.model_class._meta.verbose_name.title": _("Page Type"),
"page.title": _("Page Title"),
"get_status_display": _("Status"),
"created_at": _("Started at")
}
list_export = [
"workflow",
"page.id",
"page.content_type.model_class._meta.verbose_name.title",
"page.title",
"get_status_display",
"requested_by",
"created_at",
]
def get_filename(self):
return "workflow-report-{}".format(
datetime.datetime.today().strftime("%Y-%m-%d")
)
def get_queryset(self):
pages = UserPagePermissionsProxy(self.request.user).editable_pages()
return WorkflowState.objects.filter(page__in=pages).order_by('-created_at')
class WorkflowTasksView(ReportView):
template_name = 'wagtailadmin/reports/workflow_tasks.html'
title = _('Workflow tasks')
header_icon = 'thumbtack'
filterset_class = WorkflowTasksReportFilterSet
export_headings = {
"workflow_state.page.id": _("Page ID"),
"workflow_state.page.content_type.model_class._meta.verbose_name.title": _("Page Type"),
"workflow_state.page.title": _("Page Title"),
"get_status_display": _("Status"),
"workflow_state.requested_by": _("Requested By")
}
list_export = [
"task",
"workflow_state.page.id",
"workflow_state.page.content_type.model_class._meta.verbose_name.title",
"workflow_state.page.title",
"get_status_display",
"workflow_state.requested_by",
"started_at",
"finished_at",
"finished_by",
]
def get_filename(self):
return "workflow-tasks-{}".format(
datetime.datetime.today().strftime("%Y-%m-%d")
)
def get_queryset(self):
pages = UserPagePermissionsProxy(self.request.user).editable_pages()
return TaskState.objects.filter(workflow_state__page__in=pages).order_by('-started_at')
class LogEntriesView(ReportView):
template_name = 'wagtailadmin/reports/site_history.html'
title = _('Site history')
header_icon = 'history'
filterset_class = SiteHistoryReportFilterSet
export_headings = {
"object_id": _("ID"),
"title": _("Title"),
"object_verbose_name": _("Type"),
"action": _("Action type"),
"timestamp": _("Date/Time")
}
list_export = [
"object_id",
"label",
"object_verbose_name",
"action",
"timestamp"
]
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.custom_field_preprocess['action'] = {
self.FORMAT_CSV: self.get_action_label,
self.FORMAT_XLSX: self.get_action_label
}
def get_filename(self):
return "audit-log-{}".format(
datetime.datetime.today().strftime("%Y-%m-%d")
)
def get_queryset(self):
q = Q(
page__in=UserPagePermissionsProxy(self.request.user).explorable_pages().values_list('pk', flat=True)
)
root_page_permissions = Page.get_first_root_node().permissions_for_user(self.request.user)
if (
self.request.user.is_superuser
or root_page_permissions.can_add_subpage() or root_page_permissions.can_edit()
):
# Include deleted entries
q = q | Q(page_id__in=Subquery(
PageLogEntry.objects.filter(deleted=True).values('page_id')
))
return PageLogEntry.objects.filter(q)
def get_action_label(self, action):
from wagtail.admin.log_action_registry import registry as log_action_registry
return force_str(log_action_registry.get_action_label(action))
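# Illustrative sketch only, not part of Wagtail: a custom report is typically
# built by subclassing ReportView/PageReportView and overriding
# get_queryset(). The class name and icon below are hypothetical.
class UnpublishedPagesView(PageReportView):
    title = _("Unpublished pages")
    header_icon = "doc-empty"

    def get_queryset(self):
        return Page.objects.filter(live=False).order_by("-latest_revision_created_at")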
|
|
# -*- coding: utf-8 -*-
"""This file contains SkyDrive log file parser in plaso."""
import pyparsing
from dfdatetime import time_elements as dfdatetime_time_elements
from plaso.containers import events
from plaso.containers import time_events
from plaso.lib import errors
from plaso.lib import definitions
from plaso.parsers import logger
from plaso.parsers import manager
from plaso.parsers import text_parser
class SkyDriveLogEventData(events.EventData):
"""SkyDrive log event data.
Attributes:
detail (str): details.
log_level (str): log level.
module (str): name of the module that generated the log message.
source_code (str): source file and line number that generated the log
message.
"""
DATA_TYPE = 'skydrive:log:line'
def __init__(self):
"""Initializes event data."""
super(SkyDriveLogEventData, self).__init__(data_type=self.DATA_TYPE)
self.detail = None
self.log_level = None
self.module = None
self.source_code = None
class SkyDriveOldLogEventData(events.EventData):
"""SkyDrive old log event data.
Attributes:
log_level (str): log level.
source_code (str): source file and line number that generated the log
message.
text (str): log message.
"""
DATA_TYPE = 'skydrive:log:old:line'
def __init__(self):
"""Initializes event data."""
super(SkyDriveOldLogEventData, self).__init__(data_type=self.DATA_TYPE)
self.log_level = None
self.source_code = None
self.text = None
class SkyDriveLogParser(text_parser.PyparsingMultiLineTextParser):
"""Parses SkyDrive log files."""
NAME = 'skydrive_log'
DATA_FORMAT = 'OneDrive (or SkyDrive) log file'
_ENCODING = 'utf-8'
# Common SDF (SkyDrive Format) structures.
_COMMA = pyparsing.Literal(',').suppress()
_HYPHEN = text_parser.PyparsingConstants.HYPHEN
_THREE_DIGITS = text_parser.PyparsingConstants.THREE_DIGITS
_TWO_DIGITS = text_parser.PyparsingConstants.TWO_DIGITS
MSEC = pyparsing.Word(pyparsing.nums, max=3).setParseAction(
text_parser.PyParseIntCast)
IGNORE_FIELD = pyparsing.CharsNotIn(',').suppress()
# Date and time format used in the header is: YYYY-MM-DD-hhmmss.###
# For example: 2013-07-25-160323.291
_SDF_HEADER_DATE_TIME = pyparsing.Group(
text_parser.PyparsingConstants.DATE_ELEMENTS + _HYPHEN +
_TWO_DIGITS.setResultsName('hours') +
_TWO_DIGITS.setResultsName('minutes') +
_TWO_DIGITS.setResultsName('seconds') +
pyparsing.Literal('.').suppress() +
_THREE_DIGITS.setResultsName('milliseconds')).setResultsName(
'header_date_time')
# Date and time format used in lines other than the header is:
# MM-DD-YY,hh:mm:ss.###
# For example: 07-25-13,16:06:31.820
_SDF_DATE_TIME = (
_TWO_DIGITS.setResultsName('month') + _HYPHEN +
_TWO_DIGITS.setResultsName('day') + _HYPHEN +
_TWO_DIGITS.setResultsName('year') + _COMMA +
text_parser.PyparsingConstants.TIME_ELEMENTS + pyparsing.Suppress('.') +
_THREE_DIGITS.setResultsName('milliseconds')).setResultsName(
'date_time')
_SDF_HEADER_START = (
pyparsing.Literal('######').suppress() +
pyparsing.Literal('Logging started.').setResultsName('log_start'))
  # A multi-line entry ends at the end of the input, at the start of a new
  # header, or at the next timestamped line.
_SDF_ENTRY_END = pyparsing.StringEnd() | _SDF_HEADER_START | _SDF_DATE_TIME
_SDF_LINE = (
_SDF_DATE_TIME + _COMMA +
IGNORE_FIELD + _COMMA + IGNORE_FIELD + _COMMA + IGNORE_FIELD + _COMMA +
pyparsing.CharsNotIn(',').setResultsName('module') + _COMMA +
pyparsing.CharsNotIn(',').setResultsName('source_code') + _COMMA +
IGNORE_FIELD + _COMMA + IGNORE_FIELD + _COMMA +
pyparsing.CharsNotIn(',').setResultsName('log_level') + _COMMA +
pyparsing.SkipTo(_SDF_ENTRY_END).setResultsName('detail') +
pyparsing.ZeroOrMore(pyparsing.lineEnd()))
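  # An illustrative (made-up) example of a line in this layout:
  #   07-25-13,16:06:31.820,5,12,1954,SyncEngine,coreFolder.cpp:1772,0x0,0x0,VRB,folder change detected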
_SDF_HEADER = (
_SDF_HEADER_START +
pyparsing.Literal('Version=').setResultsName('version_string') +
pyparsing.Word(pyparsing.nums + '.').setResultsName('version_number') +
pyparsing.Literal('StartSystemTime:').suppress() +
_SDF_HEADER_DATE_TIME +
pyparsing.Literal('StartLocalTime:').setResultsName(
'local_time_string') +
pyparsing.SkipTo(pyparsing.lineEnd()).setResultsName('details') +
pyparsing.lineEnd())
LINE_STRUCTURES = [
('logline', _SDF_LINE),
('header', _SDF_HEADER)
]
def _ParseHeader(self, parser_mediator, structure):
"""Parse header lines and store appropriate attributes.
['Logging started.', 'Version=', '17.0.2011.0627',
[2013, 7, 25], 16, 3, 23, 291, 'StartLocalTime', '<details>']
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
structure (pyparsing.ParseResults): structure of tokens derived from
a line of a text file.
"""
time_elements_tuple = self._GetValueFromStructure(
structure, 'header_date_time')
try:
date_time = dfdatetime_time_elements.TimeElementsInMilliseconds(
time_elements_tuple=time_elements_tuple)
except ValueError:
parser_mediator.ProduceExtractionWarning(
'invalid date time value: {0!s}'.format(time_elements_tuple))
return
details = self._GetValueFromStructure(structure, 'details')
local_time_string = self._GetValueFromStructure(
structure, 'local_time_string')
log_start = self._GetValueFromStructure(structure, 'log_start')
version_number = self._GetValueFromStructure(structure, 'version_number')
version_string = self._GetValueFromStructure(structure, 'version_string')
event_data = SkyDriveLogEventData()
# TODO: refactor detail to individual event data attributes.
event_data.detail = '{0!s} {1!s} {2!s} {3!s} {4!s}'.format(
log_start, version_string, version_number, local_time_string, details)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_ADDED)
parser_mediator.ProduceEventWithEventData(event, event_data)
def _ParseLine(self, parser_mediator, structure):
"""Parses a logline and store appropriate attributes.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
structure (pyparsing.ParseResults): structure of tokens derived from
a line of a text file.
"""
time_elements_tuple = self._GetValueFromStructure(structure, 'date_time')
# TODO: what if time elements tuple is None.
# TODO: Verify if date and time value is locale dependent.
month, day_of_month, year, hours, minutes, seconds, milliseconds = (
time_elements_tuple)
year += 2000
time_elements_tuple = (
year, month, day_of_month, hours, minutes, seconds, milliseconds)
try:
date_time = dfdatetime_time_elements.TimeElementsInMilliseconds(
time_elements_tuple=time_elements_tuple)
except ValueError:
parser_mediator.ProduceExtractionWarning(
'invalid date time value: {0!s}'.format(time_elements_tuple))
return
# Replace newlines with spaces in structure.detail to preserve output.
# TODO: refactor detail to individual event data attributes.
detail = self._GetValueFromStructure(structure, 'detail')
if detail:
detail = detail.replace('\n', ' ')
event_data = SkyDriveLogEventData()
event_data.detail = detail
event_data.log_level = self._GetValueFromStructure(structure, 'log_level')
event_data.module = self._GetValueFromStructure(structure, 'module')
event_data.source_code = self._GetValueFromStructure(
structure, 'source_code')
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_ADDED)
parser_mediator.ProduceEventWithEventData(event, event_data)
def ParseRecord(self, parser_mediator, key, structure):
"""Parse each record structure and return an EventObject if applicable.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
key (str): identifier of the structure of tokens.
structure (pyparsing.ParseResults): structure of tokens derived from
a line of a text file.
Raises:
ParseError: when the structure type is unknown.
"""
if key not in ('header', 'logline'):
raise errors.ParseError(
'Unable to parse record, unknown structure: {0:s}'.format(key))
if key == 'logline':
self._ParseLine(parser_mediator, structure)
elif key == 'header':
self._ParseHeader(parser_mediator, structure)
def VerifyStructure(self, parser_mediator, lines):
"""Verify that this file is a SkyDrive log file.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
lines (str): one or more lines from the text file.
Returns:
bool: True if this is the correct parser, False otherwise.
"""
try:
structure = self._SDF_HEADER.parseString(lines)
except pyparsing.ParseException:
logger.debug('Not a SkyDrive log file')
return False
time_elements_tuple = self._GetValueFromStructure(
structure, 'header_date_time')
try:
dfdatetime_time_elements.TimeElementsInMilliseconds(
time_elements_tuple=time_elements_tuple)
except ValueError:
logger.debug(
'Not a SkyDrive log file, invalid date and time: {0!s}'.format(
time_elements_tuple))
return False
return True
class SkyDriveOldLogParser(text_parser.PyparsingSingleLineTextParser):
"""Parse SkyDrive old log files."""
NAME = 'skydrive_log_old'
DATA_FORMAT = 'OneDrive (or SkyDrive) old log file'
_ENCODING = 'utf-8'
_FOUR_DIGITS = text_parser.PyparsingConstants.FOUR_DIGITS
_TWO_DIGITS = text_parser.PyparsingConstants.TWO_DIGITS
# Common pyparsing objects.
_COLON = pyparsing.Literal(':')
_EXCLAMATION = pyparsing.Literal('!')
  # Date and time format used in the log lines is: MM-DD-YYYY hh:mm:ss.###
# For example: 08-01-2013 21:22:28.999
_DATE_TIME = pyparsing.Group(
_TWO_DIGITS.setResultsName('month') + pyparsing.Suppress('-') +
_TWO_DIGITS.setResultsName('day_of_month') + pyparsing.Suppress('-') +
_FOUR_DIGITS.setResultsName('year') +
text_parser.PyparsingConstants.TIME_MSEC_ELEMENTS).setResultsName(
'date_time')
_SOURCE_CODE = pyparsing.Combine(
pyparsing.CharsNotIn(':') +
_COLON +
text_parser.PyparsingConstants.INTEGER +
_EXCLAMATION +
pyparsing.Word(pyparsing.printables)).setResultsName('source_code')
_LOG_LEVEL = (
pyparsing.Literal('(').suppress() +
pyparsing.SkipTo(')').setResultsName('log_level') +
pyparsing.Literal(')').suppress())
_LINE = (
_DATE_TIME + _SOURCE_CODE + _LOG_LEVEL +
_COLON + pyparsing.SkipTo(pyparsing.lineEnd).setResultsName('text'))
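  # An illustrative (made-up) example of a line in this layout:
  #   08-01-2013 21:22:28.999 global.cpp:626!logDrivingState (DETAIL): 17.0.2011.0627 (Ship)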
# Sometimes the timestamped log line is followed by an empty line,
# then by a file name plus other data and finally by another empty
  # line. It could happen that a logline is split into two parts.
# These lines will not be discarded and an event will be generated
# ad-hoc (see source), based on the last one if available.
_NO_HEADER_SINGLE_LINE = (
pyparsing.NotAny(_DATE_TIME) +
pyparsing.Optional(pyparsing.Literal('->').suppress()) +
pyparsing.SkipTo(pyparsing.lineEnd).setResultsName('text'))
# Define the available log line structures.
LINE_STRUCTURES = [
('logline', _LINE),
('no_header_single_line', _NO_HEADER_SINGLE_LINE),
]
def __init__(self):
"""Initializes a parser."""
super(SkyDriveOldLogParser, self).__init__()
self._last_date_time = None
self._last_event_data = None
self.offset = 0
def _ParseLogline(self, parser_mediator, structure):
"""Parse a logline and store appropriate attributes.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
structure (pyparsing.ParseResults): structure of tokens derived from
a line of a text file.
"""
time_elements_tuple = self._GetValueFromStructure(structure, 'date_time')
# TODO: what if time elements tuple is None.
# TODO: Verify if date and time value is locale dependent.
month, day_of_month, year, hours, minutes, seconds, milliseconds = (
time_elements_tuple)
time_elements_tuple = (
year, month, day_of_month, hours, minutes, seconds, milliseconds)
try:
date_time = dfdatetime_time_elements.TimeElementsInMilliseconds(
time_elements_tuple=time_elements_tuple)
except ValueError:
parser_mediator.ProduceExtractionWarning(
'invalid date time value: {0!s}'.format(time_elements_tuple))
return
event_data = SkyDriveOldLogEventData()
event_data.log_level = self._GetValueFromStructure(structure, 'log_level')
event_data.offset = self.offset
event_data.source_code = self._GetValueFromStructure(
structure, 'source_code')
event_data.text = self._GetValueFromStructure(structure, 'text')
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_ADDED)
parser_mediator.ProduceEventWithEventData(event, event_data)
self._last_date_time = date_time
self._last_event_data = event_data
def _ParseNoHeaderSingleLine(self, parser_mediator, structure):
"""Parse an isolated header line and store appropriate attributes.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
structure (pyparsing.ParseResults): structure of tokens derived from
a line of a text file.
"""
if not self._last_event_data:
logger.debug('SkyDrive, found isolated line with no previous events')
return
event_data = SkyDriveOldLogEventData()
event_data.offset = self._last_event_data.offset
event_data.text = self._GetValueFromStructure(structure, 'text')
event = time_events.DateTimeValuesEvent(
self._last_date_time, definitions.TIME_DESCRIPTION_ADDED)
parser_mediator.ProduceEventWithEventData(event, event_data)
    # TODO: think about a possible refactoring of the non-header lines.
self._last_date_time = None
self._last_event_data = None
def ParseRecord(self, parser_mediator, key, structure):
"""Parse each record structure and return an EventObject if applicable.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
key (str): identifier of the structure of tokens.
structure (pyparsing.ParseResults): structure of tokens derived from
a line of a text file.
Raises:
ParseError: when the structure type is unknown.
"""
if key not in ('logline', 'no_header_single_line'):
raise errors.ParseError(
'Unable to parse record, unknown structure: {0:s}'.format(key))
if key == 'logline':
self._ParseLogline(parser_mediator, structure)
elif key == 'no_header_single_line':
self._ParseNoHeaderSingleLine(parser_mediator, structure)
def VerifyStructure(self, parser_mediator, line):
"""Verify that this file is a SkyDrive old log file.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
line (str): line from a text file.
Returns:
bool: True if the line is in the expected format, False if not.
"""
try:
structure = self._LINE.parseString(line)
except pyparsing.ParseException:
logger.debug('Not a SkyDrive old log file')
return False
time_elements_tuple = self._GetValueFromStructure(structure, 'date_time')
# TODO: what if time elements tuple is None.
    # _DATE_TIME yields the month first (see its result names); keep the
    # unpack order consistent with _ParseLogline.
    month, day_of_month, year, hours, minutes, seconds, milliseconds = (
        time_elements_tuple)
time_elements_tuple = (
year, month, day_of_month, hours, minutes, seconds, milliseconds)
try:
dfdatetime_time_elements.TimeElementsInMilliseconds(
time_elements_tuple=time_elements_tuple)
except ValueError:
logger.debug(
'Not a SkyDrive old log file, invalid date and time: {0!s}'.format(
time_elements_tuple))
return False
return True
manager.ParsersManager.RegisterParsers([
SkyDriveLogParser, SkyDriveOldLogParser])
|
|
# -*- coding: utf-8 -*-
'''
Manage RabbitMQ Users
=====================
Example:
.. code-block:: yaml
rabbit_user:
rabbitmq_user.present:
- password: password
- force: True
- tags:
- monitoring
- user
- perms:
- '/':
- '.*'
- '.*'
- '.*'
- runas: rabbitmq
'''
# Import python libs
from __future__ import absolute_import
import logging
# Import salt libs
import salt.utils
import salt.ext.six as six
from salt.exceptions import CommandExecutionError
log = logging.getLogger(__name__)
def __virtual__():
'''
Only load if RabbitMQ is installed.
'''
return salt.utils.which('rabbitmqctl') is not None
def _check_perms_changes(name, newperms, runas=None, existing=None):
'''
    Check whether the RabbitMQ user's permissions need to be changed.
'''
if not newperms:
return False
if existing is None:
try:
existing = __salt__['rabbitmq.list_user_permissions'](name, runas=runas)
except CommandExecutionError as err:
log.error('Error: {0}'.format(err))
return False
perm_need_change = False
for vhost_perms in newperms:
        for vhost, perms in six.iteritems(vhost_perms):
if vhost in existing:
existing_vhost = existing[vhost]
if perms != existing_vhost:
# This checks for setting permissions to nothing in the state,
# when previous state runs have already set permissions to
# nothing. We don't want to report a change in this case.
if existing_vhost == '' and perms == ['', '', '']:
continue
perm_need_change = True
else:
perm_need_change = True
return perm_need_change
def _check_tags_changes(name, new_tags, runas=None):
'''
    Check whether the RabbitMQ user's tags need to be changed.
'''
if new_tags:
        if isinstance(new_tags, six.string_types):
new_tags = new_tags.split()
try:
users = __salt__['rabbitmq.list_users'](runas=runas)[name] - set(new_tags)
except CommandExecutionError as err:
log.error('Error: {0}'.format(err))
return []
return list(users)
else:
return []
def present(name,
password=None,
force=False,
tags=None,
perms=(),
runas=None):
'''
Ensure the RabbitMQ user exists.
name
User name
password
User's password, if one needs to be set
force
If user exists, forcibly change the password
tags
Optional list of tags for the user
perms
A list of dicts with vhost keys and 3-tuple values
runas
Name of the user to run the command
'''
ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}
try:
user = __salt__['rabbitmq.user_exists'](name, runas=runas)
except CommandExecutionError as err:
ret['comment'] = 'Error: {0}'.format(err)
return ret
if user and not any((force, perms, tags)):
log.debug('RabbitMQ user \'{0}\' exists and force is not set.'.format(name))
ret['comment'] = 'User \'{0}\' is already present.'.format(name)
ret['result'] = True
return ret
if not user:
ret['changes'].update({'user':
{'old': '',
'new': name}})
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'User \'{0}\' is set to be created.'.format(name)
return ret
log.debug('RabbitMQ user \'{0}\' doesn\'t exist - Creating.'.format(name))
try:
__salt__['rabbitmq.add_user'](name, password, runas=runas)
except CommandExecutionError as err:
ret['comment'] = 'Error: {0}'.format(err)
return ret
else:
log.debug('RabbitMQ user \'{0}\' exists'.format(name))
if force:
if password is not None:
if not __opts__['test']:
try:
__salt__['rabbitmq.change_password'](name, password, runas=runas)
except CommandExecutionError as err:
ret['comment'] = 'Error: {0}'.format(err)
return ret
ret['changes'].update({'password':
{'old': '',
'new': 'Set password.'}})
else:
if not __opts__['test']:
log.debug('Password for {0} is not set - Clearing password.'.format(name))
try:
__salt__['rabbitmq.clear_password'](name, runas=runas)
except CommandExecutionError as err:
ret['comment'] = 'Error: {0}'.format(err)
return ret
ret['changes'].update({'password':
{'old': 'Removed password.',
'new': ''}})
new_tags = _check_tags_changes(name, tags, runas=runas)
if new_tags:
if not __opts__['test']:
try:
__salt__['rabbitmq.set_user_tags'](name, tags, runas=runas)
except CommandExecutionError as err:
ret['comment'] = 'Error: {0}'.format(err)
return ret
ret['changes'].update({'tags':
{'old': tags,
'new': list(new_tags)}})
try:
existing_perms = __salt__['rabbitmq.list_user_permissions'](name, runas=runas)
except CommandExecutionError as err:
ret['comment'] = 'Error: {0}'.format(err)
return ret
if _check_perms_changes(name, perms, runas=runas, existing=existing_perms):
for vhost_perm in perms:
for vhost, perm in six.iteritems(vhost_perm):
if not __opts__['test']:
try:
__salt__['rabbitmq.set_permissions'](
vhost, name, perm[0], perm[1], perm[2], runas=runas
)
except CommandExecutionError as err:
ret['comment'] = 'Error: {0}'.format(err)
return ret
new_perms = {vhost: perm}
if existing_perms != new_perms:
if ret['changes'].get('perms') is None:
ret['changes'].update({'perms':
{'old': {},
'new': {}}})
ret['changes']['perms']['old'].update(existing_perms)
ret['changes']['perms']['new'].update(new_perms)
ret['result'] = True
if ret['changes'] == {}:
ret['comment'] = '\'{0}\' is already in the desired state.'.format(name)
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Configuration for \'{0}\' will change.'.format(name)
return ret
ret['comment'] = '\'{0}\' was configured.'.format(name)
return ret
def absent(name,
runas=None):
'''
Ensure the named user is absent
name
The name of the user to remove
runas
User to run the command
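
    Example (illustrative):

    .. code-block:: yaml

        rabbit_user:
          rabbitmq_user.absent:
            - runas: rabbitmq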
'''
ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}
try:
user_exists = __salt__['rabbitmq.user_exists'](name, runas=runas)
except CommandExecutionError as err:
ret['comment'] = 'Error: {0}'.format(err)
return ret
if user_exists:
if not __opts__['test']:
try:
__salt__['rabbitmq.delete_user'](name, runas=runas)
except CommandExecutionError as err:
ret['comment'] = 'Error: {0}'.format(err)
return ret
ret['changes'].update({'name':
{'old': name,
'new': ''}})
else:
ret['result'] = True
ret['comment'] = 'The user \'{0}\' is not present.'.format(name)
return ret
if __opts__['test'] and ret['changes']:
ret['result'] = None
ret['comment'] = 'The user \'{0}\' will be removed.'.format(name)
return ret
ret['result'] = True
ret['comment'] = 'The user \'{0}\' was removed.'.format(name)
return ret
|
|
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
import webkitpy.tool.commands.rebaseline
from webkitpy.common.checkout.baselineoptimizer import BaselineOptimizer
from webkitpy.common.checkout.scm.scm_mock import MockSCM
from webkitpy.common.host_mock import MockHost
from webkitpy.common.net.buildbot.buildbot_mock import MockBuilder
from webkitpy.common.net.layouttestresults import LayoutTestResults
from webkitpy.common.system.executive_mock import MockExecutive
from webkitpy.common.system.executive_mock import MockExecutive2
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.tool.commands.rebaseline import *
from webkitpy.tool.mocktool import MockTool, MockOptions
class _BaseTestCase(unittest.TestCase):
MOCK_WEB_RESULT = 'MOCK Web result, convert 404 to None=True'
WEB_PREFIX = 'http://example.com/f/builders/WebKit Mac10.7/results/layout-test-results'
command_constructor = None
def setUp(self):
self.tool = MockTool()
self.command = self.command_constructor() # lint warns that command_constructor might not be set, but this is intentional; pylint: disable=E1102
self.command.bind_to_tool(self.tool)
self.lion_port = self.tool.port_factory.get_from_builder_name("WebKit Mac10.7")
self.lion_expectations_path = self.lion_port.path_to_generic_test_expectations_file()
self.tool.filesystem.write_text_file(self.tool.filesystem.join(self.lion_port.layout_tests_dir(), "VirtualTestSuites"),
'[]')
# FIXME: crbug.com/279494. We should override builders._exact_matches
# here to point to a set of test ports and restore the value in
# tearDown(), and that way the individual tests wouldn't have to worry
# about it.
def _expand(self, path):
if self.tool.filesystem.isabs(path):
return path
return self.tool.filesystem.join(self.lion_port.layout_tests_dir(), path)
def _read(self, path):
return self.tool.filesystem.read_text_file(self._expand(path))
def _write(self, path, contents):
self.tool.filesystem.write_text_file(self._expand(path), contents)
def _zero_out_test_expectations(self):
for port_name in self.tool.port_factory.all_port_names():
port = self.tool.port_factory.get(port_name)
for path in port.expectations_files():
self._write(path, '')
self.tool.filesystem.written_files = {}
def _setup_mock_builder_data(self):
data = LayoutTestResults.results_from_string("""ADD_RESULTS({
"tests": {
"userscripts": {
"first-test.html": {
"expected": "PASS",
"actual": "IMAGE+TEXT"
},
"second-test.html": {
"expected": "FAIL",
"actual": "IMAGE+TEXT"
}
}
}
});""")
# FIXME: crbug.com/279494 - we shouldn't be mixing mock and real builder names.
for builder in ['MOCK builder', 'MOCK builder (Debug)', 'WebKit Mac10.7']:
self.command._builder_data[builder] = data
class TestCopyExistingBaselinesInternal(_BaseTestCase):
command_constructor = CopyExistingBaselinesInternal
def setUp(self):
super(TestCopyExistingBaselinesInternal, self).setUp()
def test_copying_overwritten_baseline(self):
self.tool.executive = MockExecutive2()
        # FIXME: crbug.com/279494. It's confusing that this is the test port, and not the regular lion port. Really all of the tests should be using the test ports.
port = self.tool.port_factory.get('test-mac-snowleopard')
self._write(port._filesystem.join(port.layout_tests_dir(), 'platform/test-mac-snowleopard/failures/expected/image-expected.txt'), 'original snowleopard result')
old_exact_matches = builders._exact_matches
oc = OutputCapture()
try:
builders._exact_matches = {
"MOCK Leopard": {"port_name": "test-mac-leopard", "specifiers": set(["mock-specifier"])},
"MOCK SnowLeopard": {"port_name": "test-mac-snowleopard", "specifiers": set(["mock-specifier"])},
}
options = MockOptions(builder="MOCK SnowLeopard", suffixes="txt", verbose=True, test="failures/expected/image.html", results_directory=None)
oc.capture_output()
self.command.execute(options, [], self.tool)
finally:
out, _, _ = oc.restore_output()
builders._exact_matches = old_exact_matches
self.assertMultiLineEqual(self._read(self.tool.filesystem.join(port.layout_tests_dir(), 'platform/test-mac-leopard/failures/expected/image-expected.txt')), 'original snowleopard result')
self.assertMultiLineEqual(out, '{"add": [], "remove-lines": [], "delete": []}\n')
def test_copying_overwritten_baseline_to_multiple_locations(self):
self.tool.executive = MockExecutive2()
        # FIXME: crbug.com/279494. It's confusing that this is the test port, and not the regular win port. Really all of the tests should be using the test ports.
port = self.tool.port_factory.get('test-win-win7')
self._write(port._filesystem.join(port.layout_tests_dir(), 'platform/test-win-win7/failures/expected/image-expected.txt'), 'original win7 result')
old_exact_matches = builders._exact_matches
oc = OutputCapture()
try:
builders._exact_matches = {
"MOCK Leopard": {"port_name": "test-mac-leopard", "specifiers": set(["mock-specifier"])},
"MOCK Trusty": {"port_name": "test-linux-trusty", "specifiers": set(["mock-specifier"])},
"MOCK Precise": {"port_name": "test-linux-precise", "specifiers": set(["mock-specifier"])},
"MOCK Linux 32": {"port_name": "test-linux-x86", "specifiers": set(["mock-specifier"])},
"MOCK Win7": {"port_name": "test-win-win7", "specifiers": set(["mock-specifier"])},
}
options = MockOptions(builder="MOCK Win7", suffixes="txt", verbose=True, test="failures/expected/image.html", results_directory=None)
oc.capture_output()
self.command.execute(options, [], self.tool)
finally:
out, _, _ = oc.restore_output()
builders._exact_matches = old_exact_matches
self.assertMultiLineEqual(self._read(self.tool.filesystem.join(port.layout_tests_dir(), 'platform/test-linux-trusty/failures/expected/image-expected.txt')), 'original win7 result')
self.assertFalse(self.tool.filesystem.exists(self.tool.filesystem.join(port.layout_tests_dir(), 'platform/test-linux-precise/userscripts/another-test-expected.txt')))
self.assertFalse(self.tool.filesystem.exists(self.tool.filesystem.join(port.layout_tests_dir(), 'platform/test-linux-x86/userscripts/another-test-expected.txt')))
self.assertFalse(self.tool.filesystem.exists(self.tool.filesystem.join(port.layout_tests_dir(), 'platform/test-mac-leopard/userscripts/another-test-expected.txt')))
self.assertMultiLineEqual(out, '{"add": [], "remove-lines": [], "delete": []}\n')
def test_no_copy_existing_baseline(self):
self.tool.executive = MockExecutive2()
        # FIXME: It's confusing that this is the test port, and not the regular win port. Really all of the tests should be using the test ports.
port = self.tool.port_factory.get('test-win-win7')
self._write(port._filesystem.join(port.layout_tests_dir(), 'platform/test-win-win7/failures/expected/image-expected.txt'), 'original win7 result')
old_exact_matches = builders._exact_matches
oc = OutputCapture()
try:
builders._exact_matches = {
"MOCK Leopard": {"port_name": "test-mac-leopard", "specifiers": set(["mock-specifier"])},
"MOCK Trusty": {"port_name": "test-linux-trusty", "specifiers": set(["mock-specifier"])},
"MOCK Linux 32": {"port_name": "test-linux-x86", "specifiers": set(["mock-specifier"])},
"MOCK Win7": {"port_name": "test-win-win7", "specifiers": set(["mock-specifier"])},
}
options = MockOptions(builder="MOCK Win7", suffixes="txt", verbose=True, test="failures/expected/image.html", results_directory=None)
oc.capture_output()
self.command.execute(options, [], self.tool)
finally:
out, _, _ = oc.restore_output()
builders._exact_matches = old_exact_matches
self.assertMultiLineEqual(self._read(self.tool.filesystem.join(port.layout_tests_dir(), 'platform/test-linux-trusty/failures/expected/image-expected.txt')), 'original win7 result')
self.assertMultiLineEqual(self._read(self.tool.filesystem.join(port.layout_tests_dir(), 'platform/test-win-win7/failures/expected/image-expected.txt')), 'original win7 result')
self.assertFalse(self.tool.filesystem.exists(self.tool.filesystem.join(port.layout_tests_dir(), 'platform/test-linux-x86/userscripts/another-test-expected.txt')))
self.assertFalse(self.tool.filesystem.exists(self.tool.filesystem.join(port.layout_tests_dir(), 'platform/test-mac-leopard/userscripts/another-test-expected.txt')))
self.assertMultiLineEqual(out, '{"add": [], "remove-lines": [], "delete": []}\n')
def test_no_copy_skipped_test(self):
self.tool.executive = MockExecutive2()
port = self.tool.port_factory.get('test-win-win7')
fs = self.tool.filesystem
self._write(fs.join(port.layout_tests_dir(), 'platform/test-win-win7/failures/expected/image-expected.txt'), 'original win7 result')
expectations_path = fs.join(port.path_to_generic_test_expectations_file())
self._write(expectations_path, (
"[ Win ] failures/expected/image.html [ Failure ]\n"
"[ Linux ] failures/expected/image.html [ Skip ]\n"))
old_exact_matches = builders._exact_matches
oc = OutputCapture()
try:
builders._exact_matches = {
"MOCK Leopard": {"port_name": "test-mac-leopard", "specifiers": set(["mock-specifier"])},
"MOCK Trusty": {"port_name": "test-linux-trusty", "specifiers": set(["mock-specifier"])},
"MOCK Precise": {"port_name": "test-linux-precise", "specifiers": set(["mock-specifier"])},
"MOCK Linux 32": {"port_name": "test-linux-x86", "specifiers": set(["mock-specifier"])},
"MOCK Win7": {"port_name": "test-win-win7", "specifiers": set(["mock-specifier"])},
}
options = MockOptions(builder="MOCK Win7", suffixes="txt", verbose=True, test="failures/expected/image.html", results_directory=None)
oc.capture_output()
self.command.execute(options, [], self.tool)
finally:
out, _, _ = oc.restore_output()
builders._exact_matches = old_exact_matches
self.assertFalse(fs.exists(fs.join(port.layout_tests_dir(), 'platform/test-mac-leopard/failures/expected/image-expected.txt')))
self.assertFalse(fs.exists(fs.join(port.layout_tests_dir(), 'platform/test-linux-trusty/failures/expected/image-expected.txt')))
self.assertFalse(fs.exists(fs.join(port.layout_tests_dir(), 'platform/test-linux-precise/failures/expected/image-expected.txt')))
self.assertFalse(fs.exists(fs.join(port.layout_tests_dir(), 'platform/test-linux-x86/failures/expected/image-expected.txt')))
self.assertEqual(self._read(fs.join(port.layout_tests_dir(), 'platform/test-win-win7/failures/expected/image-expected.txt')),
'original win7 result')
class TestRebaselineTest(_BaseTestCase):
command_constructor = RebaselineTest # AKA webkit-patch rebaseline-test-internal
def setUp(self):
super(TestRebaselineTest, self).setUp()
self.options = MockOptions(builder="WebKit Mac10.7", test="userscripts/another-test.html", suffixes="txt", results_directory=None)
def test_baseline_directory(self):
command = self.command
self.assertMultiLineEqual(command._baseline_directory("WebKit Mac10.7"), "/mock-checkout/third_party/WebKit/LayoutTests/platform/mac-lion")
self.assertMultiLineEqual(command._baseline_directory("WebKit Mac10.6"), "/mock-checkout/third_party/WebKit/LayoutTests/platform/mac-snowleopard")
self.assertMultiLineEqual(command._baseline_directory("WebKit Linux Trusty"), "/mock-checkout/third_party/WebKit/LayoutTests/platform/linux")
self.assertMultiLineEqual(command._baseline_directory("WebKit Linux"), "/mock-checkout/third_party/WebKit/LayoutTests/platform/linux-precise")
self.assertMultiLineEqual(command._baseline_directory("WebKit Linux 32"), "/mock-checkout/third_party/WebKit/LayoutTests/platform/linux-x86")
def test_rebaseline_updates_expectations_file_noop(self):
self._zero_out_test_expectations()
self._write(self.lion_expectations_path, """Bug(B) [ Mac Linux XP Debug ] fast/dom/Window/window-postmessage-clone-really-deep-array.html [ Pass ]
Bug(A) [ Debug ] : fast/css/large-list-of-rules-crash.html [ Failure ]
""")
self._write("fast/dom/Window/window-postmessage-clone-really-deep-array.html", "Dummy test contents")
self._write("fast/css/large-list-of-rules-crash.html", "Dummy test contents")
self._write("userscripts/another-test.html", "Dummy test contents")
self.options.suffixes = "png,wav,txt"
self.command._rebaseline_test_and_update_expectations(self.options)
self.assertItemsEqual(self.tool.web.urls_fetched,
[self.WEB_PREFIX + '/userscripts/another-test-actual.png',
self.WEB_PREFIX + '/userscripts/another-test-actual.wav',
self.WEB_PREFIX + '/userscripts/another-test-actual.txt'])
new_expectations = self._read(self.lion_expectations_path)
self.assertMultiLineEqual(new_expectations, """Bug(B) [ Mac Linux XP Debug ] fast/dom/Window/window-postmessage-clone-really-deep-array.html [ Pass ]
Bug(A) [ Debug ] : fast/css/large-list-of-rules-crash.html [ Failure ]
""")
def test_rebaseline_test(self):
self.command._rebaseline_test("WebKit Linux Trusty", "userscripts/another-test.html", "txt", self.WEB_PREFIX)
self.assertItemsEqual(self.tool.web.urls_fetched, [self.WEB_PREFIX + '/userscripts/another-test-actual.txt'])
def test_rebaseline_test_with_results_directory(self):
self._write("userscripts/another-test.html", "test data")
self._write(self.lion_expectations_path, "Bug(x) [ Mac ] userscripts/another-test.html [ Failure ]\nbug(z) [ Linux ] userscripts/another-test.html [ Failure ]\n")
self.options.results_directory = '/tmp'
self.command._rebaseline_test_and_update_expectations(self.options)
self.assertItemsEqual(self.tool.web.urls_fetched, ['file:///tmp/userscripts/another-test-actual.txt'])
def test_rebaseline_reftest(self):
self._write("userscripts/another-test.html", "test data")
self._write("userscripts/another-test-expected.html", "generic result")
OutputCapture().assert_outputs(self, self.command._rebaseline_test_and_update_expectations, args=[self.options],
expected_logs="Cannot rebaseline reftest: userscripts/another-test.html\n")
self.assertDictEqual(self.command._scm_changes, {'add': [], 'remove-lines': [], "delete": []})
def test_rebaseline_test_and_print_scm_changes(self):
self.command._print_scm_changes = True
self.command._scm_changes = {'add': [], 'delete': []}
self.tool._scm.exists = lambda x: False
self.command._rebaseline_test("WebKit Linux Trusty", "userscripts/another-test.html", "txt", None)
self.assertDictEqual(self.command._scm_changes, {'add': ['/mock-checkout/third_party/WebKit/LayoutTests/platform/linux/userscripts/another-test-expected.txt'], 'delete': []})
def test_rebaseline_test_internal_with_port_that_lacks_buildbot(self):
self.tool.executive = MockExecutive2()
# FIXME: it's confusing that this is the test- port, and not the regular win port. Really all of the tests should be using the test ports.
port = self.tool.port_factory.get('test-win-win7')
self._write(port._filesystem.join(port.layout_tests_dir(), 'platform/test-win-win7/failures/expected/image-expected.txt'), 'original win7 result')
old_exact_matches = builders._exact_matches
oc = OutputCapture()
try:
builders._exact_matches = {
"MOCK XP": {"port_name": "test-win-xp"},
"MOCK Win7": {"port_name": "test-win-win7"},
}
options = MockOptions(optimize=True, builder="MOCK Win7", suffixes="txt",
verbose=True, test="failures/expected/image.html", results_directory=None)
oc.capture_output()
self.command.execute(options, [], self.tool)
finally:
out, _, _ = oc.restore_output()
builders._exact_matches = old_exact_matches
self.assertMultiLineEqual(self._read(self.tool.filesystem.join(port.layout_tests_dir(), 'platform/test-win-win7/failures/expected/image-expected.txt')), 'MOCK Web result, convert 404 to None=True')
self.assertFalse(self.tool.filesystem.exists(self.tool.filesystem.join(port.layout_tests_dir(), 'platform/test-win-xp/failures/expected/image-expected.txt')))
self.assertMultiLineEqual(out, '{"add": [], "remove-lines": [{"test": "failures/expected/image.html", "builder": "MOCK Win7"}], "delete": []}\n')
class TestAbstractParallelRebaselineCommand(_BaseTestCase):
command_constructor = AbstractParallelRebaselineCommand
def test_builders_to_fetch_from(self):
old_exact_matches = builders._exact_matches
try:
builders._exact_matches = {
"MOCK XP": {"port_name": "test-win-xp"},
"MOCK Win7": {"port_name": "test-win-win7"},
"MOCK Win7 (dbg)(1)": {"port_name": "test-win-win7"},
"MOCK Win7 (dbg)(2)": {"port_name": "test-win-win7"},
}
builders_to_fetch = self.command._builders_to_fetch_from(["MOCK XP", "MOCK Win7 (dbg)(1)", "MOCK Win7 (dbg)(2)", "MOCK Win7"])
self.assertEqual(builders_to_fetch, ["MOCK XP", "MOCK Win7"])
finally:
builders._exact_matches = old_exact_matches
class TestRebaselineJson(_BaseTestCase):
command_constructor = RebaselineJson
def setUp(self):
super(TestRebaselineJson, self).setUp()
self.tool.executive = MockExecutive2()
self.old_exact_matches = builders._exact_matches
builders._exact_matches = {
"MOCK builder": {"port_name": "test-mac-snowleopard"},
"MOCK builder (Debug)": {"port_name": "test-mac-snowleopard"},
}
def tearDown(self):
builders._exact_matches = self.old_exact_matches
super(TestRebaselineJson, self).tearDown()
def test_rebaseline_test_passes_on_all_builders(self):
self._setup_mock_builder_data()
def builder_data():
self.command._builder_data['MOCK builder'] = LayoutTestResults.results_from_string("""ADD_RESULTS({
"tests": {
"userscripts": {
"first-test.html": {
"expected": "NEEDSREBASELINE",
"actual": "PASS"
}
}
}
});""")
return self.command._builder_data
self.command.builder_data = builder_data
options = MockOptions(optimize=True, verbose=True, results_directory=None)
self._write(self.lion_expectations_path, "Bug(x) userscripts/first-test.html [ Failure ]\n")
self._write("userscripts/first-test.html", "Dummy test contents")
self.command._rebaseline(options, {"userscripts/first-test.html": {"MOCK builder": ["txt", "png"]}})
        # Since the test already passes on the builder, only the optimize-baselines step is expected to run.
self.assertEqual(self.tool.executive.calls,
[[['python', 'echo', 'optimize-baselines', '--no-modify-scm', '--suffixes', '', 'userscripts/first-test.html', '--verbose']]])
def test_rebaseline_all(self):
self._setup_mock_builder_data()
options = MockOptions(optimize=True, verbose=True, results_directory=None)
self._write("userscripts/first-test.html", "Dummy test contents")
self.command._rebaseline(options, {"userscripts/first-test.html": {"MOCK builder": ["txt", "png"]}})
        # The copy and rebaseline steps each run as their own run_in_parallel() group, followed by the optimize step.
self.assertEqual(self.tool.executive.calls,
[[['python', 'echo', 'copy-existing-baselines-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder', '--test', 'userscripts/first-test.html', '--verbose']],
[['python', 'echo', 'rebaseline-test-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder', '--test', 'userscripts/first-test.html', '--verbose']],
[['python', 'echo', 'optimize-baselines', '--no-modify-scm', '--suffixes', 'txt,png', 'userscripts/first-test.html', '--verbose']]])
def test_rebaseline_debug(self):
self._setup_mock_builder_data()
options = MockOptions(optimize=True, verbose=True, results_directory=None)
self._write("userscripts/first-test.html", "Dummy test contents")
self.command._rebaseline(options, {"userscripts/first-test.html": {"MOCK builder (Debug)": ["txt", "png"]}})
        # The copy and rebaseline steps each run as their own run_in_parallel() group, followed by the optimize step.
self.assertEqual(self.tool.executive.calls,
[[['python', 'echo', 'copy-existing-baselines-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder (Debug)', '--test', 'userscripts/first-test.html', '--verbose']],
[['python', 'echo', 'rebaseline-test-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder (Debug)', '--test', 'userscripts/first-test.html', '--verbose']],
[['python', 'echo', 'optimize-baselines', '--no-modify-scm', '--suffixes', 'txt,png', 'userscripts/first-test.html', '--verbose']]])
def test_no_optimize(self):
self._setup_mock_builder_data()
options = MockOptions(optimize=False, verbose=True, results_directory=None)
self._write("userscripts/first-test.html", "Dummy test contents")
self.command._rebaseline(options, {"userscripts/first-test.html": {"MOCK builder (Debug)": ["txt", "png"]}})
        # With optimize=False, only the copy and rebaseline groups run.
self.assertEqual(self.tool.executive.calls,
[[['python', 'echo', 'copy-existing-baselines-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder (Debug)', '--test', 'userscripts/first-test.html', '--verbose']],
[['python', 'echo', 'rebaseline-test-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder (Debug)', '--test', 'userscripts/first-test.html', '--verbose']]])
def test_results_directory(self):
self._setup_mock_builder_data()
options = MockOptions(optimize=False, verbose=True, results_directory='/tmp')
self._write("userscripts/first-test.html", "Dummy test contents")
self.command._rebaseline(options, {"userscripts/first-test.html": {"MOCK builder": ["txt", "png"]}})
        # With optimize=False, only the copy and rebaseline groups run; the results directory is passed through to both.
self.assertEqual(self.tool.executive.calls,
[[['python', 'echo', 'copy-existing-baselines-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder', '--test', 'userscripts/first-test.html', '--results-directory', '/tmp', '--verbose']],
[['python', 'echo', 'rebaseline-test-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder', '--test', 'userscripts/first-test.html', '--results-directory', '/tmp', '--verbose']]])
class TestRebaselineJsonUpdatesExpectationsFiles(_BaseTestCase):
command_constructor = RebaselineJson
def setUp(self):
super(TestRebaselineJsonUpdatesExpectationsFiles, self).setUp()
self.tool.executive = MockExecutive2()
def mock_run_command(args,
cwd=None,
input=None,
error_handler=None,
return_exit_code=False,
return_stderr=True,
decode_output=False,
env=None):
return '{"add": [], "remove-lines": [{"test": "userscripts/first-test.html", "builder": "WebKit Mac10.7"}]}\n'
self.tool.executive.run_command = mock_run_command
def test_rebaseline_updates_expectations_file(self):
options = MockOptions(optimize=False, verbose=True, results_directory=None)
self._write(self.lion_expectations_path, "Bug(x) [ Mac ] userscripts/first-test.html [ Failure ]\nbug(z) [ Linux ] userscripts/first-test.html [ Failure ]\n")
self._write("userscripts/first-test.html", "Dummy test contents")
self._setup_mock_builder_data()
self.command._rebaseline(options, {"userscripts/first-test.html": {"WebKit Mac10.7": ["txt", "png"]}})
new_expectations = self._read(self.lion_expectations_path)
self.assertMultiLineEqual(new_expectations, "Bug(x) [ Mac10.10 Mac10.9 Mac10.8 Retina Mac10.6 ] userscripts/first-test.html [ Failure ]\nbug(z) [ Linux ] userscripts/first-test.html [ Failure ]\n")
def test_rebaseline_updates_expectations_file_all_platforms(self):
options = MockOptions(optimize=False, verbose=True, results_directory=None)
self._write(self.lion_expectations_path, "Bug(x) userscripts/first-test.html [ Failure ]\n")
self._write("userscripts/first-test.html", "Dummy test contents")
self._setup_mock_builder_data()
self.command._rebaseline(options, {"userscripts/first-test.html": {"WebKit Mac10.7": ["txt", "png"]}})
new_expectations = self._read(self.lion_expectations_path)
self.assertMultiLineEqual(new_expectations, "Bug(x) [ Android Linux Mac10.10 Mac10.9 Mac10.8 Retina Mac10.6 Win ] userscripts/first-test.html [ Failure ]\n")
def test_rebaseline_handles_platform_skips(self):
# This test is just like test_rebaseline_updates_expectations_file_all_platforms(),
# except that if a particular port happens to SKIP a test in an overrides file,
# we count that as passing, and do not think that we still need to rebaseline it.
options = MockOptions(optimize=False, verbose=True, results_directory=None)
self._write(self.lion_expectations_path, "Bug(x) userscripts/first-test.html [ Failure ]\n")
self._write("NeverFixTests", "Bug(y) [ Android ] userscripts [ Skip ]\n")
self._write("userscripts/first-test.html", "Dummy test contents")
self._setup_mock_builder_data()
self.command._rebaseline(options, {"userscripts/first-test.html": {"WebKit Mac10.7": ["txt", "png"]}})
new_expectations = self._read(self.lion_expectations_path)
self.assertMultiLineEqual(new_expectations, "Bug(x) [ Linux Mac10.10 Mac10.9 Mac10.8 Retina Mac10.6 Win ] userscripts/first-test.html [ Failure ]\n")
def test_rebaseline_handles_skips_in_file(self):
        # This test is like test_rebaseline_handles_platform_skips, except that the
        # Skip is in the same (generic) file rather than a platform file. In this case,
        # the Skip line should be left unmodified. Note that the first line is now
        # qualified as "[ Linux Mac Win ]"; if it was unqualified, it would conflict with
# the second line.
options = MockOptions(optimize=False, verbose=True, results_directory=None)
self._write(self.lion_expectations_path,
("Bug(x) [ Linux Mac Win ] userscripts/first-test.html [ Failure ]\n"
"Bug(y) [ Android ] userscripts/first-test.html [ Skip ]\n"))
self._write("userscripts/first-test.html", "Dummy test contents")
self._setup_mock_builder_data()
self.command._rebaseline(options, {"userscripts/first-test.html": {"WebKit Mac10.7": ["txt", "png"]}})
new_expectations = self._read(self.lion_expectations_path)
self.assertMultiLineEqual(
new_expectations,
("Bug(x) [ Linux Mac10.10 Mac10.9 Mac10.8 Retina Mac10.6 Win ] userscripts/first-test.html [ Failure ]\n"
"Bug(y) [ Android ] userscripts/first-test.html [ Skip ]\n"))
def test_rebaseline_handles_smoke_tests(self):
# This test is just like test_rebaseline_handles_platform_skips, except that we check for
# a test not being in the SmokeTests file, instead of using overrides files.
# If a test is not part of the smoke tests, we count that as passing on ports that only
# run smoke tests, and do not think that we still need to rebaseline it.
options = MockOptions(optimize=False, verbose=True, results_directory=None)
self._write(self.lion_expectations_path, "Bug(x) userscripts/first-test.html [ Failure ]\n")
self._write("SmokeTests", "fast/html/article-element.html")
self._write("userscripts/first-test.html", "Dummy test contents")
self._setup_mock_builder_data()
self.command._rebaseline(options, {"userscripts/first-test.html": {"WebKit Mac10.7": ["txt", "png"]}})
new_expectations = self._read(self.lion_expectations_path)
self.assertMultiLineEqual(new_expectations, "Bug(x) [ Linux Mac10.10 Mac10.9 Mac10.8 Retina Mac10.6 Win ] userscripts/first-test.html [ Failure ]\n")
class TestRebaseline(_BaseTestCase):
# This command shares most of its logic with RebaselineJson, so these tests just test what is different.
command_constructor = Rebaseline # AKA webkit-patch rebaseline
def test_rebaseline(self):
self.command._builders_to_pull_from = lambda: [MockBuilder('MOCK builder')]
self._write("userscripts/first-test.html", "test data")
self._zero_out_test_expectations()
self._setup_mock_builder_data()
old_exact_matches = builders._exact_matches
try:
builders._exact_matches = {
"MOCK builder": {"port_name": "test-mac-leopard", "specifiers": set(["mock-specifier"])},
}
self.command.execute(MockOptions(results_directory=False, optimize=False, builders=None, suffixes="txt,png", verbose=True), ['userscripts/first-test.html'], self.tool)
finally:
builders._exact_matches = old_exact_matches
calls = filter(lambda x: x != ['qmake', '-v'] and x[0] != 'perl', self.tool.executive.calls)
self.assertEqual(calls,
[[['python', 'echo', 'copy-existing-baselines-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder', '--test', 'userscripts/first-test.html', '--verbose']],
[['python', 'echo', 'rebaseline-test-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder', '--test', 'userscripts/first-test.html', '--verbose']]])
def test_rebaseline_directory(self):
self.command._builders_to_pull_from = lambda: [MockBuilder('MOCK builder')]
self._write("userscripts/first-test.html", "test data")
self._write("userscripts/second-test.html", "test data")
self._setup_mock_builder_data()
old_exact_matches = builders._exact_matches
try:
builders._exact_matches = {
"MOCK builder": {"port_name": "test-mac-leopard", "specifiers": set(["mock-specifier"])},
}
self.command.execute(MockOptions(results_directory=False, optimize=False, builders=None, suffixes="txt,png", verbose=True), ['userscripts'], self.tool)
finally:
builders._exact_matches = old_exact_matches
calls = filter(lambda x: x != ['qmake', '-v'] and x[0] != 'perl', self.tool.executive.calls)
self.assertEqual(calls,
[[['python', 'echo', 'copy-existing-baselines-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder', '--test', 'userscripts/first-test.html', '--verbose'],
['python', 'echo', 'copy-existing-baselines-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder', '--test', 'userscripts/second-test.html', '--verbose']],
[['python', 'echo', 'rebaseline-test-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder', '--test', 'userscripts/first-test.html', '--verbose'],
['python', 'echo', 'rebaseline-test-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder', '--test', 'userscripts/second-test.html', '--verbose']]])
class MockLineRemovingExecutive(MockExecutive):
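    # Test double for the rebaseline commands: it behaves like
    # MockExecutive.run_in_parallel(), but for every
    # 'rebaseline-test-internal' invocation it fabricates JSON output naming
    # one expectation line to remove (taking the test and builder from the
    # command line), and it collapses the per-command entries recorded by
    # run_command() back into a single grouped entry in self.calls.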
def run_in_parallel(self, commands):
assert len(commands)
num_previous_calls = len(self.calls)
command_outputs = []
for cmd_line, cwd in commands:
out = self.run_command(cmd_line, cwd=cwd)
if 'rebaseline-test-internal' in cmd_line:
out = '{"add": [], "remove-lines": [{"test": "%s", "builder": "%s"}], "delete": []}\n' % (cmd_line[8], cmd_line[6])
command_outputs.append([0, out, ''])
new_calls = self.calls[num_previous_calls:]
self.calls = self.calls[:num_previous_calls]
self.calls.append(new_calls)
return command_outputs
class TestRebaselineExpectations(_BaseTestCase):
command_constructor = RebaselineExpectations
def setUp(self):
super(TestRebaselineExpectations, self).setUp()
self.options = MockOptions(optimize=False, builders=None, suffixes=['txt'], verbose=False, platform=None, results_directory=None)
def _write_test_file(self, port, path, contents):
abs_path = self.tool.filesystem.join(port.layout_tests_dir(), path)
self.tool.filesystem.write_text_file(abs_path, contents)
def _setup_test_port(self):
test_port = self.tool.port_factory.get('test')
original_get = self.tool.port_factory.get
def get_test_port(port_name=None, options=None, **kwargs):
if not port_name:
return test_port
return original_get(port_name, options, **kwargs)
# Need to make sure all the ports grabbed use the test checkout path instead of the mock checkout path.
# FIXME: crbug.com/279494 - we shouldn't be doing this.
self.tool.port_factory.get = get_test_port
return test_port
def test_rebaseline_expectations(self):
self._zero_out_test_expectations()
self.tool.executive = MockExecutive2()
def builder_data():
self.command._builder_data['MOCK SnowLeopard'] = self.command._builder_data['MOCK Leopard'] = LayoutTestResults.results_from_string("""ADD_RESULTS({
"tests": {
"userscripts": {
"another-test.html": {
"expected": "PASS",
"actual": "PASS TEXT"
},
"images.svg": {
"expected": "FAIL",
"actual": "IMAGE+TEXT"
}
}
}
});""")
return self.command._builder_data
self.command.builder_data = builder_data
self._write("userscripts/another-test.html", "Dummy test contents")
self._write("userscripts/images.svg", "Dummy test contents")
self.command._tests_to_rebaseline = lambda port: {
'userscripts/another-test.html': set(['txt']),
'userscripts/images.svg': set(['png']),
'userscripts/not-actually-failing.html': set(['txt', 'png', 'wav']),
}
old_exact_matches = builders._exact_matches
try:
builders._exact_matches = {
"MOCK Leopard": {"port_name": "test-mac-leopard", "specifiers": set(["mock-specifier"])},
"MOCK SnowLeopard": {"port_name": "test-mac-snowleopard", "specifiers": set(["mock-specifier"])},
}
self.command.execute(self.options, [], self.tool)
finally:
builders._exact_matches = old_exact_matches
# FIXME: change this to use the test- ports.
calls = filter(lambda x: x != ['qmake', '-v'], self.tool.executive.calls)
        self.assertEqual(calls, [
[
['python', 'echo', 'copy-existing-baselines-internal', '--suffixes', 'txt', '--builder', 'MOCK Leopard', '--test', 'userscripts/another-test.html'],
['python', 'echo', 'copy-existing-baselines-internal', '--suffixes', 'txt', '--builder', 'MOCK SnowLeopard', '--test', 'userscripts/another-test.html'],
['python', 'echo', 'copy-existing-baselines-internal', '--suffixes', 'png', '--builder', 'MOCK Leopard', '--test', 'userscripts/images.svg'],
['python', 'echo', 'copy-existing-baselines-internal', '--suffixes', 'png', '--builder', 'MOCK SnowLeopard', '--test', 'userscripts/images.svg'],
],
[
['python', 'echo', 'rebaseline-test-internal', '--suffixes', 'txt', '--builder', 'MOCK Leopard', '--test', 'userscripts/another-test.html'],
['python', 'echo', 'rebaseline-test-internal', '--suffixes', 'txt', '--builder', 'MOCK SnowLeopard', '--test', 'userscripts/another-test.html'],
['python', 'echo', 'rebaseline-test-internal', '--suffixes', 'png', '--builder', 'MOCK Leopard', '--test', 'userscripts/images.svg'],
['python', 'echo', 'rebaseline-test-internal', '--suffixes', 'png', '--builder', 'MOCK SnowLeopard', '--test', 'userscripts/images.svg'],
],
])
def test_rebaseline_expectations_noop(self):
self._zero_out_test_expectations()
oc = OutputCapture()
try:
oc.capture_output()
self.command.execute(self.options, [], self.tool)
finally:
_, _, logs = oc.restore_output()
self.assertEqual(self.tool.filesystem.written_files, {})
self.assertEqual(logs, 'Did not find any tests marked Rebaseline.\n')
def disabled_test_overrides_are_included_correctly(self):
# This tests that the any tests marked as REBASELINE in the overrides are found, but
# that the overrides do not get written into the main file.
self._zero_out_test_expectations()
self._write(self.lion_expectations_path, '')
self.lion_port.expectations_dict = lambda: {
self.lion_expectations_path: '',
'overrides': ('Bug(x) userscripts/another-test.html [ Failure Rebaseline ]\n'
'Bug(y) userscripts/test.html [ Crash ]\n')}
self._write('/userscripts/another-test.html', '')
self.assertDictEqual(self.command._tests_to_rebaseline(self.lion_port), {'userscripts/another-test.html': set(['png', 'txt', 'wav'])})
self.assertEqual(self._read(self.lion_expectations_path), '')
def test_rebaseline_without_other_expectations(self):
self._write("userscripts/another-test.html", "Dummy test contents")
self._write(self.lion_expectations_path, "Bug(x) userscripts/another-test.html [ Rebaseline ]\n")
self.assertDictEqual(self.command._tests_to_rebaseline(self.lion_port), {'userscripts/another-test.html': ('png', 'wav', 'txt')})
def test_rebaseline_test_passes_everywhere(self):
test_port = self._setup_test_port()
old_builder_data = self.command.builder_data
def builder_data():
self.command._builder_data['MOCK Leopard'] = self.command._builder_data['MOCK SnowLeopard'] = LayoutTestResults.results_from_string("""ADD_RESULTS({
"tests": {
"fast": {
"dom": {
"prototype-taco.html": {
"expected": "FAIL",
"actual": "PASS",
"is_unexpected": true
}
}
}
}
});""")
return self.command._builder_data
self.command.builder_data = builder_data
self.tool.filesystem.write_text_file(test_port.path_to_generic_test_expectations_file(), """
Bug(foo) fast/dom/prototype-taco.html [ Rebaseline ]
""")
self._write_test_file(test_port, 'fast/dom/prototype-taco.html', "Dummy test contents")
self.tool.executive = MockLineRemovingExecutive()
old_exact_matches = builders._exact_matches
try:
builders._exact_matches = {
"MOCK Leopard": {"port_name": "test-mac-leopard", "specifiers": set(["mock-specifier"])},
"MOCK SnowLeopard": {"port_name": "test-mac-snowleopard", "specifiers": set(["mock-specifier"])},
}
self.command.execute(self.options, [], self.tool)
self.assertEqual(self.tool.executive.calls, [])
# The mac ports should both be removed since they're the only ones in builders._exact_matches.
self.assertEqual(self.tool.filesystem.read_text_file(test_port.path_to_generic_test_expectations_file()), """
Bug(foo) [ Linux Win ] fast/dom/prototype-taco.html [ Rebaseline ]
""")
finally:
builders._exact_matches = old_exact_matches
class _FakeOptimizer(BaselineOptimizer):
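    # Stub optimizer used by TestAnalyzeBaselines below: it reports a single
    # canned result directory for .txt baselines and no results for any
    # other suffix.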
def read_results_by_directory(self, baseline_name):
if baseline_name.endswith('txt'):
return {'LayoutTests/passes/text.html': '123456'}
return {}
class TestOptimizeBaselines(_BaseTestCase):
command_constructor = OptimizeBaselines
def _write_test_file(self, port, path, contents):
abs_path = self.tool.filesystem.join(port.layout_tests_dir(), path)
self.tool.filesystem.write_text_file(abs_path, contents)
def setUp(self):
super(TestOptimizeBaselines, self).setUp()
# FIXME: This is a hack to get the unittest and the BaselineOptimize to both use /mock-checkout
# instead of one using /mock-checkout and one using /test-checkout.
default_port = self.tool.port_factory.get()
self.tool.port_factory.get = lambda port_name=None: default_port
def test_modify_scm(self):
test_port = self.tool.port_factory.get('test')
self._write_test_file(test_port, 'another/test.html', "Dummy test contents")
self._write_test_file(test_port, 'platform/mac-snowleopard/another/test-expected.txt', "result A")
self._write_test_file(test_port, 'another/test-expected.txt', "result A")
old_exact_matches = builders._exact_matches
try:
builders._exact_matches = {
"MOCK Leopard Debug": {"port_name": "test-mac-snowleopard", "specifiers": set(["mock-specifier"])},
}
OutputCapture().assert_outputs(self, self.command.execute, args=[
MockOptions(suffixes='txt', no_modify_scm=False, platform='test-mac-snowleopard'),
['another/test.html'],
self.tool,
], expected_stdout='{"add": [], "remove-lines": [], "delete": []}\n')
finally:
builders._exact_matches = old_exact_matches
self.assertFalse(self.tool.filesystem.exists(self.tool.filesystem.join(test_port.layout_tests_dir(), 'platform/mac/another/test-expected.txt')))
self.assertTrue(self.tool.filesystem.exists(self.tool.filesystem.join(test_port.layout_tests_dir(), 'another/test-expected.txt')))
def test_no_modify_scm(self):
test_port = self.tool.port_factory.get('test')
self._write_test_file(test_port, 'another/test.html', "Dummy test contents")
self._write_test_file(test_port, 'platform/mac-snowleopard/another/test-expected.txt', "result A")
self._write_test_file(test_port, 'another/test-expected.txt', "result A")
old_exact_matches = builders._exact_matches
try:
builders._exact_matches = {
"MOCK Leopard Debug": {"port_name": "test-mac-snowleopard", "specifiers": set(["mock-specifier"])},
}
OutputCapture().assert_outputs(self, self.command.execute, args=[
MockOptions(suffixes='txt', no_modify_scm=True, platform='test-mac-snowleopard'),
['another/test.html'],
self.tool,
], expected_stdout='{"add": [], "remove-lines": [], "delete": ["/mock-checkout/third_party/WebKit/LayoutTests/platform/mac-snowleopard/another/test-expected.txt"]}\n')
finally:
builders._exact_matches = old_exact_matches
self.assertFalse(self.tool.filesystem.exists(self.tool.filesystem.join(test_port.layout_tests_dir(), 'platform/mac/another/test-expected.txt')))
self.assertTrue(self.tool.filesystem.exists(self.tool.filesystem.join(test_port.layout_tests_dir(), 'another/test-expected.txt')))
def test_optimize_all_suffixes_by_default(self):
test_port = self.tool.port_factory.get('test')
self._write_test_file(test_port, 'another/test.html', "Dummy test contents")
self._write_test_file(test_port, 'platform/mac-snowleopard/another/test-expected.txt', "result A")
self._write_test_file(test_port, 'platform/mac-snowleopard/another/test-expected.png', "result A png")
self._write_test_file(test_port, 'another/test-expected.txt', "result A")
self._write_test_file(test_port, 'another/test-expected.png', "result A png")
        old_exact_matches = builders._exact_matches
        oc = OutputCapture()
        try:
            builders._exact_matches = {
                "MOCK Leopard Debug": {"port_name": "test-mac-snowleopard", "specifiers": set(["mock-specifier"])},
            }
            oc.capture_output()
self.command.execute(MockOptions(suffixes='txt,wav,png', no_modify_scm=True, platform='test-mac-snowleopard'),
['another/test.html'],
self.tool)
finally:
out, err, logs = oc.restore_output()
builders._exact_matches = old_exact_matches
        self.assertEqual(out, '{"add": [], "remove-lines": [], "delete": ["/mock-checkout/third_party/WebKit/LayoutTests/platform/mac-snowleopard/another/test-expected.txt", "/mock-checkout/third_party/WebKit/LayoutTests/platform/mac-snowleopard/another/test-expected.png"]}\n')
self.assertFalse(self.tool.filesystem.exists(self.tool.filesystem.join(test_port.layout_tests_dir(), 'platform/mac/another/test-expected.txt')))
self.assertFalse(self.tool.filesystem.exists(self.tool.filesystem.join(test_port.layout_tests_dir(), 'platform/mac/another/test-expected.png')))
self.assertTrue(self.tool.filesystem.exists(self.tool.filesystem.join(test_port.layout_tests_dir(), 'another/test-expected.txt')))
self.assertTrue(self.tool.filesystem.exists(self.tool.filesystem.join(test_port.layout_tests_dir(), 'another/test-expected.png')))
class TestAnalyzeBaselines(_BaseTestCase):
command_constructor = AnalyzeBaselines
def setUp(self):
super(TestAnalyzeBaselines, self).setUp()
self.port = self.tool.port_factory.get('test')
self.tool.port_factory.get = (lambda port_name=None, options=None: self.port)
self.lines = []
self.command._optimizer_class = _FakeOptimizer
self.command._write = (lambda msg: self.lines.append(msg)) # pylint bug warning about unnecessary lambda? pylint: disable=W0108
def test_default(self):
self.command.execute(MockOptions(suffixes='txt', missing=False, platform=None), ['passes/text.html'], self.tool)
self.assertEqual(self.lines,
['passes/text-expected.txt:',
' (generic): 123456'])
def test_missing_baselines(self):
self.command.execute(MockOptions(suffixes='png,txt', missing=True, platform=None), ['passes/text.html'], self.tool)
self.assertEqual(self.lines,
['passes/text-expected.png: (no baselines found)',
'passes/text-expected.txt:',
' (generic): 123456'])
class TestAutoRebaseline(_BaseTestCase):
command_constructor = AutoRebaseline
def _write_test_file(self, port, path, contents):
abs_path = self.tool.filesystem.join(port.layout_tests_dir(), path)
self.tool.filesystem.write_text_file(abs_path, contents)
def _setup_test_port(self):
test_port = self.tool.port_factory.get('test')
original_get = self.tool.port_factory.get
def get_test_port(port_name=None, options=None, **kwargs):
if not port_name:
return test_port
return original_get(port_name, options, **kwargs)
# Need to make sure all the ports grabbed use the test checkout path instead of the mock checkout path.
# FIXME: crbug.com/279494 - we shouldn't be doing this.
self.tool.port_factory.get = get_test_port
return test_port
def setUp(self):
super(TestAutoRebaseline, self).setUp()
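        # Pretend every builder has already cycled through revision 9000, so
        # blame lines at or below that revision are eligible for
        # auto-rebaselining.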
self.command.latest_revision_processed_on_all_bots = lambda: 9000
self.command.bot_revision_data = lambda: [{"builder": "Mock builder", "revision": "9000"}]
def test_release_builders(self):
old_exact_matches = builders._exact_matches
try:
builders._exact_matches = {
"MOCK Leopard": {"port_name": "test-mac-leopard", "specifiers": set(["mock-specifier"])},
"MOCK Leopard Debug": {"port_name": "test-mac-snowleopard", "specifiers": set(["mock-specifier"])},
"MOCK Leopard ASAN": {"port_name": "test-mac-snowleopard", "specifiers": set(["mock-specifier"])},
}
self.assertEqual(self.command._release_builders(), ['MOCK Leopard'])
finally:
builders._exact_matches = old_exact_matches
def test_tests_to_rebaseline(self):
def blame(path):
return """
624c3081c0 path/to/TestExpectations (<foobarbaz1@chromium.org> 2013-06-14 20:18:46 +0000 11) crbug.com/24182 [ Debug ] path/to/norebaseline.html [ Failure ]
624c3081c0 path/to/TestExpectations (<foobarbaz1@chromium.org@bbb929c8-8fbe-4397-9dbb-9b2b20218538> 2013-06-14 20:18:46 +0000 11) crbug.com/24182 [ Debug ] path/to/norebaseline-email-with-hash.html [ Failure ]
624c3081c0 path/to/TestExpectations (<foobarbaz1@chromium.org> 2013-04-28 04:52:41 +0000 13) Bug(foo) path/to/rebaseline-without-bug-number.html [ NeedsRebaseline ]
624c3081c0 path/to/TestExpectations (<foobarbaz1@chromium.org> 2013-06-14 20:18:46 +0000 11) crbug.com/24182 [ Debug ] path/to/rebaseline-with-modifiers.html [ NeedsRebaseline ]
624c3081c0 path/to/TestExpectations (<foobarbaz1@chromium.org> 2013-04-28 04:52:41 +0000 12) crbug.com/24182 crbug.com/234 path/to/rebaseline-without-modifiers.html [ NeedsRebaseline ]
6469e754a1 path/to/TestExpectations (<foobarbaz1@chromium.org@bbb929c8-8fbe-4397-9dbb-9b2b20218538> 2013-04-28 04:52:41 +0000 12) crbug.com/24182 path/to/rebaseline-new-revision.html [ NeedsRebaseline ]
624caaaaaa path/to/TestExpectations (<foo@chromium.org> 2013-04-28 04:52:41 +0000 12) crbug.com/24182 path/to/not-cycled-through-bots.html [ NeedsRebaseline ]
0000000000 path/to/TestExpectations (<foo@chromium.org@@bbb929c8-8fbe-4397-9dbb-9b2b20218538> 2013-04-28 04:52:41 +0000 12) crbug.com/24182 path/to/locally-changed-lined.html [ NeedsRebaseline ]
"""
self.tool.scm().blame = blame
min_revision = 9000
self.assertEqual(self.command.tests_to_rebaseline(self.tool, min_revision, print_revisions=False), (
set(['path/to/rebaseline-without-bug-number.html', 'path/to/rebaseline-with-modifiers.html', 'path/to/rebaseline-without-modifiers.html']),
5678,
'624c3081c0',
'foobarbaz1@chromium.org',
set(['24182', '234']),
True))
def test_tests_to_rebaseline_over_limit(self):
def blame(path):
result = ""
for i in range(0, self.command.MAX_LINES_TO_REBASELINE + 1):
result += "624c3081c0 path/to/TestExpectations (<foobarbaz1@chromium.org> 2013-04-28 04:52:41 +0000 13) crbug.com/24182 path/to/rebaseline-%s.html [ NeedsRebaseline ]\n" % i
return result
self.tool.scm().blame = blame
expected_list_of_tests = []
for i in range(0, self.command.MAX_LINES_TO_REBASELINE):
expected_list_of_tests.append("path/to/rebaseline-%s.html" % i)
min_revision = 9000
self.assertEqual(self.command.tests_to_rebaseline(self.tool, min_revision, print_revisions=False), (
set(expected_list_of_tests),
5678,
'624c3081c0',
'foobarbaz1@chromium.org',
set(['24182']),
True))
def test_commit_message(self):
author = "foo@chromium.org"
revision = 1234
commit = "abcd567"
bugs = set()
self.assertEqual(self.command.commit_message(author, revision, commit, bugs),
"""Auto-rebaseline for r1234
https://chromium.googlesource.com/chromium/src/+/abcd567
TBR=foo@chromium.org
""")
bugs = set(["234", "345"])
self.assertEqual(self.command.commit_message(author, revision, commit, bugs),
"""Auto-rebaseline for r1234
https://chromium.googlesource.com/chromium/src/+/abcd567
BUG=234,345
TBR=foo@chromium.org
""")
def test_no_needs_rebaseline_lines(self):
def blame(path):
return """
6469e754a1 path/to/TestExpectations (<foobarbaz1@chromium.org> 2013-06-14 20:18:46 +0000 11) crbug.com/24182 [ Debug ] path/to/norebaseline.html [ Failure ]
"""
self.tool.scm().blame = blame
self.command.execute(MockOptions(optimize=True, verbose=False, results_directory=False), [], self.tool)
self.assertEqual(self.tool.executive.calls, [])
def test_execute(self):
def blame(path):
return """
6469e754a1 path/to/TestExpectations (<foobarbaz1@chromium.org> 2013-06-14 20:18:46 +0000 11) # Test NeedsRebaseline being in a comment doesn't bork parsing.
6469e754a1 path/to/TestExpectations (<foobarbaz1@chromium.org> 2013-06-14 20:18:46 +0000 11) crbug.com/24182 [ Debug ] path/to/norebaseline.html [ Failure ]
6469e754a1 path/to/TestExpectations (<foobarbaz1@chromium.org> 2013-04-28 04:52:41 +0000 13) Bug(foo) fast/dom/prototype-taco.html [ NeedsRebaseline ]
6469e754a1 path/to/TestExpectations (<foobarbaz1@chromium.org> 2013-06-14 20:18:46 +0000 11) crbug.com/24182 [ SnowLeopard ] fast/dom/prototype-strawberry.html [ NeedsRebaseline ]
6469e754a1 path/to/TestExpectations (<foobarbaz1@chromium.org> 2013-04-28 04:52:41 +0000 12) crbug.com/24182 fast/dom/prototype-chocolate.html [ NeedsRebaseline ]
624caaaaaa path/to/TestExpectations (<foo@chromium.org> 2013-04-28 04:52:41 +0000 12) crbug.com/24182 path/to/not-cycled-through-bots.html [ NeedsRebaseline ]
0000000000 path/to/TestExpectations (<foo@chromium.org> 2013-04-28 04:52:41 +0000 12) crbug.com/24182 path/to/locally-changed-lined.html [ NeedsRebaseline ]
"""
self.tool.scm().blame = blame
test_port = self._setup_test_port()
old_builder_data = self.command.builder_data
def builder_data():
old_builder_data()
# have prototype-chocolate only fail on "MOCK Leopard".
self.command._builder_data['MOCK SnowLeopard'] = LayoutTestResults.results_from_string("""ADD_RESULTS({
"tests": {
"fast": {
"dom": {
"prototype-taco.html": {
"expected": "PASS",
"actual": "PASS TEXT",
"is_unexpected": true
},
"prototype-chocolate.html": {
"expected": "FAIL",
"actual": "PASS"
},
"prototype-strawberry.html": {
"expected": "PASS",
"actual": "IMAGE PASS",
"is_unexpected": true
}
}
}
}
});""")
return self.command._builder_data
self.command.builder_data = builder_data
self.tool.filesystem.write_text_file(test_port.path_to_generic_test_expectations_file(), """
crbug.com/24182 [ Debug ] path/to/norebaseline.html [ Rebaseline ]
Bug(foo) fast/dom/prototype-taco.html [ NeedsRebaseline ]
crbug.com/24182 [ SnowLeopard ] fast/dom/prototype-strawberry.html [ NeedsRebaseline ]
crbug.com/24182 fast/dom/prototype-chocolate.html [ NeedsRebaseline ]
crbug.com/24182 path/to/not-cycled-through-bots.html [ NeedsRebaseline ]
crbug.com/24182 path/to/locally-changed-lined.html [ NeedsRebaseline ]
""")
self._write_test_file(test_port, 'fast/dom/prototype-taco.html', "Dummy test contents")
self._write_test_file(test_port, 'fast/dom/prototype-strawberry.html', "Dummy test contents")
self._write_test_file(test_port, 'fast/dom/prototype-chocolate.html', "Dummy test contents")
self.tool.executive = MockLineRemovingExecutive()
old_exact_matches = builders._exact_matches
try:
builders._exact_matches = {
"MOCK Leopard": {"port_name": "test-mac-leopard", "specifiers": set(["mock-specifier"])},
"MOCK SnowLeopard": {"port_name": "test-mac-snowleopard", "specifiers": set(["mock-specifier"])},
}
self.command.tree_status = lambda: 'closed'
self.command.execute(MockOptions(optimize=True, verbose=False, results_directory=False, auth_refresh_token_json=None), [], self.tool)
self.assertEqual(self.tool.executive.calls, [])
self.command.tree_status = lambda: 'open'
self.tool.executive.calls = []
self.command.execute(MockOptions(optimize=True, verbose=False, results_directory=False, auth_refresh_token_json=None), [], self.tool)
self.assertEqual(self.tool.executive.calls, [
[
['python', 'echo', 'copy-existing-baselines-internal', '--suffixes', 'txt,png', '--builder', 'MOCK Leopard', '--test', 'fast/dom/prototype-chocolate.html'],
['python', 'echo', 'copy-existing-baselines-internal', '--suffixes', 'png', '--builder', 'MOCK SnowLeopard', '--test', 'fast/dom/prototype-strawberry.html'],
['python', 'echo', 'copy-existing-baselines-internal', '--suffixes', 'txt', '--builder', 'MOCK Leopard', '--test', 'fast/dom/prototype-taco.html'],
['python', 'echo', 'copy-existing-baselines-internal', '--suffixes', 'txt', '--builder', 'MOCK SnowLeopard', '--test', 'fast/dom/prototype-taco.html'],
],
[
['python', 'echo', 'rebaseline-test-internal', '--suffixes', 'txt,png', '--builder', 'MOCK Leopard', '--test', 'fast/dom/prototype-chocolate.html'],
['python', 'echo', 'rebaseline-test-internal', '--suffixes', 'png', '--builder', 'MOCK SnowLeopard', '--test', 'fast/dom/prototype-strawberry.html'],
['python', 'echo', 'rebaseline-test-internal', '--suffixes', 'txt', '--builder', 'MOCK Leopard', '--test', 'fast/dom/prototype-taco.html'],
['python', 'echo', 'rebaseline-test-internal', '--suffixes', 'txt', '--builder', 'MOCK SnowLeopard', '--test', 'fast/dom/prototype-taco.html'],
],
[
['python', 'echo', 'optimize-baselines', '--no-modify-scm', '--suffixes', 'txt,png', 'fast/dom/prototype-chocolate.html'],
['python', 'echo', 'optimize-baselines', '--no-modify-scm', '--suffixes', 'png', 'fast/dom/prototype-strawberry.html'],
['python', 'echo', 'optimize-baselines', '--no-modify-scm', '--suffixes', 'txt', 'fast/dom/prototype-taco.html'],
],
['git', 'cl', 'upload', '-f'],
['git', 'pull'],
['git', 'cl', 'land', '-f', '-v'],
['git', 'config', 'branch.auto-rebaseline-temporary-branch.rietveldissue'],
])
# The mac ports should both be removed since they're the only ones in builders._exact_matches.
self.assertEqual(self.tool.filesystem.read_text_file(test_port.path_to_generic_test_expectations_file()), """
crbug.com/24182 [ Debug ] path/to/norebaseline.html [ Rebaseline ]
Bug(foo) [ Linux Win ] fast/dom/prototype-taco.html [ NeedsRebaseline ]
crbug.com/24182 [ Linux Win ] fast/dom/prototype-chocolate.html [ NeedsRebaseline ]
crbug.com/24182 path/to/not-cycled-through-bots.html [ NeedsRebaseline ]
crbug.com/24182 path/to/locally-changed-lined.html [ NeedsRebaseline ]
""")
finally:
builders._exact_matches = old_exact_matches
def test_execute_git_cl_hangs(self):
def blame(path):
return """
6469e754a1 path/to/TestExpectations (<foobarbaz1@chromium.org> 2013-04-28 04:52:41 +0000 13) Bug(foo) fast/dom/prototype-taco.html [ NeedsRebaseline ]
"""
self.tool.scm().blame = blame
test_port = self._setup_test_port()
old_builder_data = self.command.builder_data
def builder_data():
old_builder_data()
# have prototype-chocolate only fail on "MOCK Leopard".
self.command._builder_data['MOCK SnowLeopard'] = LayoutTestResults.results_from_string("""ADD_RESULTS({
"tests": {
"fast": {
"dom": {
"prototype-taco.html": {
"expected": "PASS",
"actual": "PASS TEXT",
"is_unexpected": true
}
}
}
}
});""")
return self.command._builder_data
self.command.builder_data = builder_data
self.tool.filesystem.write_text_file(test_port.path_to_generic_test_expectations_file(), """
Bug(foo) fast/dom/prototype-taco.html [ NeedsRebaseline ]
""")
self._write_test_file(test_port, 'fast/dom/prototype-taco.html', "Dummy test contents")
old_exact_matches = builders._exact_matches
try:
builders._exact_matches = {
"MOCK SnowLeopard": {"port_name": "test-mac-snowleopard", "specifiers": set(["mock-specifier"])},
}
self.command.SECONDS_BEFORE_GIVING_UP = 0
self.command.tree_status = lambda: 'open'
self.tool.executive = MockExecutive()
self.tool.executive.calls = []
self.command.execute(MockOptions(optimize=True, verbose=False, results_directory=False, auth_refresh_token_json=None), [], self.tool)
self.assertEqual(self.tool.executive.calls, [
[
['python', 'echo', 'copy-existing-baselines-internal', '--suffixes', 'txt', '--builder', 'MOCK SnowLeopard', '--test', 'fast/dom/prototype-taco.html'],
],
[
['python', 'echo', 'rebaseline-test-internal', '--suffixes', 'txt', '--builder', 'MOCK SnowLeopard', '--test', 'fast/dom/prototype-taco.html'],
],
[['python', 'echo', 'optimize-baselines', '--no-modify-scm', '--suffixes', 'txt', 'fast/dom/prototype-taco.html']],
['git', 'cl', 'upload', '-f'],
])
finally:
builders._exact_matches = old_exact_matches
def test_execute_test_passes_everywhere(self):
def blame(path):
return """
6469e754a1 path/to/TestExpectations (<foobarbaz1@chromium.org> 2013-04-28 04:52:41 +0000 13) Bug(foo) fast/dom/prototype-taco.html [ NeedsRebaseline ]
"""
self.tool.scm().blame = blame
test_port = self._setup_test_port()
old_builder_data = self.command.builder_data
def builder_data():
self.command._builder_data['MOCK Leopard'] = self.command._builder_data['MOCK SnowLeopard'] = LayoutTestResults.results_from_string("""ADD_RESULTS({
"tests": {
"fast": {
"dom": {
"prototype-taco.html": {
"expected": "FAIL",
"actual": "PASS",
"is_unexpected": true
}
}
}
}
});""")
return self.command._builder_data
self.command.builder_data = builder_data
self.tool.filesystem.write_text_file(test_port.path_to_generic_test_expectations_file(), """
Bug(foo) fast/dom/prototype-taco.html [ NeedsRebaseline ]
""")
self._write_test_file(test_port, 'fast/dom/prototype-taco.html', "Dummy test contents")
self.tool.executive = MockLineRemovingExecutive()
old_exact_matches = builders._exact_matches
try:
builders._exact_matches = {
"MOCK Leopard": {"port_name": "test-mac-leopard", "specifiers": set(["mock-specifier"])},
"MOCK SnowLeopard": {"port_name": "test-mac-snowleopard", "specifiers": set(["mock-specifier"])},
}
self.command.tree_status = lambda: 'open'
self.command.execute(MockOptions(optimize=True, verbose=False, results_directory=False, auth_refresh_token_json=None), [], self.tool)
self.assertEqual(self.tool.executive.calls, [
[['python', 'echo', 'optimize-baselines', '--no-modify-scm', '--suffixes', '', 'fast/dom/prototype-taco.html']],
['git', 'cl', 'upload', '-f'],
['git', 'pull'],
['git', 'cl', 'land', '-f', '-v'],
['git', 'config', 'branch.auto-rebaseline-temporary-branch.rietveldissue'],
])
# The mac ports should both be removed since they're the only ones in builders._exact_matches.
self.assertEqual(self.tool.filesystem.read_text_file(test_port.path_to_generic_test_expectations_file()), """
Bug(foo) [ Linux Win ] fast/dom/prototype-taco.html [ NeedsRebaseline ]
""")
finally:
builders._exact_matches = old_exact_matches
def test_execute_use_alternate_rebaseline_branch(self):
def blame(path):
return """
6469e754a1 path/to/TestExpectations (<foobarbaz1@chromium.org> 2013-04-28 04:52:41 +0000 13) Bug(foo) fast/dom/prototype-taco.html [ NeedsRebaseline ]
"""
self.tool.scm().blame = blame
test_port = self._setup_test_port()
old_builder_data = self.command.builder_data
def builder_data():
self.command._builder_data['MOCK Win'] = LayoutTestResults.results_from_string("""ADD_RESULTS({
"tests": {
"fast": {
"dom": {
"prototype-taco.html": {
"expected": "FAIL",
"actual": "PASS",
"is_unexpected": true
}
}
}
}
});""")
return self.command._builder_data
self.command.builder_data = builder_data
self.tool.filesystem.write_text_file(test_port.path_to_generic_test_expectations_file(), """
Bug(foo) fast/dom/prototype-taco.html [ NeedsRebaseline ]
""")
self._write_test_file(test_port, 'fast/dom/prototype-taco.html', "Dummy test contents")
self.tool.executive = MockLineRemovingExecutive()
old_exact_matches = builders._exact_matches
old_branch_name = webkitpy.tool.commands.rebaseline._get_branch_name_or_ref
try:
builders._exact_matches = {
"MOCK Win": {"port_name": "test-win-win7", "specifiers": set(["mock-specifier"])},
}
self.command.tree_status = lambda: 'open'
webkitpy.tool.commands.rebaseline._get_branch_name_or_ref = lambda x: 'auto-rebaseline-temporary-branch'
self.command.execute(MockOptions(optimize=True, verbose=False, results_directory=False, auth_refresh_token_json=None), [], self.tool)
self.assertEqual(self.tool.executive.calls, [
[['python', 'echo', 'optimize-baselines', '--no-modify-scm', '--suffixes', '', 'fast/dom/prototype-taco.html']],
['git', 'cl', 'upload', '-f'],
['git', 'pull'],
['git', 'cl', 'land', '-f', '-v'],
['git', 'config', 'branch.auto-rebaseline-alt-temporary-branch.rietveldissue'],
])
self.assertEqual(self.tool.filesystem.read_text_file(test_port.path_to_generic_test_expectations_file()), """
Bug(foo) [ Linux Mac XP ] fast/dom/prototype-taco.html [ NeedsRebaseline ]
""")
finally:
builders._exact_matches = old_exact_matches
webkitpy.tool.commands.rebaseline._get_branch_name_or_ref = old_branch_name
def test_execute_stuck_on_alternate_rebaseline_branch(self):
def blame(path):
return """
6469e754a1 path/to/TestExpectations (<foobarbaz1@chromium.org> 2013-04-28 04:52:41 +0000 13) Bug(foo) fast/dom/prototype-taco.html [ NeedsRebaseline ]
"""
self.tool.scm().blame = blame
test_port = self._setup_test_port()
old_builder_data = self.command.builder_data
def builder_data():
self.command._builder_data['MOCK Win'] = LayoutTestResults.results_from_string("""ADD_RESULTS({
"tests": {
"fast": {
"dom": {
"prototype-taco.html": {
"expected": "FAIL",
"actual": "PASS",
"is_unexpected": true
}
}
}
}
});""")
return self.command._builder_data
self.command.builder_data = builder_data
self.tool.filesystem.write_text_file(test_port.path_to_generic_test_expectations_file(), """
Bug(foo) fast/dom/prototype-taco.html [ NeedsRebaseline ]
""")
self._write_test_file(test_port, 'fast/dom/prototype-taco.html', "Dummy test contents")
self.tool.executive = MockLineRemovingExecutive()
old_exact_matches = builders._exact_matches
old_branch_name = webkitpy.tool.commands.rebaseline._get_branch_name_or_ref
try:
builders._exact_matches = {
"MOCK Win": {"port_name": "test-win-win7", "specifiers": set(["mock-specifier"])},
}
self.command.tree_status = lambda: 'open'
webkitpy.tool.commands.rebaseline._get_branch_name_or_ref = lambda x: 'auto-rebaseline-alt-temporary-branch'
self.command.execute(MockOptions(optimize=True, verbose=False, results_directory=False, auth_refresh_token_json=None), [], self.tool)
self.assertEqual(self.tool.executive.calls, [
[['python', 'echo', 'optimize-baselines', '--no-modify-scm', '--suffixes', '', 'fast/dom/prototype-taco.html']],
['git', 'cl', 'upload', '-f'],
['git', 'pull'],
['git', 'cl', 'land', '-f', '-v'],
['git', 'config', 'branch.auto-rebaseline-temporary-branch.rietveldissue'],
])
self.assertEqual(self.tool.filesystem.read_text_file(test_port.path_to_generic_test_expectations_file()), """
Bug(foo) [ Linux Mac XP ] fast/dom/prototype-taco.html [ NeedsRebaseline ]
""")
finally:
builders._exact_matches = old_exact_matches
webkitpy.tool.commands.rebaseline._get_branch_name_or_ref = old_branch_name
def test_execute_with_rietveld_auth_refresh_token(self):
RIETVELD_REFRESH_TOKEN = '/creds/refresh_tokens/test_rietveld_token'
def blame(path):
return """
6469e754a1 path/to/TestExpectations (<foobarbaz1@chromium.org> 2013-04-28 04:52:41 +0000 13) Bug(foo) fast/dom/prototype-taco.html [ NeedsRebaseline ]
"""
self.tool.scm().blame = blame
test_port = self._setup_test_port()
old_builder_data = self.command.builder_data
def builder_data():
self.command._builder_data['MOCK Leopard'] = self.command._builder_data['MOCK SnowLeopard'] = LayoutTestResults.results_from_string("""ADD_RESULTS({
"tests": {
"fast": {
"dom": {
"prototype-taco.html": {
"expected": "FAIL",
"actual": "PASS",
"is_unexpected": true
}
}
}
}
});""")
return self.command._builder_data
self.command.builder_data = builder_data
self.tool.filesystem.write_text_file(test_port.path_to_generic_test_expectations_file(), """
Bug(foo) fast/dom/prototype-taco.html [ NeedsRebaseline ]
""")
self._write_test_file(test_port, 'fast/dom/prototype-taco.html', "Dummy test contents")
self.tool.executive = MockLineRemovingExecutive()
old_exact_matches = builders._exact_matches
try:
builders._exact_matches = {
"MOCK Leopard": {"port_name": "test-mac-leopard", "specifiers": set(["mock-specifier"])},
"MOCK SnowLeopard": {"port_name": "test-mac-snowleopard", "specifiers": set(["mock-specifier"])},
}
self.command.tree_status = lambda: 'open'
self.command.execute(MockOptions(optimize=True, verbose=False, results_directory=False, auth_refresh_token_json=RIETVELD_REFRESH_TOKEN), [], self.tool)
self.assertEqual(self.tool.executive.calls, [
[['python', 'echo', 'optimize-baselines', '--no-modify-scm', '--suffixes', '', 'fast/dom/prototype-taco.html']],
['git', 'cl', 'upload', '-f', '--auth-refresh-token-json', RIETVELD_REFRESH_TOKEN],
['git', 'pull'],
['git', 'cl', 'land', '-f', '-v', '--auth-refresh-token-json', RIETVELD_REFRESH_TOKEN],
['git', 'config', 'branch.auto-rebaseline-temporary-branch.rietveldissue'],
])
# The mac ports should both be removed since they're the only ones in builders._exact_matches.
self.assertEqual(self.tool.filesystem.read_text_file(test_port.path_to_generic_test_expectations_file()), """
Bug(foo) [ Linux Win ] fast/dom/prototype-taco.html [ NeedsRebaseline ]
""")
finally:
builders._exact_matches = old_exact_matches
class TestRebaselineOMatic(_BaseTestCase):
command_constructor = RebaselineOMatic
def setUp(self):
super(TestRebaselineOMatic, self).setUp()
self._logs = []
def _mock_log_to_server(self, log=''):
self._logs.append(log)
def test_run_logged_command(self):
self.command._verbose = False
self.command._post_log_to_server = self._mock_log_to_server
self.command._run_logged_command(['echo', 'foo'])
self.assertEqual(self.tool.executive.calls, [['echo', 'foo']])
self.assertEqual(self._logs, ['MOCK STDOUT'])
def test_do_one_rebaseline(self):
self.command._verbose = False
self.command._post_log_to_server = self._mock_log_to_server
oc = OutputCapture()
oc.capture_output()
self.command._do_one_rebaseline()
out, _, _ = oc.restore_output()
self.assertEqual(out, '')
self.assertEqual(self.tool.executive.calls, [
['git', 'pull'],
['/mock-checkout/third_party/WebKit/Tools/Scripts/webkit-patch', 'auto-rebaseline'],
])
self.assertEqual(self._logs, ['MOCK STDOUT'])
def test_do_one_rebaseline_verbose(self):
self.command._verbose = True
self.command._post_log_to_server = self._mock_log_to_server
oc = OutputCapture()
oc.capture_output()
self.command._do_one_rebaseline()
out, _, _ = oc.restore_output()
self.assertEqual(out, 'MOCK STDOUT\n')
self.assertEqual(self.tool.executive.calls, [
['git', 'pull'],
['/mock-checkout/third_party/WebKit/Tools/Scripts/webkit-patch', 'auto-rebaseline', '--verbose'],
])
self.assertEqual(self._logs, ['MOCK STDOUT'])
#!/usr/bin/env python
#
# Copyright (C) 2017 ShadowMan
#
import abc
from urllib import parse
from websocket.utils import generic, exceptions
class KeyValuePairs(object):
""" A Key-Value Pairs
:param self._key: Key in the key-value pairs
:param self._index: Lowercase keys
:param self._value: Value in the key-value pairs
"""
def __init__(self, key: [bytes, str], value: [bytes, str]):
self._key = generic.to_bytes(key)
self._index = generic.to_bytes(key).lower()
self._value = generic.to_bytes(value)
def pack(self):
return b': '.join([self._key, self._value])
@property
def key(self):
return self._key
@property
def value(self):
return self._value
def __str__(self):
return '<KeyValuePairs {} => {}>'.format(self._key, self._value)
def __repr__(self):
return self.__str__()
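# Illustrative usage (a sketch; assumes generic.to_bytes() encodes str
# arguments as bytes):
#
#     pair = KeyValuePairs('Host', 'example.com')
#     pair.pack()  # -> b'Host: example.com'
#
# The lowercased key kept in self._index is what HttpOptions uses for
# case-insensitive lookups, while pack() preserves the original casing.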
class HttpOptions(object):
""" Http header options
:param self._options: options key-value-pairs
"""
def __init__(self, *options, **kwargs):
self._options = dict()
self.update(*options, **kwargs)
    def get_value(self, key: [bytes, str], *, value_type=bytes):
        # Use dict.get() so a missing key falls through to the final
        # "return None" instead of raising KeyError.
        _kvp = self._options.get(generic.to_bytes(key).lower())
        if _kvp:
            if value_type is str:
                return generic.to_string(_kvp.value)
            elif value_type is bytes:
                return _kvp.value
            else:
                raise exceptions.ParameterError(
                    'value type expected str or bytes, got {}'.format(
                        value_type.__name__))
return None
def update(self, *options, **kwargs):
for option in options:
if isinstance(option, (list, tuple)):
if len(option) == 2:
k = generic.to_bytes(option[0])
v = generic.to_bytes(option[1])
self._options.update({k.lower(): KeyValuePairs(k, v)})
elif isinstance(option, KeyValuePairs):
self._options.update({option.key.lower(): option})
for k, v in kwargs.items():
k = generic.to_bytes(k)
self._options.update({k.lower(): KeyValuePairs(k, v)})
def pack(self):
return b'\r\n'.join(map(lambda p: p.pack(), self._options.values()))
def __contains__(self, key: [bytes, str]):
key = generic.to_bytes(key).lower()
return key in self._options
def __len__(self):
return len(self._options)
def __getitem__(self, key: [bytes, str]):
key = generic.to_bytes(key).lower()
return self._options.get(key, None)
def __str__(self):
return self._dumps(self._options)
@staticmethod
def _dumps(_object: dict, indent: int = 1):
rst = b'{\n'
for k, v in _object.items():
rst += (b'\t' * indent) + k + b': ' + v.value + b'\n'
rst += b'}'
return generic.to_string(rst)
def __repr__(self):
return self.__str__()
# Http version 1.0
HTTP_VERSION_1_0 = 0x0000
# Http version 1.1
HTTP_VERSION_1_1 = 0x0001
# Http GET method
HTTP_GET = b'GET'
# Http POST method
HTTP_POST = b'POST'
# Http PUT method
HTTP_PUT = b'PUT'
# Http DELETE method
HTTP_DELETE = b'DELETE'
# Http UPDATE method
HTTP_UPDATE = b'UPDATE'
# Http HEAD method
HTTP_HEAD = b'HEAD'
# Http methods
_http_methods = \
[HTTP_GET, HTTP_POST, HTTP_PUT, HTTP_DELETE, HTTP_UPDATE, HTTP_HEAD]
class _HttpMessage(object, metaclass=abc.ABCMeta):
def __init__(self, http_version, payload_data: [bytes, str], *options):
self.header = HttpOptions(*options)
if http_version not in [HTTP_VERSION_1_0, HTTP_VERSION_1_1]:
raise exceptions.ParameterError('http version invalid')
        self.http_version = b'HTTP/1.' + \
            (b'1' if http_version == HTTP_VERSION_1_1 else b'0')
self._payload_data = b'' if payload_data is None else payload_data
self._payload_data = generic.to_bytes(self._payload_data)
if len(self._payload_data):
self.header.update(('Content-Length', len(self._payload_data)))
@property
def content_length(self):
return len(self._payload_data)
    @abc.abstractmethod
    def pack(self):
        pass
    @abc.abstractmethod
    def __str__(self):
        pass
    @abc.abstractmethod
    def __repr__(self):
        pass
class HttpRequest(_HttpMessage):
def __init__(self, method, url: [bytes, str], *options,
http_version=HTTP_VERSION_1_1, payload_data=None):
super(HttpRequest, self).__init__(http_version, payload_data, *options)
self._url_split_rst = parse.urlparse(url)
self._request_url = generic.to_bytes(url)
method = generic.to_bytes(method).upper()
if method not in _http_methods:
raise exceptions.ParameterError('method parameter invalid')
self._http_method = method
def pack(self):
request_line = b' '.join(
[self._http_method, self._request_url, self.http_version])
return b'\r\n'.join(
[request_line, self.header.pack(), b'', self._payload_data])
def __str__(self):
return "<HttpRequest method='{}' url='{}'>".format(
generic.to_string(self._http_method),
generic.to_string(self._request_url))
def __repr__(self):
return self.__str__()
@property
def url_scheme(self):
return generic.to_string(self._url_split_rst.scheme)
@property
def url_netloc(self):
return generic.to_string(self._url_split_rst.netloc)
@property
def url_path(self):
return generic.to_string(self._url_split_rst.path)
_status_codes = {
# Informational.
100: b'Continue',
101: b'Switching Protocols',
    200: b'OK',
# Client Error.
400: b'Bad Request',
401: b'Unauthorized',
403: b'Forbidden',
404: b'Not Found',
405: b'Method Not Allowed',
# Server Error.
500: b'Internal Server Error',
503: b'Service Unavailable',
505: b'Http Version Not Supported',
}
class HttpResponse(_HttpMessage):
def __init__(self, status_code, *options,
http_version=HTTP_VERSION_1_1, payload_data=None):
super(HttpResponse, self).__init__(http_version, payload_data, *options)
if not isinstance(status_code, int):
raise exceptions.ParameterError(
                'status code expected int, got {}'.format(type(status_code)))
self._status_code = status_code
self._description = _status_codes.get(self._status_code, b'')
def pack(self):
status_code = generic.to_bytes(self._status_code)
response_line = b' '.join(
[self.http_version, status_code, self._description])
return b'\r\n'.join(
[response_line, self.header.pack(), b'', self._payload_data])
def __str__(self):
return '<HttpResponse status={}>'.format(self._status_code)
def __repr__(self):
return self.__str__()
def factory_http_message(raw_http_message: bytes):
if isinstance(raw_http_message, str):
raw_http_message = generic.to_bytes(raw_http_message)
if not isinstance(raw_http_message, bytes):
        raise exceptions.ParameterError('raw message expected bytes type')
    if b'\r\n\r\n' not in raw_http_message:
raise exceptions.ParameterError('raw message may not be complete')
split_rst = raw_http_message.split(b'\r\n\r\n', 1)
    if len(split_rst) == 1:
header, payload = split_rst[0], None
else:
header, payload = split_rst
header_lines = header.split(b'\r\n')
_http_options = []
for option in header_lines[1:]:
k, v = option.split(b':', 1)
_http_options.append((k.strip(), v.strip()))
a, b, c = header_lines[0].split(b' ', 2)
# request or response
if a in _http_methods:
# raw message is http-request
_http_version = \
HTTP_VERSION_1_1 if c == b'HTTP/1.1' else HTTP_VERSION_1_0
return HttpRequest(a, b, *_http_options,
http_version=_http_version,
payload_data=payload)
else:
# raw message is http-response
_http_version = \
HTTP_VERSION_1_1 if a == b'HTTP/1.1' else HTTP_VERSION_1_0
return HttpResponse(int(b), *_http_options,
http_version=_http_version,
payload_data=payload)
if __name__ == '__main__':
key_value_pairs = KeyValuePairs('key1', 'value1')
assert key_value_pairs.key == b'key1'
assert key_value_pairs.value == b'value1'
assert key_value_pairs.pack() == b'key1: value1'
ops = HttpOptions(('key1', 'value1'), ('key2', 'value2'))
try:
assert ops.pack() == b'key1: value1\r\nkey2: value2'
except AssertionError:
assert ops.pack() == b'key2: value2\r\nkey1: value1'
assert 'key1' in ops
assert 'KEY1' in ops
assert 'KeY1' in ops
assert 'key-1' not in ops
req = HttpRequest('GET', '/url_path?query=value#frag=footer',
('key1', 'value1'), ('key2', 'value2'), payload_data='+')
print(req)
print(req.pack())
rsp = HttpResponse(
200, ('key1', 'value1'), ('key2', 'value2'), payload_data=b'+')
print(rsp)
print(rsp.pack())
msg0 = factory_http_message(req.pack())
print(msg0)
assert 'Content-Length' in msg0.header
msg1 = factory_http_message(rsp.pack())
print(msg1)
assert 'Content-Length' in msg1.header
|
|
import sys
import unittest
from test import support
from test.test_grammar import (VALID_UNDERSCORE_LITERALS,
INVALID_UNDERSCORE_LITERALS)
L = [
('0', 0),
('1', 1),
('9', 9),
('10', 10),
('99', 99),
('100', 100),
('314', 314),
(' 314', 314),
('314 ', 314),
(' \t\t 314 \t\t ', 314),
(repr(sys.maxsize), sys.maxsize),
(' 1x', ValueError),
(' 1 ', 1),
(' 1\02 ', ValueError),
('', ValueError),
(' ', ValueError),
(' \t\t ', ValueError),
("\u0200", ValueError)
]
class IntSubclass(int):
pass
class IntTestCases(unittest.TestCase):
def test_basic(self):
self.assertEqual(int(314), 314)
self.assertEqual(int(3.14), 3)
# Check that conversion from float truncates towards zero
self.assertEqual(int(-3.14), -3)
self.assertEqual(int(3.9), 3)
self.assertEqual(int(-3.9), -3)
self.assertEqual(int(3.5), 3)
self.assertEqual(int(-3.5), -3)
self.assertEqual(int("-3"), -3)
self.assertEqual(int(" -3 "), -3)
self.assertEqual(int("\N{EM SPACE}-3\N{EN SPACE}"), -3)
# Different base:
self.assertEqual(int("10",16), 16)
# Test conversion from strings and various anomalies
for s, v in L:
for sign in "", "+", "-":
for prefix in "", " ", "\t", " \t\t ":
ss = prefix + sign + s
vv = v
if sign == "-" and v is not ValueError:
vv = -v
try:
self.assertEqual(int(ss), vv)
except ValueError:
pass
s = repr(-1-sys.maxsize)
x = int(s)
self.assertEqual(x+1, -sys.maxsize)
self.assertIsInstance(x, int)
# should return int
self.assertEqual(int(s[1:]), sys.maxsize+1)
# should return int
x = int(1e100)
self.assertIsInstance(x, int)
x = int(-1e100)
self.assertIsInstance(x, int)
# SF bug 434186: 0x80000000/2 != 0x80000000>>1.
# Worked by accident in Windows release build, but failed in debug build.
# Failed in all Linux builds.
x = -1-sys.maxsize
self.assertEqual(x >> 1, x//2)
x = int('1' * 600)
self.assertIsInstance(x, int)
self.assertRaises(TypeError, int, 1, 12)
self.assertEqual(int('0o123', 0), 83)
self.assertEqual(int('0x123', 16), 291)
# Bug 1679: "0x" is not a valid hex literal
self.assertRaises(ValueError, int, "0x", 16)
self.assertRaises(ValueError, int, "0x", 0)
self.assertRaises(ValueError, int, "0o", 8)
self.assertRaises(ValueError, int, "0o", 0)
self.assertRaises(ValueError, int, "0b", 2)
self.assertRaises(ValueError, int, "0b", 0)
# SF bug 1334662: int(string, base) wrong answers
# Various representations of 2**32 evaluated to 0
# rather than 2**32 in previous versions
self.assertEqual(int('100000000000000000000000000000000', 2), 4294967296)
self.assertEqual(int('102002022201221111211', 3), 4294967296)
self.assertEqual(int('10000000000000000', 4), 4294967296)
self.assertEqual(int('32244002423141', 5), 4294967296)
self.assertEqual(int('1550104015504', 6), 4294967296)
self.assertEqual(int('211301422354', 7), 4294967296)
self.assertEqual(int('40000000000', 8), 4294967296)
self.assertEqual(int('12068657454', 9), 4294967296)
self.assertEqual(int('4294967296', 10), 4294967296)
self.assertEqual(int('1904440554', 11), 4294967296)
self.assertEqual(int('9ba461594', 12), 4294967296)
self.assertEqual(int('535a79889', 13), 4294967296)
self.assertEqual(int('2ca5b7464', 14), 4294967296)
self.assertEqual(int('1a20dcd81', 15), 4294967296)
self.assertEqual(int('100000000', 16), 4294967296)
self.assertEqual(int('a7ffda91', 17), 4294967296)
self.assertEqual(int('704he7g4', 18), 4294967296)
self.assertEqual(int('4f5aff66', 19), 4294967296)
self.assertEqual(int('3723ai4g', 20), 4294967296)
self.assertEqual(int('281d55i4', 21), 4294967296)
self.assertEqual(int('1fj8b184', 22), 4294967296)
self.assertEqual(int('1606k7ic', 23), 4294967296)
self.assertEqual(int('mb994ag', 24), 4294967296)
self.assertEqual(int('hek2mgl', 25), 4294967296)
self.assertEqual(int('dnchbnm', 26), 4294967296)
self.assertEqual(int('b28jpdm', 27), 4294967296)
self.assertEqual(int('8pfgih4', 28), 4294967296)
self.assertEqual(int('76beigg', 29), 4294967296)
self.assertEqual(int('5qmcpqg', 30), 4294967296)
self.assertEqual(int('4q0jto4', 31), 4294967296)
self.assertEqual(int('4000000', 32), 4294967296)
self.assertEqual(int('3aokq94', 33), 4294967296)
self.assertEqual(int('2qhxjli', 34), 4294967296)
self.assertEqual(int('2br45qb', 35), 4294967296)
self.assertEqual(int('1z141z4', 36), 4294967296)
# tests with base 0
# this fails on 3.0, but in 2.x the old octal syntax is allowed
        self.assertEqual(int(' 0o123 ', 0), 83)
self.assertEqual(int('000', 0), 0)
self.assertEqual(int('0o123', 0), 83)
self.assertEqual(int('0x123', 0), 291)
self.assertEqual(int('0b100', 0), 4)
self.assertEqual(int(' 0O123 ', 0), 83)
self.assertEqual(int(' 0X123 ', 0), 291)
self.assertEqual(int(' 0B100 ', 0), 4)
# without base still base 10
self.assertEqual(int('0123'), 123)
self.assertEqual(int('0123', 10), 123)
# tests with prefix and base != 0
self.assertEqual(int('0x123', 16), 291)
self.assertEqual(int('0o123', 8), 83)
self.assertEqual(int('0b100', 2), 4)
self.assertEqual(int('0X123', 16), 291)
self.assertEqual(int('0O123', 8), 83)
self.assertEqual(int('0B100', 2), 4)
# the code has special checks for the first character after the
# type prefix
self.assertRaises(ValueError, int, '0b2', 2)
self.assertRaises(ValueError, int, '0b02', 2)
self.assertRaises(ValueError, int, '0B2', 2)
self.assertRaises(ValueError, int, '0B02', 2)
self.assertRaises(ValueError, int, '0o8', 8)
self.assertRaises(ValueError, int, '0o08', 8)
self.assertRaises(ValueError, int, '0O8', 8)
self.assertRaises(ValueError, int, '0O08', 8)
self.assertRaises(ValueError, int, '0xg', 16)
self.assertRaises(ValueError, int, '0x0g', 16)
self.assertRaises(ValueError, int, '0Xg', 16)
self.assertRaises(ValueError, int, '0X0g', 16)
# SF bug 1334662: int(string, base) wrong answers
# Checks for proper evaluation of 2**32 + 1
self.assertEqual(int('100000000000000000000000000000001', 2), 4294967297)
self.assertEqual(int('102002022201221111212', 3), 4294967297)
self.assertEqual(int('10000000000000001', 4), 4294967297)
self.assertEqual(int('32244002423142', 5), 4294967297)
self.assertEqual(int('1550104015505', 6), 4294967297)
self.assertEqual(int('211301422355', 7), 4294967297)
self.assertEqual(int('40000000001', 8), 4294967297)
self.assertEqual(int('12068657455', 9), 4294967297)
self.assertEqual(int('4294967297', 10), 4294967297)
self.assertEqual(int('1904440555', 11), 4294967297)
self.assertEqual(int('9ba461595', 12), 4294967297)
self.assertEqual(int('535a7988a', 13), 4294967297)
self.assertEqual(int('2ca5b7465', 14), 4294967297)
self.assertEqual(int('1a20dcd82', 15), 4294967297)
self.assertEqual(int('100000001', 16), 4294967297)
self.assertEqual(int('a7ffda92', 17), 4294967297)
self.assertEqual(int('704he7g5', 18), 4294967297)
self.assertEqual(int('4f5aff67', 19), 4294967297)
self.assertEqual(int('3723ai4h', 20), 4294967297)
self.assertEqual(int('281d55i5', 21), 4294967297)
self.assertEqual(int('1fj8b185', 22), 4294967297)
self.assertEqual(int('1606k7id', 23), 4294967297)
self.assertEqual(int('mb994ah', 24), 4294967297)
self.assertEqual(int('hek2mgm', 25), 4294967297)
self.assertEqual(int('dnchbnn', 26), 4294967297)
self.assertEqual(int('b28jpdn', 27), 4294967297)
self.assertEqual(int('8pfgih5', 28), 4294967297)
self.assertEqual(int('76beigh', 29), 4294967297)
self.assertEqual(int('5qmcpqh', 30), 4294967297)
self.assertEqual(int('4q0jto5', 31), 4294967297)
self.assertEqual(int('4000001', 32), 4294967297)
self.assertEqual(int('3aokq95', 33), 4294967297)
self.assertEqual(int('2qhxjlj', 34), 4294967297)
self.assertEqual(int('2br45qc', 35), 4294967297)
self.assertEqual(int('1z141z5', 36), 4294967297)
def test_underscores(self):
for lit in VALID_UNDERSCORE_LITERALS:
if any(ch in lit for ch in '.eEjJ'):
continue
self.assertEqual(int(lit, 0), eval(lit))
self.assertEqual(int(lit, 0), int(lit.replace('_', ''), 0))
for lit in INVALID_UNDERSCORE_LITERALS:
if any(ch in lit for ch in '.eEjJ'):
continue
self.assertRaises(ValueError, int, lit, 0)
# Additional test cases with bases != 0, only for the constructor:
self.assertEqual(int("1_00", 3), 9)
self.assertEqual(int("0_100"), 100) # not valid as a literal!
self.assertEqual(int(b"1_00"), 100) # byte underscore
self.assertRaises(ValueError, int, "_100")
self.assertRaises(ValueError, int, "+_100")
self.assertRaises(ValueError, int, "1__00")
self.assertRaises(ValueError, int, "100_")
@support.cpython_only
def test_small_ints(self):
# Bug #3236: Return small longs from PyLong_FromString
self.assertIs(int('10'), 10)
self.assertIs(int('-1'), -1)
self.assertIs(int(b'10'), 10)
self.assertIs(int(b'-1'), -1)
def test_no_args(self):
self.assertEqual(int(), 0)
def test_keyword_args(self):
# Test invoking int() using keyword arguments.
self.assertEqual(int(x=1.2), 1)
self.assertEqual(int('100', base=2), 4)
self.assertEqual(int(x='100', base=2), 4)
self.assertRaises(TypeError, int, base=10)
self.assertRaises(TypeError, int, base=0)
def test_int_base_limits(self):
"""Testing the supported limits of the int() base parameter."""
self.assertEqual(int('0', 5), 0)
with self.assertRaises(ValueError):
int('0', 1)
with self.assertRaises(ValueError):
int('0', 37)
with self.assertRaises(ValueError):
int('0', -909) # An old magic value base from Python 2.
with self.assertRaises(ValueError):
int('0', base=0-(2**234))
with self.assertRaises(ValueError):
int('0', base=2**234)
# Bases 2 through 36 are supported.
for base in range(2,37):
self.assertEqual(int('0', base=base), 0)
def test_int_base_bad_types(self):
"""Not integer types are not valid bases; issue16772."""
with self.assertRaises(TypeError):
int('0', 5.5)
with self.assertRaises(TypeError):
int('0', 5.0)
def test_int_base_indexable(self):
class MyIndexable(object):
def __init__(self, value):
self.value = value
def __index__(self):
return self.value
# Check out of range bases.
for base in 2**100, -2**100, 1, 37:
with self.assertRaises(ValueError):
int('43', base)
# Check in-range bases.
self.assertEqual(int('101', base=MyIndexable(2)), 5)
self.assertEqual(int('101', base=MyIndexable(10)), 101)
self.assertEqual(int('101', base=MyIndexable(36)), 1 + 36**2)
def test_non_numeric_input_types(self):
# Test possible non-numeric types for the argument x, including
# subclasses of the explicitly documented accepted types.
class CustomStr(str): pass
class CustomBytes(bytes): pass
class CustomByteArray(bytearray): pass
factories = [
bytes,
bytearray,
lambda b: CustomStr(b.decode()),
CustomBytes,
CustomByteArray,
memoryview,
]
try:
from array import array
except ImportError:
pass
else:
factories.append(lambda b: array('B', b))
for f in factories:
x = f(b'100')
with self.subTest(type(x)):
self.assertEqual(int(x), 100)
if isinstance(x, (str, bytes, bytearray)):
self.assertEqual(int(x, 2), 4)
else:
msg = "can't convert non-string"
with self.assertRaisesRegex(TypeError, msg):
int(x, 2)
with self.assertRaisesRegex(ValueError, 'invalid literal'):
int(f(b'A' * 0x10))
def test_int_memoryview(self):
self.assertEqual(int(memoryview(b'123')[1:3]), 23)
self.assertEqual(int(memoryview(b'123\x00')[1:3]), 23)
self.assertEqual(int(memoryview(b'123 ')[1:3]), 23)
self.assertEqual(int(memoryview(b'123A')[1:3]), 23)
self.assertEqual(int(memoryview(b'1234')[1:3]), 23)
def test_string_float(self):
self.assertRaises(ValueError, int, '1.2')
def test_intconversion(self):
# Test __int__()
class ClassicMissingMethods:
pass
self.assertRaises(TypeError, int, ClassicMissingMethods())
class MissingMethods(object):
pass
self.assertRaises(TypeError, int, MissingMethods())
class Foo0:
def __int__(self):
return 42
self.assertEqual(int(Foo0()), 42)
class Classic:
pass
for base in (object, Classic):
class IntOverridesTrunc(base):
def __int__(self):
return 42
def __trunc__(self):
return -12
self.assertEqual(int(IntOverridesTrunc()), 42)
class JustTrunc(base):
def __trunc__(self):
return 42
self.assertEqual(int(JustTrunc()), 42)
class ExceptionalTrunc(base):
def __trunc__(self):
1 / 0
with self.assertRaises(ZeroDivisionError):
int(ExceptionalTrunc())
for trunc_result_base in (object, Classic):
class Integral(trunc_result_base):
def __int__(self):
return 42
class TruncReturnsNonInt(base):
def __trunc__(self):
return Integral()
self.assertEqual(int(TruncReturnsNonInt()), 42)
class NonIntegral(trunc_result_base):
def __trunc__(self):
# Check that we avoid infinite recursion.
return NonIntegral()
class TruncReturnsNonIntegral(base):
def __trunc__(self):
return NonIntegral()
try:
int(TruncReturnsNonIntegral())
except TypeError as e:
self.assertEqual(str(e),
"__trunc__ returned non-Integral"
" (type NonIntegral)")
else:
self.fail("Failed to raise TypeError with %s" %
((base, trunc_result_base),))
# Regression test for bugs.python.org/issue16060.
class BadInt(trunc_result_base):
def __int__(self):
return 42.0
class TruncReturnsBadInt(base):
def __trunc__(self):
return BadInt()
with self.assertRaises(TypeError):
int(TruncReturnsBadInt())
def test_int_subclass_with_int(self):
class MyInt(int):
def __int__(self):
return 42
class BadInt(int):
def __int__(self):
return 42.0
my_int = MyInt(7)
self.assertEqual(my_int, 7)
self.assertEqual(int(my_int), 42)
self.assertRaises(TypeError, int, BadInt())
def test_int_returns_int_subclass(self):
class BadInt:
def __int__(self):
return True
class BadInt2(int):
def __int__(self):
return True
class TruncReturnsBadInt:
def __trunc__(self):
return BadInt()
class TruncReturnsIntSubclass:
def __trunc__(self):
return True
bad_int = BadInt()
with self.assertWarns(DeprecationWarning):
n = int(bad_int)
self.assertEqual(n, 1)
self.assertIs(type(n), int)
bad_int = BadInt2()
with self.assertWarns(DeprecationWarning):
n = int(bad_int)
self.assertEqual(n, 1)
self.assertIs(type(n), int)
bad_int = TruncReturnsBadInt()
with self.assertWarns(DeprecationWarning):
n = int(bad_int)
self.assertEqual(n, 1)
self.assertIs(type(n), int)
good_int = TruncReturnsIntSubclass()
n = int(good_int)
self.assertEqual(n, 1)
self.assertIs(type(n), int)
n = IntSubclass(good_int)
self.assertEqual(n, 1)
self.assertIs(type(n), IntSubclass)
def test_error_message(self):
def check(s, base=None):
with self.assertRaises(ValueError,
msg="int(%r, %r)" % (s, base)) as cm:
if base is None:
int(s)
else:
int(s, base)
self.assertEqual(cm.exception.args[0],
"invalid literal for int() with base %d: %r" %
(10 if base is None else base, s))
check('\xbd')
check('123\xbd')
check(' 123 456 ')
check('123\x00')
# SF bug 1545497: embedded NULs were not detected with explicit base
check('123\x00', 10)
check('123\x00 245', 20)
check('123\x00 245', 16)
check('123\x00245', 20)
check('123\x00245', 16)
# byte string with embedded NUL
check(b'123\x00')
check(b'123\x00', 10)
# non-UTF-8 byte string
check(b'123\xbd')
check(b'123\xbd', 10)
# lone surrogate in Unicode string
check('123\ud800')
check('123\ud800', 10)
if __name__ == "__main__":
unittest.main()
|
|
# Copyright 2015-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Cloud Custodian Lambda Provisioning Support
docs/lambda.rst
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import abc
import base64
import imp
import hashlib
import io
import json
import logging
import os
import sys
import time
import tempfile
import zipfile
from boto3.s3.transfer import S3Transfer, TransferConfig
from botocore.exceptions import ClientError
from concurrent.futures import ThreadPoolExecutor
# Static event mapping to help simplify cwe rules creation
from c7n.cwe import CloudWatchEvents
from c7n.logs_support import _timestamp_from_string
from c7n.utils import parse_s3, local_session
log = logging.getLogger('custodian.lambda')
class PythonPackageArchive(object):
"""Creates a zip file for python lambda functions.
:param tuple modules: the Python modules to add to the archive
Amazon doesn't give us straightforward docs here, only `an example
<http://docs.aws.amazon.com/lambda/latest/dg/with-s3-example-deployment-pkg.html#with-s3-example-deployment-pkg-python>`_,
from which we can infer that they simply unzip the file into a directory on
``sys.path``. So what we do is locate all of the ``modules`` specified, and
add all of the ``.py`` files we find for these modules to a zip file.
In addition to the modules specified during instantiation, you can add
arbitrary additional files to the archive using :py:func:`add_file` and
:py:func:`add_contents`. For example, since we only add ``*.py`` files for
you, you'll need to manually add files for any compiled extension modules
that your Lambda requires.
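    A minimal, illustrative sketch:
        archive = PythonPackageArchive('c7n')
        archive.add_contents('config.json', '{"policies": []}')
        archive.close()
        archive.get_filenames()  # everything that was added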
"""
def __init__(self, *modules):
self._temp_archive_file = tempfile.NamedTemporaryFile()
self._zip_file = zipfile.ZipFile(
self._temp_archive_file, mode='w',
compression=zipfile.ZIP_DEFLATED)
self._closed = False
self.add_modules(*modules)
@property
def path(self):
return self._temp_archive_file.name
@property
def size(self):
if not self._closed:
raise ValueError("Archive not closed, size not accurate")
return os.stat(self._temp_archive_file.name).st_size
def add_modules(self, *modules):
"""Add the named Python modules to the archive. For consistency's sake
we only add ``*.py`` files, not ``*.pyc``. We also don't add other
files, including compiled modules. You'll have to add such files
manually using :py:meth:`add_file`.
"""
for module in modules:
path = imp.find_module(module)[1]
if os.path.isfile(path):
if not path.endswith('.py'):
raise ValueError('We need a *.py source file instead of ' + path)
self.add_file(path)
elif os.path.isdir(path):
for root, dirs, files in os.walk(path):
arc_prefix = os.path.relpath(root, os.path.dirname(path))
for f in files:
if not f.endswith('.py'):
continue
f_path = os.path.join(root, f)
dest_path = os.path.join(arc_prefix, f)
self.add_file(f_path, dest_path)
def add_file(self, src, dest=None):
"""Add the file at ``src`` to the archive.
If ``dest`` is ``None`` then it is added under just the original
filename. So ``add_file('foo/bar.txt')`` ends up at ``bar.txt`` in the
archive, while ``add_file('bar.txt', 'foo/bar.txt')`` ends up at
``foo/bar.txt``.
"""
dest = dest or os.path.basename(src)
with open(src, 'rb') as fp:
contents = fp.read()
self.add_contents(dest, contents)
def add_py_file(self, src, dest=None):
"""This is a special case of :py:meth:`add_file` that helps for adding
a ``py`` when a ``pyc`` may be present as well. So for example, if
``__file__`` is ``foo.pyc`` and you do:
.. code-block:: python
archive.add_py_file(__file__)
then this method will add ``foo.py`` instead if it exists, and raise
``IOError`` if it doesn't.
"""
src = src[:-1] if src.endswith('.pyc') else src
self.add_file(src, dest)
def add_contents(self, dest, contents):
"""Add file contents to the archive under ``dest``.
If ``dest`` is a path, it will be added compressed and world-readable
(user-writeable). You may also pass a :py:class:`~zipfile.ZipInfo` for
custom behavior.
"""
assert not self._closed, "Archive closed"
if not isinstance(dest, zipfile.ZipInfo):
            dest = zinfo(dest)  # see zinfo() below for caveats
self._zip_file.writestr(dest, contents)
def close(self):
"""Close the zip file.
Note underlying tempfile is removed when archive is garbage collected.
"""
self._closed = True
self._zip_file.close()
log.debug(
"Created custodian lambda archive size: %0.2fmb",
(os.path.getsize(self._temp_archive_file.name) / (
1024.0 * 1024.0)))
return self
def remove(self):
"""Dispose of the temp file for garbage collection."""
if self._temp_archive_file:
self._temp_archive_file = None
def get_checksum(self):
"""Return the b64 encoded sha256 checksum of the archive."""
assert self._closed, "Archive not closed"
with open(self._temp_archive_file.name, 'rb') as fh:
return base64.b64encode(checksum(fh, hashlib.sha256()))
def get_bytes(self):
"""Return the entire zip file as a byte string. """
assert self._closed, "Archive not closed"
return open(self._temp_archive_file.name, 'rb').read()
def get_reader(self):
"""Return a read-only :py:class:`~zipfile.ZipFile`."""
assert self._closed, "Archive not closed"
buf = io.BytesIO(self.get_bytes())
return zipfile.ZipFile(buf, mode='r')
def get_filenames(self):
"""Return a list of filenames in the archive."""
return [n.filename for n in self.get_reader().filelist]
def checksum(fh, hasher, blocksize=65536):
buf = fh.read(blocksize)
while len(buf) > 0:
hasher.update(buf)
buf = fh.read(blocksize)
return hasher.digest()
def custodian_archive():
"""Create a lambda code archive for running custodian."""
return PythonPackageArchive('c7n', 'pkg_resources', 'ipaddress')
class LambdaManager(object):
""" Provides CRUD operations around lambda functions
"""
def __init__(self, session_factory, s3_asset_path=None):
self.session_factory = session_factory
self.client = self.session_factory().client('lambda')
self.s3_asset_path = s3_asset_path
def list_functions(self, prefix=None):
p = self.client.get_paginator('list_functions')
for rp in p.paginate():
for f in rp.get('Functions', []):
if not prefix:
yield f
elif f['FunctionName'].startswith(prefix):
yield f
def publish(self, func, alias=None, role=None, s3_uri=None):
result, changed = self._create_or_update(
func, role, s3_uri, qualifier=alias)
func.arn = result['FunctionArn']
if alias and changed:
func.alias = self.publish_alias(result, alias)
elif alias:
func.alias = "%s:%s" % (func.arn, alias)
else:
func.alias = func.arn
for e in func.get_events(self.session_factory):
if e.add(func):
log.debug(
"Added event source: %s to function: %s",
e, func.alias)
return result
def remove(self, func, alias=None):
log.info("Removing lambda function %s", func.name)
for e in func.get_events(self.session_factory):
e.remove(func)
try:
self.client.delete_function(FunctionName=func.name)
except ClientError as e:
if e.response['Error']['Code'] != 'ResourceNotFoundException':
raise
def metrics(self, funcs, start, end, period=5 * 60):
def func_metrics(f):
metrics = local_session(self.session_factory).client('cloudwatch')
values = {}
            for m in ('Errors', 'Invocations', 'Duration', 'Throttles'):
values[m] = metrics.get_metric_statistics(
Namespace="AWS/Lambda",
Dimensions=[{
'Name': 'FunctionName',
'Value': (
isinstance(f, dict) and f['FunctionName'] or f.name)}],
Statistics=["Sum"],
StartTime=start,
EndTime=end,
Period=period,
MetricName=m)['Datapoints']
return values
with ThreadPoolExecutor(max_workers=3) as w:
results = list(w.map(func_metrics, funcs))
for m, f in zip(results, funcs):
if isinstance(f, dict):
f['Metrics'] = m
return results
def logs(self, func, start, end):
logs = self.session_factory().client('logs')
group_name = "/aws/lambda/%s" % func.name
log.info("Fetching logs from group: %s" % group_name)
try:
logs.describe_log_groups(
logGroupNamePrefix=group_name)
except ClientError as e:
if e.response['Error']['Code'] == 'ResourceNotFoundException':
return
raise
try:
log_streams = logs.describe_log_streams(
logGroupName=group_name,
orderBy="LastEventTime", limit=3, descending=True)
except ClientError as e:
if e.response['Error']['Code'] == 'ResourceNotFoundException':
return
raise
start = _timestamp_from_string(start)
end = _timestamp_from_string(end)
for s in reversed(log_streams['logStreams']):
result = logs.get_log_events(
logGroupName=group_name,
logStreamName=s['logStreamName'],
startTime=start,
endTime=end,
)
for e in result['events']:
yield e
@staticmethod
def delta_function(old_config, new_config):
for k in new_config:
if k not in old_config or new_config[k] != old_config[k]:
return True
@staticmethod
def diff_tags(old_tags, new_tags):
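        # Returns (tags to add or update, tag keys to remove); e.g. (illustrative)
        # diff_tags({'a': '1', 'b': '2'}, {'a': '1', 'c': '3'}) -> ({'c': '3'}, ['b'])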
add = {}
remove = set()
for k,v in new_tags.items():
if k not in old_tags or old_tags[k] != v:
add[k] = v
for k in old_tags:
if k not in new_tags:
remove.add(k)
return add, list(remove)
def _create_or_update(self, func, role=None, s3_uri=None, qualifier=None):
role = func.role or role
assert role, "Lambda function role must be specified"
archive = func.get_archive()
existing = self.get(func.name, qualifier)
if s3_uri:
# TODO: support versioned buckets
bucket, key = self._upload_func(s3_uri, func, archive)
code_ref = {'S3Bucket': bucket, 'S3Key': key}
else:
code_ref = {'ZipFile': archive.get_bytes()}
changed = False
if existing:
old_config = existing['Configuration']
if archive.get_checksum() != old_config['CodeSha256']:
log.debug("Updating function %s code", func.name)
params = dict(FunctionName=func.name, Publish=True)
params.update(code_ref)
result = self.client.update_function_code(**params)
changed = True
# TODO/Consider also set publish above to false, and publish
# after configuration change?
new_config = func.get_config()
new_config['Role'] = role
del new_config['Runtime']
new_tags = new_config.pop('Tags', {})
if self.delta_function(old_config, new_config):
log.debug("Updating function: %s config" % func.name)
result = self.client.update_function_configuration(**new_config)
changed = True
# tag dance
base_arn = old_config['FunctionArn']
if base_arn.count(':') > 6: # trim version/alias
base_arn = base_arn.rsplit(':', 1)[0]
old_tags = self.client.list_tags(Resource=base_arn)['Tags']
tags_to_add, tags_to_remove = self.diff_tags(old_tags, new_tags)
if tags_to_add:
log.debug("Adding/updating tags: %s config" % func.name)
self.client.tag_resource(
Resource=base_arn, Tags=tags_to_add)
if tags_to_remove:
log.debug("Removing tags: %s config" % func.name)
self.client.untag_resource(
Resource=base_arn, TagKeys=tags_to_remove)
if not changed:
result = old_config
else:
log.info('Publishing custodian policy lambda function %s', func.name)
params = func.get_config()
params.update({'Publish': True, 'Code': code_ref, 'Role': role})
result = self.client.create_function(**params)
changed = True
return result, changed
def _upload_func(self, s3_uri, func, archive):
_, bucket, key_prefix = parse_s3(s3_uri)
key = "%s/%s" % (key_prefix, func.name)
transfer = S3Transfer(
self.session_factory().client('s3'),
config=TransferConfig(
multipart_threshold=1024 * 1024 * 4))
transfer.upload_file(
archive.path,
bucket=bucket,
key=key,
extra_args={
'ServerSideEncryption': 'AES256'})
return bucket, key
def publish_alias(self, func_data, alias):
"""Create or update an alias for the given function.
"""
if not alias:
return func_data['FunctionArn']
func_name = func_data['FunctionName']
func_version = func_data['Version']
exists = resource_exists(
self.client.get_alias, FunctionName=func_name, Name=alias)
if not exists:
log.debug("Publishing custodian lambda alias %s", alias)
alias_result = self.client.create_alias(
FunctionName=func_name,
Name=alias,
FunctionVersion=func_version)
else:
if (exists['FunctionVersion'] == func_version and
exists['Name'] == alias):
return exists['AliasArn']
log.debug('Updating custodian lambda alias %s', alias)
alias_result = self.client.update_alias(
FunctionName=func_name,
Name=alias,
FunctionVersion=func_version)
return alias_result['AliasArn']
def get(self, func_name, qualifier=None):
params = {'FunctionName': func_name}
if qualifier:
params['Qualifier'] = qualifier
return resource_exists(
self.client.get_function, **params)
def resource_exists(op, NotFound="ResourceNotFoundException", *args, **kw):
try:
return op(*args, **kw)
except ClientError as e:
if e.response['Error']['Code'] == NotFound:
return False
raise
class AbstractLambdaFunction:
"""Abstract base class for lambda functions."""
__metaclass__ = abc.ABCMeta
alias = None
@abc.abstractproperty
def name(self):
"""Name for the lambda function"""
@abc.abstractproperty
def runtime(self):
""" """
@abc.abstractproperty
def description(self):
""" """
@abc.abstractproperty
def handler(self):
""" """
@abc.abstractproperty
def memory_size(self):
""" """
@abc.abstractproperty
def timeout(self):
""" """
@abc.abstractproperty
def role(self):
""" """
@abc.abstractproperty
def subnets(self):
""" """
@abc.abstractproperty
def security_groups(self):
""" """
@abc.abstractproperty
def dead_letter_config(self):
""" """
@abc.abstractproperty
def environment(self):
""" """
@abc.abstractproperty
def kms_key_arn(self):
""" """
@abc.abstractproperty
def tracing_config(self):
""" """
@abc.abstractproperty
def tags(self):
""" """
@abc.abstractmethod
def get_events(self, session_factory):
"""event sources that should be bound to this lambda."""
@abc.abstractmethod
def get_archive(self):
"""Return the lambda distribution archive object."""
def get_config(self):
conf = {
'FunctionName': self.name,
'MemorySize': self.memory_size,
'Role': self.role,
'Description': self.description,
'Runtime': self.runtime,
'Handler': self.handler,
'Timeout': self.timeout,
'DeadLetterConfig': self.dead_letter_config,
'Environment': self.environment,
'KMSKeyArn': self.kms_key_arn,
'TracingConfig': self.tracing_config,
'Tags': self.tags}
if self.subnets and self.security_groups:
conf['VpcConfig'] = {
'SubnetIds': self.subnets,
'SecurityGroupIds': self.security_groups}
return conf
class LambdaFunction(AbstractLambdaFunction):
def __init__(self, func_data, archive):
self.func_data = func_data
required = set((
'name', 'handler', 'memory_size',
'timeout', 'role', 'runtime',
'description'))
missing = required.difference(func_data)
if missing:
raise ValueError("Missing required keys %s" % " ".join(missing))
self.archive = archive
@property
def name(self):
return self.func_data['name']
@property
def description(self):
return self.func_data['description']
@property
def handler(self):
return self.func_data['handler']
@property
def memory_size(self):
return self.func_data['memory_size']
@property
def timeout(self):
return self.func_data['timeout']
@property
def runtime(self):
return self.func_data['runtime']
@property
def role(self):
return self.func_data['role']
@property
def security_groups(self):
return self.func_data.get('security_groups', None)
@property
def subnets(self):
return self.func_data.get('subnets', None)
@property
def dead_letter_config(self):
return self.func_data.get('dead_letter_config', {})
@property
def environment(self):
return self.func_data.get('environment', {})
@property
def kms_key_arn(self):
return self.func_data.get('kms_key_arn', '')
@property
def tracing_config(self):
return self.func_data.get('tracing_config', {})
@property
def tags(self):
return self.func_data.get('tags', {})
def get_events(self, session_factory):
return self.func_data.get('events', ())
def get_archive(self):
return self.archive
PolicyHandlerTemplate = """\
from c7n import handler
def run(event, context):
return handler.dispatch_event(event, context)
"""
class PolicyLambda(AbstractLambdaFunction):
"""Wraps a custodian policy to turn it into lambda function.
"""
handler = "custodian_policy.run"
runtime = "python%d.%d" % sys.version_info[:2]
def __init__(self, policy):
self.policy = policy
self.archive = custodian_archive()
@property
def name(self):
return "custodian-%s" % self.policy.name
@property
def description(self):
return self.policy.data.get(
'description', 'cloud-custodian lambda policy')
@property
def role(self):
return self.policy.data['mode'].get('role', '')
@property
def memory_size(self):
return self.policy.data['mode'].get('memory', 512)
@property
def timeout(self):
return self.policy.data['mode'].get('timeout', 60)
@property
def security_groups(self):
return None
@property
def subnets(self):
return None
@property
def dead_letter_config(self):
return self.policy.data['mode'].get('dead_letter_config', {})
@property
def environment(self):
return self.policy.data['mode'].get('environment', {})
@property
def kms_key_arn(self):
return self.policy.data['mode'].get('kms_key_arn', '')
@property
def tracing_config(self):
return self.policy.data['mode'].get('tracing_config', {})
@property
def tags(self):
return self.policy.data['mode'].get('tags', {})
def get_events(self, session_factory):
events = []
if self.policy.data['mode']['type'] == 'config-rule':
events.append(
ConfigRule(self.policy.data['mode'], session_factory))
else:
events.append(
CloudWatchEventSource(
self.policy.data['mode'], session_factory))
return events
def get_archive(self):
self.archive.add_contents(
'config.json', json.dumps(
{'policies': [self.policy.data]}, indent=2))
self.archive.add_contents('custodian_policy.py', PolicyHandlerTemplate)
self.archive.close()
return self.archive
def zinfo(fname):
"""Amazon lambda exec environment setup can break itself
if zip files aren't constructed a particular way.
ie. It respects file perm attributes from the zip including
those that prevent lambda from working. Namely lambda
extracts code as one user, and executes code as a different
user. Without permissions for the executing user to read
the file the lambda function is broken.
Python's default zipfile.writestr does a 0600 perm which
we modify here as a workaround.
"""
info = zipfile.ZipInfo(fname)
# Grant other users permissions to read
# http://unix.stackexchange.com/questions/14705/
info.external_attr = 0o644 << 16
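    # 0o644 (rw-r--r--) shifted into the upper 16 bits is where the zip
    # format stores unix mode bits, so the executing user can read the file.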
return info
class CloudWatchEventSource(object):
"""Subscribe a lambda to cloud watch events.
Cloud watch events supports a number of different event
sources, from periodic timers with cron syntax, to
real time instance state notifications, cloud trail
events, and realtime asg membership changes.
Event Pattern for Instance State
.. code-block:: json
{
"source": ["aws.ec2"],
"detail-type": ["EC2 Instance State-change Notification"],
"detail": { "state": ["pending"]}
}
Event Pattern for Cloud Trail API
.. code-block:: json
{
"detail-type": ["AWS API Call via CloudTrail"],
"detail": {
"eventSource": ["s3.amazonaws.com"],
"eventName": ["CreateBucket", "DeleteBucket"]
}
}
"""
ASG_EVENT_MAPPING = {
'launch-success': 'EC2 Instance Launch Successful',
'launch-failure': 'EC2 Instance Launch Unsuccessful',
'terminate-success': 'EC2 Instance Terminate Successful',
'terminate-failure': 'EC2 Instance Terminate Unsuccessful'}
def __init__(self, data, session_factory, prefix="custodian-"):
self.session_factory = session_factory
self.session = session_factory()
self.client = self.session.client('events')
self.data = data
self.prefix = prefix
def _make_notification_id(self, function_name):
if not function_name.startswith(self.prefix):
return "%s%s" % (self.prefix, function_name)
return function_name
def get(self, rule_name):
return resource_exists(
self.client.describe_rule,
Name=self._make_notification_id(rule_name))
@staticmethod
def delta(src, tgt):
"""Given two cwe rules determine if the configuration is the same.
Name is already implied.
"""
for k in ['State', 'EventPattern', 'ScheduleExpression']:
if src.get(k) != tgt.get(k):
return True
return False
def __repr__(self):
return "<CWEvent Type:%s Events:%s>" % (
self.data.get('type'),
', '.join(map(str, self.data.get('events', []))))
def resolve_cloudtrail_payload(self, payload):
sources = self.data.get('sources', [])
events = []
        for e in self.data.get('events'):
            if not isinstance(e, dict):
                events.append(e)
                event_info = CloudWatchEvents.get(e)
                if event_info is None:
                    continue
            else:
                event_info = e
                events.append(e['event'])
            sources.append(event_info['source'])
payload['detail'] = {
'eventSource': list(set(sources)),
'eventName': events}
def render_event_pattern(self):
event_type = self.data.get('type')
payload = {}
if event_type == 'cloudtrail':
payload['detail-type'] = ['AWS API Call via CloudTrail']
self.resolve_cloudtrail_payload(payload)
if event_type == 'cloudtrail':
if 'signin.amazonaws.com' in payload['detail']['eventSource']:
payload['detail-type'] = ['AWS Console Sign In via CloudTrail']
elif event_type == "ec2-instance-state":
payload['source'] = ['aws.ec2']
payload['detail-type'] = [
"EC2 Instance State-change Notification"]
# Technically could let empty be all events, but likely misconfig
payload['detail'] = {"state": self.data.get('events', [])}
elif event_type == "asg-instance-state":
payload['source'] = ['aws.autoscaling']
events = []
for e in self.data.get('events', []):
events.append(self.ASG_EVENT_MAPPING.get(e, e))
payload['detail-type'] = events
elif event_type == 'periodic':
pass
else:
raise ValueError(
"Unknown lambda event source type: %s" % event_type)
if not payload:
return None
return json.dumps(payload)
def add(self, func):
params = dict(
Name=func.name, Description=func.description, State='ENABLED')
pattern = self.render_event_pattern()
if pattern:
params['EventPattern'] = pattern
schedule = self.data.get('schedule')
if schedule:
params['ScheduleExpression'] = schedule
rule = self.get(func.name)
if rule and self.delta(rule, params):
log.debug("Updating cwe rule for %s" % self)
response = self.client.put_rule(**params)
elif not rule:
log.debug("Creating cwe rule for %s" % (self))
response = self.client.put_rule(**params)
else:
response = {'RuleArn': rule['Arn']}
try:
self.session.client('lambda').add_permission(
FunctionName=func.name,
StatementId=func.name,
SourceArn=response['RuleArn'],
Action='lambda:InvokeFunction',
Principal='events.amazonaws.com')
log.debug('Added lambda invoke cwe rule permission')
except ClientError as e:
if e.response['Error']['Code'] != 'ResourceConflictException':
raise
# Add Targets
found = False
response = self.client.list_targets_by_rule(Rule=func.name)
# CWE seems to be quite picky about function arns (no aliases/versions)
func_arn = func.arn
if func_arn.count(':') > 6:
func_arn, version = func_arn.rsplit(':', 1)
for t in response['Targets']:
if func_arn == t['Arn']:
found = True
if found:
return
log.debug('Creating cwe rule target for %s on func:%s' % (
self, func_arn))
self.client.put_targets(
Rule=func.name, Targets=[{"Id": func.name, "Arn": func_arn}])
return True
def update(self, func):
self.add(func)
    def pause(self, func):
        try:
            self.client.disable_rule(Name=func.name)
        except ClientError:
            pass
    def resume(self, func):
        try:
            self.client.enable_rule(Name=func.name)
        except ClientError:
            pass
def remove(self, func):
if self.get(func.name):
try:
targets = self.client.list_targets_by_rule(
Rule=func.name)['Targets']
self.client.remove_targets(
Rule=func.name,
Ids=[t['Id'] for t in targets])
except ClientError as e:
log.warning(
"Could not remove targets for rule %s error: %s",
func.name, e)
self.client.delete_rule(Name=func.name)
class BucketLambdaNotification(object):
""" Subscribe a lambda to bucket notifications directly. """
def __init__(self, data, session_factory, bucket):
self.data = data
self.session_factory = session_factory
self.session = session_factory()
self.bucket = bucket
def delta(self, src, tgt):
        for k in ['Id', 'LambdaFunctionArn', 'Events', 'Filter']:
if src.get(k) != tgt.get(k):
return True
return False
def _get_notifies(self, s3, func):
notifies = s3.get_bucket_notification_configuration(
Bucket=self.bucket['Name'])
found = False
for f in notifies.get('LambdaFunctionConfigurations', []):
if f['Id'] != func.name:
continue
found = f
return notifies, found
def add(self, func):
s3 = self.session.client('s3')
notifies, found = self._get_notifies(s3, func)
notifies.pop('ResponseMetadata', None)
func_arn = func.arn
if func_arn.rsplit(':', 1)[-1].isdigit():
func_arn = func_arn.rsplit(':', 1)[0]
n_params = {
'Id': func.name,
'LambdaFunctionArn': func_arn,
'Events': self.data.get('events', ['s3:ObjectCreated:*'])}
        if self.data.get('filters'):
            n_params['Filter'] = {
                'Key': {'FilterRules': self.data['filters']}}
if found:
if self.delta(found, n_params):
notifies['LambdaFunctionConfigurations'].remove(found)
else:
log.info("Bucket lambda notification present")
return
lambda_client = self.session.client('lambda')
params = dict(
FunctionName=func.name,
StatementId=self.bucket['Name'],
Action='lambda:InvokeFunction',
Principal='s3.amazonaws.com')
if self.data.get('account_s3'):
params['SourceAccount'] = self.data['account_s3']
params['SourceArn'] = 'arn:aws:s3:::*'
else:
            params['SourceArn'] = 'arn:aws:s3:::%s' % self.bucket['Name']
try:
lambda_client.add_permission(**params)
except ClientError as e:
if e.response['Error']['Code'] != 'ResourceConflictException':
raise
notifies.setdefault('LambdaFunctionConfigurations', []).append(n_params)
s3.put_bucket_notification_configuration(
Bucket=self.bucket['Name'], NotificationConfiguration=notifies)
return True
def remove(self, func):
s3 = self.session.client('s3')
notifies, found = self._get_notifies(s3, func)
if not found:
return
lambda_client = self.session.client('lambda')
try:
response = lambda_client.remove_permission(
                FunctionName=func.name,
StatementId=self.bucket['Name'])
log.debug("Removed lambda permission result: %s" % response)
except ClientError as e:
if e.response['Error']['Code'] != 'ResourceNotFoundException':
raise
notifies['LambdaFunctionConfigurations'].remove(found)
s3.put_bucket_notification_configuration(
Bucket=self.bucket['Name'],
NotificationConfiguration=notifies)
class CloudWatchLogSubscription(object):
""" Subscribe a lambda to a log group[s]
"""
iam_delay = 1.5
def __init__(self, session_factory, log_groups, filter_pattern):
self.log_groups = log_groups
self.filter_pattern = filter_pattern
self.session_factory = session_factory
self.session = session_factory()
self.client = self.session.client('logs')
def add(self, func):
lambda_client = self.session.client('lambda')
for group in self.log_groups:
log.info(
"Creating subscription filter for %s" % group['logGroupName'])
region = group['arn'].split(':', 4)[3]
try:
lambda_client.add_permission(
FunctionName=func.name,
StatementId=group['logGroupName'][1:].replace('/', '-'),
SourceArn=group['arn'],
Action='lambda:InvokeFunction',
Principal='logs.%s.amazonaws.com' % region)
log.debug("Added lambda ipo nvoke log group permission")
# iam eventual consistency and propagation
time.sleep(self.iam_delay)
except ClientError as e:
if e.response['Error']['Code'] != 'ResourceConflictException':
raise
# Consistent put semantics / ie no op if extant
self.client.put_subscription_filter(
logGroupName=group['logGroupName'],
filterName=func.name,
filterPattern=self.filter_pattern,
destinationArn=func.alias or func.arn)
def remove(self, func):
lambda_client = self.session.client('lambda')
for group in self.log_groups:
try:
response = lambda_client.remove_permission(
FunctionName=func.name,
StatementId=group['logGroupName'][1:].replace('/', '-'))
log.debug("Removed lambda permission result: %s" % response)
except ClientError as e:
if e.response['Error']['Code'] != 'ResourceNotFoundException':
raise
try:
response = self.client.delete_subscription_filter(
logGroupName=group['logGroupName'], filterName=func.name)
log.debug("Removed subscription filter from: %s",
group['logGroupName'])
except ClientError as e:
if e.response['Error']['Code'] != 'ResourceNotFoundException':
raise
class SNSSubscription(object):
""" Subscribe a lambda to one or more SNS topics.
"""
iam_delay = 1.5
def __init__(self, session_factory, topic_arns):
self.topic_arns = topic_arns
self.session_factory = session_factory
self.session = session_factory()
self.client = self.session.client('sns')
@staticmethod
def _parse_arn(arn):
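        # e.g. (illustrative) 'arn:aws:sns:us-east-1:123456789012:mytopic'
        #      -> ('us-east-1', 'mytopic', 'sns-topic-mytopic')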
parts = arn.split(':')
region, topic_name = parts[3], parts[5]
statement_id = 'sns-topic-' + topic_name
return region, topic_name, statement_id
def add(self, func):
lambda_client = self.session.client('lambda')
for arn in self.topic_arns:
region, topic_name, statement_id = self._parse_arn(arn)
log.info("Subscribing %s to %s" % (func.name, topic_name))
# Add permission to lambda for sns invocation.
try:
lambda_client.add_permission(
FunctionName=func.name,
StatementId='sns-topic-' + topic_name,
SourceArn=arn,
Action='lambda:InvokeFunction',
Principal='sns.amazonaws.com')
log.debug("Added permission for sns to invoke lambda")
# iam eventual consistency and propagation
time.sleep(self.iam_delay)
except ClientError as e:
if e.response['Error']['Code'] != 'ResourceConflictException':
raise
# Subscribe the lambda to the topic.
topic = self.session.resource('sns').Topic(arn)
topic.subscribe(Protocol='lambda', Endpoint=func.arn) # idempotent
def remove(self, func):
lambda_client = self.session.client('lambda')
for topic_arn in self.topic_arns:
region, topic_name, statement_id = self._parse_arn(topic_arn)
try:
response = lambda_client.remove_permission(
FunctionName=func.name,
StatementId=statement_id)
log.debug("Removed lambda permission result: %s" % response)
except ClientError as e:
if e.response['Error']['Code'] != 'ResourceNotFoundException':
raise
paginator = self.client.get_paginator('list_subscriptions_by_topic')
class Done(Exception):
pass
try:
for page in paginator.paginate(TopicArn=topic_arn):
for subscription in page['Subscriptions']:
if subscription['Endpoint'] != func.arn:
continue
try:
response = self.client.unsubscribe(
SubscriptionArn=subscription['SubscriptionArn'])
log.debug("Unsubscribed %s from %s" %
(func.name, topic_name))
except ClientError as e:
code = e.response['Error']['Code']
if code != 'ResourceNotFoundException':
raise
raise Done # break out of both for loops
except Done:
pass
class BucketSNSNotification(SNSSubscription):
""" Subscribe a lambda to bucket notifications via SNS. """
def __init__(self, session_factory, bucket, topic=None):
# NB: We are overwriting __init__ vs. extending.
self.session_factory = session_factory
self.session = session_factory()
self.topic_arns = self.get_topic(bucket) if topic is None else [topic]
self.client = self.session.client('sns')
def get_topic(self, bucket):
session = local_session(self.session_factory)
sns = session.client('sns')
s3 = session.client('s3')
notifies = bucket['Notification']
if 'TopicConfigurations' not in notifies:
notifies['TopicConfigurations'] = []
all_topics = notifies['TopicConfigurations']
topic_arns = [t['TopicArn'] for t in all_topics
if 's3:ObjectCreated:*' in t['Events']]
if not topic_arns:
# No suitable existing topic. Create one.
topic_arn = sns.create_topic(Name=bucket['Name'])['TopicArn']
policy = {
'Statement': [{
'Action': 'SNS:Publish',
'Effect': 'Allow',
'Resource': topic_arn,
'Principal': {'Service': 's3.amazonaws.com'}}]}
sns.set_topic_attributes(
TopicArn=topic_arn,
AttributeName='Policy',
AttributeValue=json.dumps(policy))
notifies['TopicConfigurations'].append({
'TopicArn': topic_arn,
'Events': ['s3:ObjectCreated:*']})
s3.put_bucket_notification_configuration(Bucket=bucket['Name'],
NotificationConfiguration=notifies)
topic_arns = [topic_arn]
return topic_arns
class ConfigRule(object):
"""Use a lambda as a custom config rule.
"""
def __init__(self, data, session_factory):
self.data = data
self.session_factory = session_factory
self.session = session_factory()
self.client = self.session.client('config')
def __repr__(self):
return "<ConfigRule>"
def get_rule_params(self, func):
# config does not support versions/aliases on lambda funcs
func_arn = func.arn
if func_arn.count(':') > 6:
func_arn, version = func_arn.rsplit(':', 1)
params = dict(
ConfigRuleName=func.name,
Description=func.description,
Source={
'Owner': 'CUSTOM_LAMBDA',
'SourceIdentifier': func_arn,
'SourceDetails': [{
'EventSource': 'aws.config',
'MessageType': 'ConfigurationItemChangeNotification'}]
}
)
if isinstance(func, PolicyLambda):
manager = func.policy.get_resource_manager()
if hasattr(manager.get_model(), 'config_type'):
config_type = manager.get_model().config_type
else:
raise Exception("You may have attempted to deploy a config "
"based lambda function with an unsupported config type. "
"The most recent AWS config types are here: http://docs.aws"
".amazon.com/config/latest/developerguide/resource"
"-config-reference.html.")
params['Scope'] = {
'ComplianceResourceTypes': [config_type]}
else:
            params['Scope'] = {'ComplianceResourceTypes': self.data.get(
                'resource-types', ())}
return params
def get(self, rule_name):
rules = resource_exists(
self.client.describe_config_rules,
ConfigRuleNames=[rule_name],
NotFound="NoSuchConfigRuleException")
if not rules:
return rules
return rules['ConfigRules'][0]
@staticmethod
def delta(rule, params):
# doesn't seem like we have anything mutable at the moment,
# since we restrict params, maybe reusing the same policy name
# with a different resource type.
if rule['Scope'] != params['Scope']:
return True
if rule['Source'] != params['Source']:
return True
        if rule.get('Description', '') != params.get('Description', ''):
return True
return False
def add(self, func):
rule = self.get(func.name)
params = self.get_rule_params(func)
if rule and self.delta(rule, params):
log.debug("Updating config rule for %s" % self)
rule.update(params)
return self.client.put_config_rule(ConfigRule=rule)
elif rule:
log.debug("Config rule up to date")
return
try:
self.session.client('lambda').add_permission(
FunctionName=func.name,
StatementId=func.name,
SourceAccount=func.arn.split(':')[4],
Action='lambda:InvokeFunction',
Principal='config.amazonaws.com')
except ClientError as e:
if e.response['Error']['Code'] != 'ResourceConflictException':
raise
log.debug("Adding config rule for %s" % func.name)
return self.client.put_config_rule(ConfigRule=params)
def remove(self, func):
rule = self.get(func.name)
if not rule:
return
try:
self.client.delete_config_rule(
ConfigRuleName=func.name)
except ClientError as e:
if e.response['Error']['Code'] != 'ResourceNotFoundException':
raise
|
|
# -*- coding: utf-8 -*-
"""
Features
--------
Extra features Elements.
"""
import json
from jinja2 import Template
from six import text_type, binary_type
from branca.utilities import (
_parse_size, _locations_mirror, _locations_tolist, image_to_url,
none_min, none_max, iter_points
)
from branca.element import (Element, Figure, JavascriptLink, CssLink,
MacroElement)
from branca.colormap import LinearColormap
from .map import Layer, Icon, Marker, Popup, FeatureGroup
class WmsTileLayer(Layer):
"""
Creates a Web Map Service (WMS) layer.
Parameters
----------
url : str
The url of the WMS server.
name : string, default None
The name of the Layer, as it will appear in LayerControls
layers : str, default None
The names of the layers to be displayed.
styles : str, default None
Comma-separated list of WMS styles.
format : str, default None
The format of the service output.
        Ex: 'image/png'
transparent: bool, default True
Whether the layer shall allow transparency.
version : str, default '1.1.1'
Version of the WMS service to use.
attr : str, default None
The attribution of the service.
Will be displayed in the bottom right corner.
    overlay : bool, default True
Adds the layer as an optional overlay (True) or the base layer (False).
control : bool, default True
Whether the Layer will be included in LayerControls
For more information see:
http://leafletjs.com/reference.html#tilelayer-wms
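    A minimal, illustrative sketch (URL and names are hypothetical):
        wms = WmsTileLayer('https://example.com/wms', name='my wms',
                           layers='layer1', format='image/png')
        # wms.add_to(folium_map)  # assuming an existing folium Map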
"""
def __init__(self, url, name=None, layers=None, styles=None, format=None,
transparent=True, version='1.1.1', attr=None, overlay=True,
control=True):
super(WmsTileLayer, self).__init__(overlay=overlay, control=control, name=name) # noqa
self.url = url
self.attribution = attr if attr is not None else ''
# Options.
self.layers = layers if layers else ''
self.styles = styles if styles else ''
self.format = format if format else 'image/jpeg'
self.transparent = transparent
self.version = version
# FIXME: Should be map CRS!
        # self.crs = crs if crs else 'null'
self._template = Template(u"""
{% macro script(this, kwargs) %}
var {{this.get_name()}} = L.tileLayer.wms(
'{{ this.url }}',
{
layers: '{{ this.layers }}',
styles: '{{ this.styles }}',
format: '{{ this.format }}',
transparent: {{ this.transparent.__str__().lower() }},
version: '{{ this.version }}',
{% if this.attribution %} attribution: '{{this.attribution}}'{% endif %}
}
).addTo({{this._parent.get_name()}});
{% endmacro %}
""") # noqa
class RegularPolygonMarker(Marker):
"""
Custom markers using the Leaflet Data Vis Framework.
Parameters
----------
location: tuple or list, default None
Latitude and Longitude of Marker (Northing, Easting)
color: string, default 'black'
Marker line color
opacity: float, default 1
Line opacity, scale 0-1
weight: int, default 2
Stroke weight in pixels
fill_color: string, default 'blue'
Marker fill color
fill_opacity: float, default 1
Marker fill opacity
number_of_sides: int, default 4
Number of polygon sides
rotation: int, default 0
Rotation angle in degrees
radius: int, default 15
Marker radius, in pixels
popup: string or folium.Popup, default None
Input text or visualization for object. Can pass either text,
or a folium.Popup object.
If None, no popup will be displayed.
Returns
-------
Polygon marker names and HTML in obj.template_vars
For more information, see https://humangeo.github.io/leaflet-dvf/
"""
def __init__(self, location, color='black', opacity=1, weight=2,
fill_color='blue', fill_opacity=1,
number_of_sides=4, rotation=0, radius=15, popup=None):
super(RegularPolygonMarker, self).__init__(location, popup=popup)
self._name = 'RegularPolygonMarker'
self.color = color
self.opacity = opacity
self.weight = weight
self.fill_color = fill_color
self.fill_opacity = fill_opacity
self.number_of_sides = number_of_sides
self.rotation = rotation
self.radius = radius
self._template = Template(u"""
{% macro script(this, kwargs) %}
var {{this.get_name()}} = new L.RegularPolygonMarker(
new L.LatLng({{this.location[0]}},{{this.location[1]}}),
{
icon : new L.Icon.Default(),
color: '{{this.color}}',
opacity: {{this.opacity}},
weight: {{this.weight}},
fillColor: '{{this.fill_color}}',
fillOpacity: {{this.fill_opacity}},
numberOfSides: {{this.number_of_sides}},
rotation: {{this.rotation}},
radius: {{this.radius}}
}
)
.addTo({{this._parent.get_name()}});
{% endmacro %}
""")
def render(self, **kwargs):
"""Renders the HTML representation of the element."""
super(RegularPolygonMarker, self).render()
figure = self.get_root()
assert isinstance(figure, Figure), ("You cannot render this Element "
"if it's not in a Figure.")
figure.header.add_child(
JavascriptLink("https://cdnjs.cloudflare.com/ajax/libs/leaflet-dvf/0.3.0/leaflet-dvf.markers.min.js"), # noqa
name='dvf_js')
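# A short usage sketch for the class above (assumes ``folium.Map`` is
# available from the enclosing package): draw a hexagonal marker.
def _example_regular_polygon_marker():  # pragma: no cover - illustrative only
    import folium
    m = folium.Map(location=[45.5, -122.5], zoom_start=8)
    m.add_child(RegularPolygonMarker(
        location=[45.5, -122.5], number_of_sides=6, radius=10,
        fill_color='green', popup='hexagon'))
    return m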
class Vega(Element):
"""
Creates a Vega chart element.
Parameters
----------
data: JSON-like str or object
The Vega description of the chart.
It can also be any object that has a method `to_json`,
so that you can (for instance) provide a `vincent` chart.
width: int or str, default None
The width of the output element.
If None, either data['width'] (if available) or '100%' will be used.
Ex: 120, '120px', '80%'
height: int or str, default None
The height of the output element.
        If None, either data['height'] (if available) or '100%' will be used.
Ex: 120, '120px', '80%'
left: int or str, default '0%'
The horizontal distance of the output with respect to the parent
HTML object. Ex: 120, '120px', '80%'
top: int or str, default '0%'
The vertical distance of the output with respect to the parent
HTML object. Ex: 120, '120px', '80%'
position: str, default 'relative'
The `position` argument that the CSS shall contain.
Ex: 'relative', 'absolute'
"""
def __init__(self, data, width=None, height=None,
left="0%", top="0%", position='relative'):
super(Vega, self).__init__()
self._name = 'Vega'
self.data = data.to_json() if hasattr(data, 'to_json') else data
# FIXME:
        if isinstance(self.data, text_type) or isinstance(self.data, binary_type):
self.data = json.loads(self.data)
# Size Parameters.
self.width = _parse_size(self.data.get('width', '100%') if
width is None else width)
self.height = _parse_size(self.data.get('height', '100%') if
height is None else height)
self.left = _parse_size(left)
self.top = _parse_size(top)
self.position = position
self._template = Template(u"")
def render(self, **kwargs):
"""Renders the HTML representation of the element."""
self.json = json.dumps(self.data)
self._parent.html.add_child(Element(Template("""
<div id="{{this.get_name()}}"></div>
""").render(this=self, kwargs=kwargs)), name=self.get_name())
self._parent.script.add_child(Element(Template("""
vega_parse({{this.json}},{{this.get_name()}});
""").render(this=self)), name=self.get_name())
figure = self.get_root()
assert isinstance(figure, Figure), ("You cannot render this Element "
"if it's not in a Figure.")
figure.header.add_child(Element(Template("""
<style> #{{this.get_name()}} {
position : {{this.position}};
width : {{this.width[0]}}{{this.width[1]}};
height: {{this.height[0]}}{{this.height[1]}};
left: {{this.left[0]}}{{this.left[1]}};
top: {{this.top[0]}}{{this.top[1]}};
                }
            </style>
""").render(this=self, **kwargs)), name=self.get_name())
figure.header.add_child(
JavascriptLink("https://cdnjs.cloudflare.com/ajax/libs/d3/3.5.5/d3.min.js"), # noqa
name='d3')
figure.header.add_child(
JavascriptLink("https://cdnjs.cloudflare.com/ajax/libs/vega/1.4.3/vega.min.js"), # noqa
name='vega')
figure.header.add_child(
JavascriptLink("https://code.jquery.com/jquery-2.1.0.min.js"),
name='jquery')
figure.script.add_child(
Template("""function vega_parse(spec, div) {
vg.parse.spec(spec, function(chart) { chart({el:div}).update(); });}"""), # noqa
name='vega_parse')
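# A hedged sketch of the common Vega-in-a-popup pattern (the spec dict below
# is a placeholder; a real Vega 1.x spec also carries 'data', 'scales' and
# 'marks' sections, e.g. as produced by vincent's ``to_json``).
def _example_vega_popup():  # pragma: no cover - illustrative only
    import folium
    spec = {'width': 400, 'height': 200}  # placeholder for a full Vega spec
    m = folium.Map(location=[46.0, 3.0], zoom_start=5)
    popup = Popup(max_width=450)
    popup.add_child(Vega(spec, width=450, height=250))
    m.add_child(Marker([46.0, 3.0], popup=popup))
    return m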
class GeoJson(Layer):
"""
Creates a GeoJson object for plotting into a Map.
Parameters
----------
data: file, dict or str.
The GeoJSON data you want to plot.
* If file, then data will be read in the file and fully
embedded in Leaflet's JavaScript.
* If dict, then data will be converted to JSON and embedded
in the JavaScript.
* If str, then data will be passed to the JavaScript as-is.
    style_function: function, default None
        A function mapping a GeoJson Feature to a style dict.
    highlight_function: function, default None
        A function mapping a GeoJson Feature to a style dict
        that is applied when the feature is hovered over.
name : string, default None
The name of the Layer, as it will appear in LayerControls
    overlay : bool, default True
        Adds the layer as an optional overlay (True) or as the base layer (False).
control : bool, default True
Whether the Layer will be included in LayerControls
smooth_factor: float, default None
How much to simplify the polyline on each zoom level. More means
better performance and smoother look, and less means more accurate
representation. Leaflet defaults to 1.0.
Examples
--------
>>> # Providing file that shall be embedded.
>>> GeoJson(open('foo.json'))
>>> # Providing filename that shall not be embedded.
>>> GeoJson('foo.json')
>>> # Providing dict.
>>> GeoJson(json.load(open('foo.json')))
>>> # Providing string.
>>> GeoJson(open('foo.json').read())
    >>> # Provide a style_function that colors all states green but Alabama.
>>> style_function = lambda x: {'fillColor': '#0000ff' if
... x['properties']['name']=='Alabama' else
... '#00ff00'}
>>> GeoJson(geojson, style_function=style_function)
"""
def __init__(self, data, style_function=None, name=None,
overlay=True, control=True, smooth_factor=None,
highlight_function=None):
super(GeoJson, self).__init__(name=name, overlay=overlay,
control=control)
self._name = 'GeoJson'
if hasattr(data, 'read'):
self.embed = True
self.data = json.load(data)
elif isinstance(data, dict):
self.embed = True
self.data = data
elif isinstance(data, text_type) or isinstance(data, binary_type):
if data.lstrip()[0] in '[{': # This is a GeoJSON inline string
self.embed = True
self.data = json.loads(data)
else: # This is a filename
self.embed = False
self.data = data
elif data.__class__.__name__ in ['GeoDataFrame', 'GeoSeries']:
self.embed = True
if hasattr(data, '__geo_interface__'):
# We have a GeoPandas 0.2 object.
self.data = json.loads(json.dumps(data.to_crs(epsg='4326').__geo_interface__)) # noqa
elif hasattr(data, 'columns'):
# We have a GeoDataFrame 0.1
self.data = json.loads(data.to_crs(epsg='4326').to_json())
else:
msg = 'Unable to transform this object to a GeoJSON.'
raise ValueError(msg)
else:
raise ValueError('Unhandled object {!r}.'.format(data))
if style_function is None:
def style_function(x):
return {}
self.style_function = style_function
self.highlight = highlight_function is not None
if highlight_function is None:
def highlight_function(x):
return {}
self.highlight_function = highlight_function
self.smooth_factor = smooth_factor
self._template = Template(u"""
{% macro script(this, kwargs) %}
{% if this.highlight %}
{{this.get_name()}}_onEachFeature = function onEachFeature(feature, layer) {
layer.on({
mouseout: function(e) {
e.target.setStyle(e.target.feature.properties.style);},
mouseover: function(e) {
e.target.setStyle(e.target.feature.properties.highlight);},
click: function(e) {
{{this._parent.get_name()}}.fitBounds(e.target.getBounds());}
});
};
{% endif %}
var {{this.get_name()}} = L.geoJson(
{% if this.embed %}{{this.style_data()}}{% else %}"{{this.data}}"{% endif %}
{% if this.smooth_factor is not none or this.highlight %}
, {
{% if this.smooth_factor is not none %}
smoothFactor:{{this.smooth_factor}}
{% endif %}
{% if this.highlight %}
{% if this.smooth_factor is not none %}
,
{% endif %}
onEachFeature: {{this.get_name()}}_onEachFeature
{% endif %}
}
{% endif %}
).addTo({{this._parent.get_name()}});
{{this.get_name()}}.setStyle(function(feature) {return feature.properties.style;});
{% endmacro %}
""") # noqa
def style_data(self):
"""
Applies `self.style_function` to each feature of `self.data` and
returns a corresponding JSON output.
"""
if 'features' not in self.data.keys():
# Catch case when GeoJSON is just a single Feature or a geometry.
if not (isinstance(self.data, dict) and 'geometry' in self.data.keys()): # noqa
# Catch case when GeoJSON is just a geometry.
self.data = {'type': 'Feature', 'geometry': self.data}
self.data = {'type': 'FeatureCollection', 'features': [self.data]}
for feature in self.data['features']:
feature.setdefault('properties', {}).setdefault('style', {}).update(self.style_function(feature)) # noqa
feature.setdefault('properties', {}).setdefault('highlight', {}).update(self.highlight_function(feature)) # noqa
return json.dumps(self.data, sort_keys=True)
def _get_self_bounds(self):
"""
        Computes the bounds of the object itself (not including its children)
in the form [[lat_min, lon_min], [lat_max, lon_max]]
"""
if not self.embed:
raise ValueError('Cannot compute bounds of non-embedded GeoJSON.')
if 'features' not in self.data.keys():
# Catch case when GeoJSON is just a single Feature or a geometry.
if not (isinstance(self.data, dict) and 'geometry' in self.data.keys()): # noqa
# Catch case when GeoJSON is just a geometry.
self.data = {'type': 'Feature', 'geometry': self.data}
self.data = {'type': 'FeatureCollection', 'features': [self.data]}
bounds = [[None, None], [None, None]]
for feature in self.data['features']:
for point in iter_points(feature.get('geometry', {}).get('coordinates', {})): # noqa
bounds = [
[
none_min(bounds[0][0], point[1]),
none_min(bounds[0][1], point[0]),
],
[
none_max(bounds[1][0], point[1]),
none_max(bounds[1][1], point[0]),
],
]
return bounds
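# A small worked example of the bounds computation above: GeoJSON stores
# points as [lon, lat], while the returned bounds are [[lat_min, lon_min],
# [lat_max, lon_max]], hence the axis swap in ``_get_self_bounds``.
def _example_geojson_bounds():  # pragma: no cover - illustrative only
    gj = GeoJson({'type': 'Point', 'coordinates': [3.0, 46.0]})
    return gj._get_self_bounds()  # [[46.0, 3.0], [46.0, 3.0]]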
class TopoJson(Layer):
"""
Creates a TopoJson object for plotting into a Map.
Parameters
----------
data: file, dict or str.
The TopoJSON data you want to plot.
* If file, then data will be read in the file and fully
embedded in Leaflet's JavaScript.
* If dict, then data will be converted to JSON and embedded
in the JavaScript.
* If str, then data will be passed to the JavaScript as-is.
object_path: str
The path of the desired object into the TopoJson structure.
Ex: 'objects.myobject'.
style_function: function, default None
A function mapping a TopoJson geometry to a style dict.
name : string, default None
The name of the Layer, as it will appear in LayerControls
    overlay : bool, default True
        Adds the layer as an optional overlay (True) or as the base layer (False).
control : bool, default True
Whether the Layer will be included in LayerControls
smooth_factor: float, default None
How much to simplify the polyline on each zoom level. More means
better performance and smoother look, and less means more accurate
representation. Leaflet defaults to 1.0.
Examples
--------
    >>> # Providing file that shall be embedded.
    >>> TopoJson(open('foo.json'), 'object.myobject')
    >>> # Providing filename that shall not be embedded.
>>> TopoJson('foo.json', 'object.myobject')
>>> # Providing dict.
>>> TopoJson(json.load(open('foo.json')), 'object.myobject')
>>> # Providing string.
>>> TopoJson(open('foo.json').read(), 'object.myobject')
    >>> # Provide a style_function that colors all states green but Alabama.
>>> style_function = lambda x: {'fillColor': '#0000ff' if
... x['properties']['name']=='Alabama' else
... '#00ff00'}
>>> TopoJson(topo_json, 'object.myobject', style_function=style_function)
"""
def __init__(self, data, object_path, style_function=None,
name=None, overlay=True, control=True, smooth_factor=None):
super(TopoJson, self).__init__(name=name, overlay=overlay,
control=control)
self._name = 'TopoJson'
        if hasattr(data, 'read'):
            self.embed = True
            self.data = json.load(data)
        elif isinstance(data, dict):
            self.embed = True
            self.data = data
        else:
            self.embed = False
            self.data = data
self.object_path = object_path
if style_function is None:
def style_function(x):
return {}
self.style_function = style_function
self.smooth_factor = smooth_factor
self._template = Template(u"""
{% macro script(this, kwargs) %}
var {{this.get_name()}}_data = {{this.style_data()}};
var {{this.get_name()}} = L.geoJson(topojson.feature(
{{this.get_name()}}_data,
{{this.get_name()}}_data.{{this.object_path}})
{% if this.smooth_factor is not none %}
, {smoothFactor: {{this.smooth_factor}}}
{% endif %}).addTo({{this._parent.get_name()}});
{{this.get_name()}}.setStyle(function(feature) {return feature.properties.style;});
{% endmacro %}
""") # noqa
def style_data(self):
"""
Applies self.style_function to each feature of self.data and returns
a corresponding JSON output.
"""
def recursive_get(data, keys):
if len(keys):
return recursive_get(data.get(keys[0]), keys[1:])
else:
return data
geometries = recursive_get(self.data, self.object_path.split('.'))['geometries'] # noqa
for feature in geometries:
feature.setdefault('properties', {}).setdefault('style', {}).update(self.style_function(feature)) # noqa
return json.dumps(self.data, sort_keys=True)
def render(self, **kwargs):
"""Renders the HTML representation of the element."""
super(TopoJson, self).render(**kwargs)
figure = self.get_root()
assert isinstance(figure, Figure), ("You cannot render this Element "
"if it's not in a Figure.")
figure.header.add_child(
JavascriptLink("https://cdnjs.cloudflare.com/ajax/libs/topojson/1.6.9/topojson.min.js"), # noqa
name='topojson')
def _get_self_bounds(self):
"""
        Computes the bounds of the object itself (not including its children)
in the form [[lat_min, lon_min], [lat_max, lon_max]]
"""
if not self.embed:
raise ValueError('Cannot compute bounds of non-embedded TopoJSON.')
xmin, xmax, ymin, ymax = None, None, None, None
for arc in self.data['arcs']:
x, y = 0, 0
for dx, dy in arc:
x += dx
y += dy
xmin = none_min(x, xmin)
xmax = none_max(x, xmax)
ymin = none_min(y, ymin)
ymax = none_max(y, ymax)
return [
[
self.data['transform']['translate'][1] + self.data['transform']['scale'][1] * ymin, # noqa
self.data['transform']['translate'][0] + self.data['transform']['scale'][0] * xmin # noqa
],
[
self.data['transform']['translate'][1] + self.data['transform']['scale'][1] * ymax, # noqa
self.data['transform']['translate'][0] + self.data['transform']['scale'][0] * xmax # noqa
]
]
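# A worked example of the arc decoding above: TopoJSON arcs are
# delta-encoded, so each point is the running sum of the preceding deltas,
# mapped to coordinates via the topology's translate + scale transform.
def _example_topojson_bounds():  # pragma: no cover - illustrative only
    topo = TopoJson(
        {'type': 'Topology',
         'transform': {'scale': [0.5, 0.5], 'translate': [10, 20]},
         'objects': {'demo': {'type': 'GeometryCollection', 'geometries': []}},
         'arcs': [[[0, 0], [2, 4]]]},  # decodes to x: 0, 2 and y: 0, 4
        'objects.demo')
    return topo._get_self_bounds()  # [[20.0, 10.0], [22.0, 11.0]]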
class MarkerCluster(Layer):
"""
Creates a MarkerCluster element to append into a map with
Map.add_child.
Parameters
----------
name : string, default None
The name of the Layer, as it will appear in LayerControls
    overlay : bool, default True
        Adds the layer as an optional overlay (True) or as the base layer (False).
control : bool, default True
Whether the Layer will be included in LayerControls
"""
def __init__(self, name=None, overlay=True, control=True):
super(MarkerCluster, self).__init__(name=name, overlay=overlay,
control=control)
self._name = 'MarkerCluster'
self._template = Template(u"""
{% macro script(this, kwargs) %}
var {{this.get_name()}} = L.markerClusterGroup();
{{this._parent.get_name()}}.addLayer({{this.get_name()}});
{% endmacro %}
""")
def render(self, **kwargs):
"""Renders the HTML representation of the element."""
super(MarkerCluster, self).render()
figure = self.get_root()
assert isinstance(figure, Figure), ("You cannot render this Element "
"if it's not in a Figure.")
figure.header.add_child(
JavascriptLink("https://cdnjs.cloudflare.com/ajax/libs/leaflet.markercluster/1.0.0/leaflet.markercluster-src.js"), # noqa
name='marker_cluster_src')
figure.header.add_child(
JavascriptLink("https://cdnjs.cloudflare.com/ajax/libs/leaflet.markercluster/1.0.0/leaflet.markercluster.js"), # noqa
name='marker_cluster')
figure.header.add_child(
CssLink("https://cdnjs.cloudflare.com/ajax/libs/leaflet.markercluster/1.0.0/MarkerCluster.css"), # noqa
name='marker_cluster_css')
figure.header.add_child(
CssLink("https://cdnjs.cloudflare.com/ajax/libs/leaflet.markercluster/1.0.0/MarkerCluster.Default.css"), # noqa
name="marker_cluster_default_css")
class DivIcon(MacroElement):
"""
Represents a lightweight icon for markers that uses a simple `div`
element instead of an image.
Parameters
----------
icon_size : tuple of 2 int
Size of the icon image in pixels.
icon_anchor : tuple of 2 int
The coordinates of the "tip" of the icon
(relative to its top left corner).
The icon will be aligned so that this point is at the
marker's geographical location.
popup_anchor : tuple of 2 int
The coordinates of the point from which popups will "open",
relative to the icon anchor.
class_name : string
A custom class name to assign to the icon.
        Leaflet's default is 'leaflet-div-icon', which draws a little white
        square with a shadow. We set it to 'empty' in folium.
html : string
A custom HTML code to put inside the div element.
For more information see:
http://leafletjs.com/reference.html#divicon
"""
def __init__(self, html=None, icon_size=None, icon_anchor=None,
popup_anchor=None, class_name='empty'):
super(DivIcon, self).__init__()
self._name = 'DivIcon'
self.icon_size = icon_size
self.icon_anchor = icon_anchor
self.popup_anchor = popup_anchor
self.html = html
self.className = class_name
self._template = Template(u"""
{% macro script(this, kwargs) %}
var {{this.get_name()}} = L.divIcon({
{% if this.icon_size %}iconSize: [{{this.icon_size[0]}},{{this.icon_size[1]}}],{% endif %}
{% if this.icon_anchor %}iconAnchor: [{{this.icon_anchor[0]}},{{this.icon_anchor[1]}}],{% endif %}
{% if this.popup_anchor %}popupAnchor: [{{this.popup_anchor[0]}},{{this.popup_anchor[1]}}],{% endif %}
{% if this.className %}className: '{{this.className}}',{% endif %}
{% if this.html %}html: '{{this.html}}',{% endif %}
});
{{this._parent.get_name()}}.setIcon({{this.get_name()}});
{% endmacro %}
""") # noqa
class Circle(Marker):
"""
Creates a Circle object for plotting on a Map.
Parameters
----------
location: tuple or list, default None
Latitude and Longitude of Marker (Northing, Easting)
radius: int
        The radius of the circle in meters. For setting the radius in pixels,
        use CircleMarker.
    color: str, default 'black'
        The color of the marker's edge in an HTML-compatible format.
    fill_color: str, default 'black'
        The fill color of the marker in an HTML-compatible format.
    fill_opacity: float, default 0.6
        The fill opacity of the marker, between 0 and 1.
popup: string or folium.Popup, default None
Input text or visualization for object.
"""
def __init__(self, location, radius=500, color='black',
fill_color='black', fill_opacity=0.6, popup=None):
super(Circle, self).__init__(location, popup=popup)
self._name = 'Circle'
self.radius = radius
self.color = color
self.fill_color = fill_color
self.fill_opacity = fill_opacity
self._template = Template(u"""
{% macro script(this, kwargs) %}
var {{this.get_name()}} = L.circle(
[{{this.location[0]}},{{this.location[1]}}],
{{ this.radius }},
{
color: '{{ this.color }}',
fillColor: '{{ this.fill_color }}',
fillOpacity: {{ this.fill_opacity }}
}
)
.addTo({{this._parent.get_name()}});
{% endmacro %}
""")
class CircleMarker(Marker):
"""
Creates a CircleMarker object for plotting on a Map.
Parameters
----------
location: tuple or list, default None
Latitude and Longitude of Marker (Northing, Easting)
radius: int
        The radius of the circle in pixels. For setting the radius in meters,
        use Circle.
    color: str, default 'black'
        The color of the marker's edge in an HTML-compatible format.
    fill_color: str, default 'black'
        The fill color of the marker in an HTML-compatible format.
    fill_opacity: float, default 0.6
        The fill opacity of the marker, between 0 and 1.
popup: string or folium.Popup, default None
Input text or visualization for object.
"""
def __init__(self, location, radius=500, color='black',
fill_color='black', fill_opacity=0.6, popup=None):
super(CircleMarker, self).__init__(location, popup=popup)
self._name = 'CircleMarker'
self.radius = radius
self.color = color
self.fill_color = fill_color
self.fill_opacity = fill_opacity
self._template = Template(u"""
{% macro script(this, kwargs) %}
var {{this.get_name()}} = L.circleMarker(
[{{this.location[0]}},{{this.location[1]}}],
{
color: '{{ this.color }}',
fillColor: '{{ this.fill_color }}',
fillOpacity: {{ this.fill_opacity }}
}
)
.setRadius({{ this.radius }})
.addTo({{this._parent.get_name()}});
{% endmacro %}
""")
class RectangleMarker(Marker):
def __init__(self, bounds, color='black', weight=1, fill_color='black',
fill_opacity=0.6, popup=None):
"""
Creates a RectangleMarker object for plotting on a Map.
Parameters
----------
bounds: tuple or list, default None
Latitude and Longitude of Marker (southWest and northEast)
color: string, default ('black')
Edge color of a rectangle.
weight: float, default (1)
Edge line width of a rectangle.
fill_color: string, default ('black')
Fill color of a rectangle.
fill_opacity: float, default (0.6)
Fill opacity of a rectangle.
popup: string or folium.Popup, default None
Input text or visualization for object.
Returns
-------
folium.features.RectangleMarker object
Example
-------
>>> RectangleMarker(
... bounds=[[35.681, 139.766], [35.691, 139.776]],
... color='blue', fill_color='red', popup='Tokyo, Japan'
... )
"""
super(RectangleMarker, self).__init__(bounds, popup=popup)
self._name = 'RectangleMarker'
self.color = color
self.weight = weight
self.fill_color = fill_color
self.fill_opacity = fill_opacity
self._template = Template(u"""
{% macro script(this, kwargs) %}
var {{this.get_name()}} = L.rectangle(
[[{{this.location[0]}},{{this.location[1]}}],
[{{this.location[2]}},{{this.location[3]}}]],
{
color: '{{ this.color }}',
fillColor: '{{ this.fill_color }}',
fillOpacity: {{ this.fill_opacity }},
weight: {{ this.weight }}
}).addTo({{this._parent.get_name()}});
{% endmacro %}
""")
class PolygonMarker(Marker):
def __init__(self, locations, color='black', weight=1, fill_color='black',
fill_opacity=0.6, popup=None, latlon=True):
"""
Creates a PolygonMarker object for plotting on a Map.
Parameters
----------
locations: tuple or list, default None
Latitude and Longitude of Polygon
color: string, default ('black')
Edge color of a polygon.
weight: float, default (1)
Edge line width of a polygon.
fill_color: string, default ('black')
Fill color of a polygon.
fill_opacity: float, default (0.6)
Fill opacity of a polygon.
popup: string or folium.Popup, default None
Input text or visualization for object.
Returns
-------
        folium.features.PolygonMarker object
Examples
--------
>>> locations = [[35.6762, 139.7795],
... [35.6718, 139.7831],
... [35.6767, 139.7868],
... [35.6795, 139.7824],
... [35.6787, 139.7791]]
        >>> PolygonMarker(locations, color='blue', weight=10, fill_color='red',
        ...               fill_opacity=0.5, popup='Tokyo, Japan')
"""
super(PolygonMarker, self).__init__((
_locations_mirror(locations) if not latlon else
_locations_tolist(locations)), popup=popup
)
self._name = 'PolygonMarker'
self.color = color
self.weight = weight
self.fill_color = fill_color
self.fill_opacity = fill_opacity
self._template = Template(u"""
{% macro script(this, kwargs) %}
var {{this.get_name()}} = L.polygon({{this.location}},
{
color: '{{ this.color }}',
fillColor: '{{ this.fill_color }}',
fillOpacity: {{ this.fill_opacity }},
weight: {{ this.weight }}
}).addTo({{this._parent.get_name()}});
{% endmacro %}
""")
class LatLngPopup(MacroElement):
"""
When one clicks on a Map that contains a LatLngPopup,
a popup is shown that displays the latitude and longitude of the pointer.
"""
def __init__(self):
super(LatLngPopup, self).__init__()
self._name = 'LatLngPopup'
self._template = Template(u"""
{% macro script(this, kwargs) %}
var {{this.get_name()}} = L.popup();
function latLngPop(e) {
{{this.get_name()}}
.setLatLng(e.latlng)
.setContent("Latitude: " + e.latlng.lat.toFixed(4) +
"<br>Longitude: " + e.latlng.lng.toFixed(4))
.openOn({{this._parent.get_name()}});
}
{{this._parent.get_name()}}.on('click', latLngPop);
{% endmacro %}
""") # noqa
class ClickForMarker(MacroElement):
"""
When one clicks on a Map that contains a ClickForMarker,
a Marker is created at the pointer's position.
Parameters
----------
popup: str, default None
Text to display in the markers' popups.
If None, the popups will display the marker's latitude and longitude.
"""
def __init__(self, popup=None):
super(ClickForMarker, self).__init__()
self._name = 'ClickForMarker'
if popup:
self.popup = ''.join(['"', popup, '"'])
else:
self.popup = '"Latitude: " + lat + "<br>Longitude: " + lng '
self._template = Template(u"""
{% macro script(this, kwargs) %}
function newMarker(e){
var new_mark = L.marker().setLatLng(e.latlng).addTo({{this._parent.get_name()}});
new_mark.dragging.enable();
new_mark.on('dblclick', function(e){ {{this._parent.get_name()}}.removeLayer(e.target)})
var lat = e.latlng.lat.toFixed(4),
lng = e.latlng.lng.toFixed(4);
new_mark.bindPopup({{ this.popup }});
};
{{this._parent.get_name()}}.on('click', newMarker);
{% endmacro %}
""") # noqa
class PolyLine(MacroElement):
"""
Creates a PolyLine object to append into a map with
Map.add_child.
Parameters
----------
locations: list of points (latitude, longitude)
Latitude and Longitude of line (Northing, Easting)
color: string, default Leaflet's default ('#03f')
weight: float, default Leaflet's default (5)
opacity: float, default Leaflet's default (0.5)
latlon: bool, default True
Whether locations are given in the form [[lat, lon]]
or not ([[lon, lat]] if False).
Note that the default GeoJson format is latlon=False,
while Leaflet polyline's default is latlon=True.
popup: string or folium.Popup, default None
Input text or visualization for object.
"""
def __init__(self, locations, color=None, weight=None,
opacity=None, latlon=True, popup=None):
super(PolyLine, self).__init__()
self._name = 'PolyLine'
self.data = (_locations_mirror(locations) if not latlon else
_locations_tolist(locations))
self.color = color
self.weight = weight
self.opacity = opacity
if isinstance(popup, text_type) or isinstance(popup, binary_type):
self.add_child(Popup(popup))
elif popup is not None:
self.add_child(popup)
self._template = Template(u"""
{% macro script(this, kwargs) %}
var {{this.get_name()}} = L.polyline(
{{this.data}},
{
{% if this.color != None %}color: '{{ this.color }}',{% endif %}
{% if this.weight != None %}weight: {{ this.weight }},{% endif %}
{% if this.opacity != None %}opacity: {{ this.opacity }},{% endif %}
});
{{this._parent.get_name()}}.addLayer({{this.get_name()}});
{% endmacro %}
""") # noqa
def _get_self_bounds(self):
"""
        Computes the bounds of the object itself (not including its children)
in the form [[lat_min, lon_min], [lat_max, lon_max]]
"""
bounds = [[None, None], [None, None]]
for point in iter_points(self.data):
bounds = [
[
none_min(bounds[0][0], point[0]),
none_min(bounds[0][1], point[1]),
],
[
none_max(bounds[1][0], point[0]),
none_max(bounds[1][1], point[1]),
],
]
return bounds
class CustomIcon(Icon):
"""
Create a custom icon, based on an image.
Parameters
----------
icon_image : string, file or array-like object
The data you want to use as an icon.
* If string, it will be written directly in the output file.
        * If file, its content will be embedded in the output file.
* If array-like, it will be converted to PNG base64 string
and embedded in the output.
icon_size : tuple of 2 int
Size of the icon image in pixels.
icon_anchor : tuple of 2 int
The coordinates of the "tip" of the icon
(relative to its top left corner).
The icon will be aligned so that this point is at the
marker's geographical location.
shadow_image : string, file or array-like object
The data for the shadow image. If not specified,
no shadow image will be created.
shadow_size : tuple of 2 int
Size of the shadow image in pixels.
shadow_anchor : tuple of 2 int
The coordinates of the "tip" of the shadow relative to its
top left corner (the same as icon_anchor if not specified).
popup_anchor : tuple of 2 int
The coordinates of the point from which popups will "open",
relative to the icon anchor.
"""
def __init__(self, icon_image, icon_size=None, icon_anchor=None,
shadow_image=None, shadow_size=None, shadow_anchor=None,
popup_anchor=None):
super(Icon, self).__init__()
self._name = 'CustomIcon'
self.icon_url = image_to_url(icon_image)
self.icon_size = icon_size
self.icon_anchor = icon_anchor
self.shadow_url = (image_to_url(shadow_image)
if shadow_image is not None else None)
self.shadow_size = shadow_size
self.shadow_anchor = shadow_anchor
self.popup_anchor = popup_anchor
self._template = Template(u"""
{% macro script(this, kwargs) %}
var {{this.get_name()}} = L.icon({
iconUrl: '{{this.icon_url}}',
{% if this.icon_size %}iconSize: [{{this.icon_size[0]}},{{this.icon_size[1]}}],{% endif %}
{% if this.icon_anchor %}iconAnchor: [{{this.icon_anchor[0]}},{{this.icon_anchor[1]}}],{% endif %}
{% if this.shadow_url %}shadowUrl: '{{this.shadow_url}}',{% endif %}
{% if this.shadow_size %}shadowSize: [{{this.shadow_size[0]}},{{this.shadow_size[1]}}],{% endif %}
{% if this.shadow_anchor %}shadowAnchor: [{{this.shadow_anchor[0]}},{{this.shadow_anchor[1]}}],{% endif %}
{% if this.popup_anchor %}popupAnchor: [{{this.popup_anchor[0]}},{{this.popup_anchor[1]}}],{% endif %}
});
{{this._parent.get_name()}}.setIcon({{this.get_name()}});
{% endmacro %}
""") # noqa
class ColorLine(FeatureGroup):
"""
Draw data on a map with specified colors.
Parameters
----------
positions: tuple or list
The list of points latitude and longitude
colors: tuple or list
The list of segments colors.
It must have length equal to `len(positions)-1`.
colormap: branca.colormap.Colormap or list or tuple
The colormap to use. If a list or tuple of colors is provided,
a LinearColormap will be created from it.
nb_steps: int, default 12
To have lighter output the colormap will be discretized
to that number of colors.
opacity: float, default 1
Line opacity, scale 0-1
weight: int, default 2
Stroke weight in pixels
**kwargs
Further parameters available. See folium.map.FeatureGroup
Returns
-------
A ColorLine object that you can `add_to` a Map.
"""
def __init__(self, positions, colors, colormap=None, nb_steps=12,
weight=None, opacity=None, **kwargs):
super(ColorLine, self).__init__(**kwargs)
self._name = 'ColorLine'
if colormap is None:
cm = LinearColormap(['green', 'yellow', 'red'],
vmin=min(colors),
vmax=max(colors),
).to_step(nb_steps)
elif isinstance(colormap, LinearColormap):
cm = colormap.to_step(nb_steps)
elif isinstance(colormap, list) or isinstance(colormap, tuple):
cm = LinearColormap(colormap,
vmin=min(colors),
vmax=max(colors),
).to_step(nb_steps)
else:
cm = colormap
out = {}
for (lat1, lng1), (lat2, lng2), color in zip(positions[:-1], positions[1:], colors): # noqa
out.setdefault(cm(color), []).append([[lat1, lng1], [lat2, lng2]])
for key, val in out.items():
self.add_child(PolyLine(val, color=key, weight=weight, opacity=opacity)) # noqa
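# A small sketch of the segment bucketing above: consecutive position pairs
# form segments, and segments whose values map to the same discretized
# colormap step are merged into one PolyLine, so the group holds at most
# ``nb_steps`` child layers.
def _example_color_line():  # pragma: no cover - illustrative only
    import folium
    positions = [(45.0, 3.0), (45.1, 3.1), (45.2, 3.0), (45.3, 3.1)]
    values = [0.0, 5.0, 10.0]  # one value per segment
    m = folium.Map(location=[45.15, 3.05], zoom_start=9)
    ColorLine(positions, colors=values, nb_steps=3, weight=4).add_to(m)
    return m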
|
|
"""Sensitive variant calling using VarDict.
Defaults to using the faster, equally sensitive Java port:
https://github.com/AstraZeneca-NGS/VarDictJava
if 'vardict' or 'vardict-java' is specified in the configuration. To use the
VarDict perl version:
https://github.com/AstraZeneca-NGS/VarDict
specify 'vardict-perl'.
"""
import os
import itertools
import sys
import toolz as tz
import pybedtools
from bcbio import broad, utils
from bcbio.bam import highdepth
from bcbio.distributed.transaction import file_transaction
from bcbio.pipeline import config_utils, shared
from bcbio.pipeline import datadict as dd
from bcbio.provenance import do
from bcbio.variation import annotation, bamprep, vcfutils
def _is_bed_file(target):
return target and isinstance(target, basestring) and os.path.isfile(target)
def _vardict_options_from_config(items, config, out_file, target=None):
opts = ["-c 1", "-S 2", "-E 3", "-g 4"]
# ["-z", "-F", "-c", "1", "-S", "2", "-E", "3", "-g", "4", "-x", "0",
# "-k", "3", "-r", "4", "-m", "8"]
resources = config_utils.get_resources("vardict", config)
if resources.get("options"):
opts += resources["options"]
assert _is_bed_file(target)
if any(tz.get_in(["config", "algorithm", "coverage_interval"], x, "").lower() == "genome"
for x in items):
target = shared.remove_highdepth_regions(target, items)
target = shared.remove_lcr_regions(target, items)
target = _enforce_max_region_size(target, items[0])
opts += [target] # this must be the last option
return opts
def _enforce_max_region_size(in_file, data):
"""Ensure we don't have any chunks in the region greater than 1Mb.
Larger sections have high memory usage on VarDictJava and failures
on VarDict. This creates minimum windows from the input BED file
to avoid these issues. Downstream VarDict merging sorts out any
variants across windows.
"""
max_size = 1e6
overlap_size = 250
def _has_larger_regions(f):
return any(r.stop - r.start > max_size for r in pybedtools.BedTool(f))
out_file = "%s-regionlimit%s" % utils.splitext_plus(in_file)
if not utils.file_exists(out_file):
if _has_larger_regions(in_file):
with file_transaction(data, out_file) as tx_out_file:
pybedtools.BedTool().window_maker(w=max_size,
s=max_size - overlap_size,
b=pybedtools.BedTool(in_file)).saveas(tx_out_file)
else:
utils.symlink_plus(in_file, out_file)
return out_file
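# A worked sketch of the windowing above (illustrative; coordinates are
# hypothetical). A single 2.5Mb region becomes 1Mb windows stepping by
# 1Mb - 250bp, so adjacent windows overlap by 250bp and no chunk exceeds
# the 1Mb limit:
def _example_window_maker():  # pragma: no cover - illustrative only
    bed = pybedtools.BedTool('chr1\t0\t2500000\n', from_string=True)
    return pybedtools.BedTool().window_maker(
        w=int(1e6), s=int(1e6) - 250, b=bed)
    # -> chr1 0-1000000, chr1 999750-1999750, chr1 1999500-2500000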
def run_vardict(align_bams, items, ref_file, assoc_files, region=None,
out_file=None):
"""Run VarDict variant calling.
"""
if vcfutils.is_paired_analysis(align_bams, items):
call_file = _run_vardict_paired(align_bams, items, ref_file,
assoc_files, region, out_file)
else:
vcfutils.check_paired_problems(items)
call_file = _run_vardict_caller(align_bams, items, ref_file,
assoc_files, region, out_file)
return call_file
def _get_jvm_opts(data, out_file):
"""Retrieve JVM options when running the Java version of VarDict.
"""
if get_vardict_command(data) == "vardict-java":
resources = config_utils.get_resources("vardict", data["config"])
jvm_opts = resources.get("jvm_opts", ["-Xms750m", "-Xmx4g"])
jvm_opts += broad.get_default_jvm_opts(os.path.dirname(out_file))
return "export VAR_DICT_OPTS='%s' && " % " ".join(jvm_opts)
else:
return ""
def _run_vardict_caller(align_bams, items, ref_file, assoc_files,
region=None, out_file=None):
"""Detect SNPs and indels with VarDict.
"""
config = items[0]["config"]
if out_file is None:
out_file = "%s-variants.vcf.gz" % os.path.splitext(align_bams[0])[0]
if not utils.file_exists(out_file):
with file_transaction(items[0], out_file) as tx_out_file:
target = shared.subset_variant_regions(dd.get_variant_regions(items[0]), region,
out_file, do_merge=False)
num_bams = len(align_bams)
sample_vcf_names = [] # for individual sample names, given batch calling may be required
for bamfile, item in itertools.izip(align_bams, items):
# prepare commands
sample = dd.get_sample_name(item)
vardict = get_vardict_command(items[0])
strandbias = "teststrandbias.R"
var2vcf = "var2vcf_valid.pl"
opts = (" ".join(_vardict_options_from_config(items, config, out_file, target))
if _is_bed_file(target) else "")
vcfstreamsort = config_utils.get_program("vcfstreamsort", config)
compress_cmd = "| bgzip -c" if out_file.endswith("gz") else ""
freq = float(utils.get_in(config, ("algorithm", "min_allele_fraction"), 10)) / 100.0
coverage_interval = utils.get_in(config, ("algorithm", "coverage_interval"), "exome")
                # for deep targeted panels, require at least 50 reads of
                # coverage supporting a variant (var2vcf -v 50)
                var2vcf_opts = " -v 50 " if highdepth.get_median_coverage(items[0]) > 5000 else ""
fix_ambig = vcfutils.fix_ambiguous_cl()
remove_dup = vcfutils.remove_dup_cl()
jvm_opts = _get_jvm_opts(items[0], tx_out_file)
r_setup = "unset R_HOME && export PATH=%s:$PATH && " % os.path.dirname(utils.Rscript_cmd())
cmd = ("{r_setup}{jvm_opts}{vardict} -G {ref_file} -f {freq} "
"-N {sample} -b {bamfile} {opts} "
"| {strandbias}"
"| {var2vcf} -N {sample} -E -f {freq} {var2vcf_opts} "
"| {fix_ambig} | {remove_dup} | {vcfstreamsort} {compress_cmd}")
if num_bams > 1:
temp_file_prefix = out_file.replace(".gz", "").replace(".vcf", "") + item["name"][1]
tmp_out = temp_file_prefix + ".temp.vcf"
tmp_out += ".gz" if out_file.endswith("gz") else ""
sample_vcf_names.append(tmp_out)
with file_transaction(item, tmp_out) as tx_tmp_file:
if not _is_bed_file(target):
vcfutils.write_empty_vcf(tx_tmp_file, config, samples=[sample])
else:
cmd += " > {tx_tmp_file}"
do.run(cmd.format(**locals()), "Genotyping with VarDict: Inference", {})
else:
if not _is_bed_file(target):
vcfutils.write_empty_vcf(tx_out_file, config, samples=[sample])
else:
cmd += " > {tx_out_file}"
do.run(cmd.format(**locals()), "Genotyping with VarDict: Inference", {})
if num_bams > 1:
# N.B. merge_variant_files wants region in 1-based end-inclusive
# coordinates. Thus use bamprep.region_to_gatk
vcfutils.merge_variant_files(orig_files=sample_vcf_names,
out_file=tx_out_file, ref_file=ref_file,
config=config, region=bamprep.region_to_gatk(region))
out_file = (annotation.add_dbsnp(out_file, assoc_files["dbsnp"], config)
if assoc_files.get("dbsnp") else out_file)
return out_file
def _safe_to_float(x):
if x is None:
return None
else:
try:
return float(x)
except ValueError:
return None
def depth_freq_filter(line, tumor_index, aligner):
"""Command line to filter VarDict calls based on depth, frequency and quality.
Looks at regions with low depth for allele frequency (AF * DP < 6, the equivalent
of < 13bp for heterogygote calls, but generalized. Within these calls filters if a
calls has:
- Low mapping quality and multiple mismatches in a read (NM)
For bwa only: MQ < 55.0 and NM > 1.0 or MQ < 60.0 and NM > 2.0
- Low depth (DP < 10)
- Low QUAL (QUAL < 45)
Also filters in low allele frequency regions with poor quality, if all of these are
true:
- Allele frequency < 0.2
- Quality < 55
- P-value (SSF) > 0.06
"""
if line.startswith("#CHROM"):
headers = [('##FILTER=<ID=LowAlleleDepth,Description="Low depth per allele frequency '
'along with poor depth, quality, mapping quality and read mismatches.">'),
('##FILTER=<ID=LowFreqQuality,Description="Low frequency read with '
'poor quality and p-value (SSF).">')]
return "\n".join(headers) + "\n" + line
elif line.startswith("#"):
return line
else:
parts = line.split("\t")
sample_ft = {a: v for (a, v) in zip(parts[8].split(":"), parts[9 + tumor_index].split(":"))}
qual = _safe_to_float(parts[5])
dp = _safe_to_float(sample_ft.get("DP"))
af = _safe_to_float(sample_ft.get("AF"))
nm = _safe_to_float(sample_ft.get("NM"))
mq = _safe_to_float(sample_ft.get("MQ"))
ssfs = [x for x in parts[7].split(";") if x.startswith("SSF=")]
pval = _safe_to_float(ssfs[0].split("=")[-1] if ssfs else None)
fname = None
if dp is not None and af is not None:
if dp * af < 6:
if aligner == "bwa" and nm is not None and mq is not None:
if (mq < 55.0 and nm > 1.0) or (mq < 60.0 and nm > 2.0):
fname = "LowAlleleDepth"
if dp < 10:
fname = "LowAlleleDepth"
if qual is not None and qual < 45:
fname = "LowAlleleDepth"
if af is not None and qual is not None and pval is not None:
if af < 0.2 and qual < 55 and pval > 0.06:
fname = "LowFreqQuality"
if fname:
if parts[6] in set([".", "PASS"]):
parts[6] = fname
else:
parts[6] += ";%s" % fname
line = "\t".join(parts)
return line
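# A worked example of the filter above on a hypothetical single-sample VCF
# body line: DP * AF = 8 * 0.5 = 4 < 6 and DP < 10, so the PASS filter is
# replaced with LowAlleleDepth.
def _example_depth_freq_filter():  # pragma: no cover - illustrative only
    line = ("chr1\t100\t.\tA\tG\t50\tPASS\tSSF=0.5\t"
            "DP:AF:NM:MQ\t8:0.5:0:60")
    return depth_freq_filter(line, tumor_index=0, aligner="bwa")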
def _run_vardict_paired(align_bams, items, ref_file, assoc_files,
region=None, out_file=None):
"""Detect variants with Vardict.
This is used for paired tumor / normal samples.
"""
config = items[0]["config"]
if out_file is None:
out_file = "%s-paired-variants.vcf.gz" % os.path.splitext(align_bams[0])[0]
if not utils.file_exists(out_file):
with file_transaction(items[0], out_file) as tx_out_file:
target = shared.subset_variant_regions(dd.get_variant_regions(items[0]), region,
out_file, do_merge=True)
paired = vcfutils.get_paired_bams(align_bams, items)
if not _is_bed_file(target):
vcfutils.write_empty_vcf(tx_out_file, config,
samples=[x for x in [paired.tumor_name, paired.normal_name] if x])
else:
if not paired.normal_bam:
ann_file = _run_vardict_caller(align_bams, items, ref_file,
assoc_files, region, out_file)
return ann_file
vcffilter = config_utils.get_program("vcffilter", config)
vardict = get_vardict_command(items[0])
vcfstreamsort = config_utils.get_program("vcfstreamsort", config)
strandbias = "testsomatic.R"
var2vcf = "var2vcf_paired.pl"
compress_cmd = "| bgzip -c" if out_file.endswith("gz") else ""
freq = float(utils.get_in(config, ("algorithm", "min_allele_fraction"), 10)) / 100.0
# merge bed file regions as amplicon VarDict is only supported in single sample mode
opts = " ".join(_vardict_options_from_config(items, config, out_file, target))
coverage_interval = utils.get_in(config, ("algorithm", "coverage_interval"), "exome")
                # for deep targeted panels, require at least 50 reads of
                # coverage supporting a variant (var2vcf -v 50)
                var2vcf_opts = " -v 50 " if highdepth.get_median_coverage(items[0]) > 5000 else ""
fix_ambig = vcfutils.fix_ambiguous_cl()
remove_dup = vcfutils.remove_dup_cl()
if any("vardict_somatic_filter" in tz.get_in(("config", "algorithm", "tools_off"), data, [])
for data in items):
somatic_filter = ""
freq_filter = ""
else:
var2vcf_opts += " -M " # this makes VarDict soft filter non-differential variants
somatic_filter = ("| sed 's/\\\\.*Somatic\\\\/Somatic/' "
"| sed 's/REJECT,Description=\".*\">/REJECT,Description=\"Not Somatic via VarDict\">/' "
"| %s -x 'bcbio.variation.freebayes.call_somatic(x)'" %
os.path.join(os.path.dirname(sys.executable), "py"))
freq_filter = ("| bcftools filter -m '+' -s 'REJECT' -e 'STATUS !~ \".*Somatic\"' 2> /dev/null "
"| %s -x 'bcbio.variation.vardict.depth_freq_filter(x, %s, \"%s\")'" %
(os.path.join(os.path.dirname(sys.executable), "py"),
0, dd.get_aligner(paired.tumor_data)))
jvm_opts = _get_jvm_opts(items[0], tx_out_file)
r_setup = "unset R_HOME && export PATH=%s:$PATH && " % os.path.dirname(utils.Rscript_cmd())
cmd = ("{r_setup}{jvm_opts}{vardict} -G {ref_file} -f {freq} "
"-N {paired.tumor_name} -b \"{paired.tumor_bam}|{paired.normal_bam}\" {opts} "
"| {strandbias} "
"| {var2vcf} -P 0.9 -m 4.25 -f {freq} {var2vcf_opts} "
"-N \"{paired.tumor_name}|{paired.normal_name}\" "
"{freq_filter} "
"{somatic_filter} | {fix_ambig} | {remove_dup} | {vcfstreamsort} "
"{compress_cmd} > {tx_out_file}")
do.run(cmd.format(**locals()), "Genotyping with VarDict: Inference", {})
out_file = (annotation.add_dbsnp(out_file, assoc_files["dbsnp"], config)
if assoc_files.get("dbsnp") else out_file)
return out_file
def get_vardict_command(data):
"""
convert variantcaller specification to proper vardict command, handling
string or list specification
"""
vcaller = dd.get_variantcaller(data)
if isinstance(vcaller, list):
vardict = [x for x in vcaller if "vardict" in x]
if not vardict:
return None
vardict = vardict[0]
elif not vcaller:
return None
else:
vardict = vcaller
vardict = "vardict-java" if not vardict.endswith("-perl") else "vardict"
return vardict
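# Illustrative mappings (doctest-style; the ``data`` dicts are hypothetical
# minimal pipeline items read via ``dd.get_variantcaller``):
#
#     {'config': {'algorithm': {'variantcaller': 'vardict'}}}       -> 'vardict-java'
#     {'config': {'algorithm': {'variantcaller': 'vardict-perl'}}}  -> 'vardict'
#     {'config': {'algorithm': {'variantcaller': ['gatk', 'vardict-java']}}}
#                                                                   -> 'vardict-java'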
|
|
# Copyright (c) 2021 UAVCAN Consortium
# This software is distributed under the terms of the MIT License.
# Author: Pavel Kirienko <pavel@uavcan.org>
from __future__ import annotations
import sys
from typing import Iterator, Optional, Sequence, Callable
import itertools
import pyuavcan
from .register import ValueProxy, Natural16, Natural32, RelaxedValue
if sys.version_info >= (3, 9):
from collections.abc import MutableMapping
else: # pragma: no cover
from typing import MutableMapping # pylint: disable=ungrouped-imports
def make_transport(
registers: MutableMapping[str, ValueProxy],
*,
reconfigurable: bool = False,
) -> Optional[pyuavcan.transport.Transport]:
"""
Constructs a transport instance based on the configuration encoded in the supplied registers.
If more than one transport is defined, a redundant instance will be constructed.
The register schema is documented below per transport class
(refer to the transport class documentation to find the defaults for optional registers).
    All transports also accept the following standard registers:
+-------------------+-------------------+-----------------------------------------------------------------------+
| Register name | Register type | Semantics |
+===================+===================+=======================================================================+
| ``uavcan.node.id``| ``natural16[1]`` | The node-ID to use. If the value exceeds the valid |
| | | range, the constructed node will be anonymous. |
+-------------------+-------------------+-----------------------------------------------------------------------+
.. list-table:: :mod:`pyuavcan.transport.udp`
:widths: 1 1 9
:header-rows: 1
* - Register name
- Register type
- Register semantics
* - ``uavcan.udp.iface``
- ``string``
- Whitespace-separated list of /16 IP subnet addresses.
16 least significant bits are replaced with the node-ID if configured, otherwise left unchanged.
E.g.: ``127.42.0.42``: node-ID 257, result ``127.42.1.1``;
``127.42.0.42``: anonymous, result ``127.42.0.42``.
* - ``uavcan.udp.duplicate_service_transfers``
- ``bit[1]``
- Apply deterministic data loss mitigation to RPC-service transfers by setting multiplication factor = 2.
* - ``uavcan.udp.mtu``
- ``natural16[1]``
- The MTU for all constructed transport instances.
.. list-table:: :mod:`pyuavcan.transport.serial`
:widths: 1 1 9
:header-rows: 1
* - Register name
- Register type
- Register semantics
* - ``uavcan.serial.iface``
- ``string``
- Whitespace-separated list of serial port names.
E.g.: ``/dev/ttyACM0``, ``COM9``, ``socket://127.0.0.1:50905``.
* - ``uavcan.serial.duplicate_service_transfers``
- ``bit[1]``
- Apply deterministic data loss mitigation to RPC-service transfers by setting multiplication factor = 2.
* - ``uavcan.serial.baudrate``
- ``natural32[1]``
- The baudrate to set for all specified serial ports. Leave unchanged if zero.
.. list-table:: :mod:`pyuavcan.transport.can`
:widths: 1 1 9
:header-rows: 1
* - Register name
- Register type
- Register semantics
* - ``uavcan.can.iface``
- ``string``
- Whitespace-separated list of CAN iface names.
Each iface name shall follow the format defined in :class:`pyuavcan.transport.can.media.pythoncan`.
E.g.: ``socketcan:vcan0``.
* - ``uavcan.can.mtu``
- ``natural16[1]``
- The MTU value to use with all constructed CAN transports.
Values other than 8 and 64 should not be used.
* - ``uavcan.can.bitrate``
- ``natural32[2]``
- The bitrates to use for all constructed CAN transports
for arbitration (first value) and data (second value) segments.
To use Classic CAN, set both to the same value and set MTU = 8.
.. list-table:: :mod:`pyuavcan.transport.loopback`
:widths: 1 1 9
:header-rows: 1
* - Register name
- Register type
- Register semantics
* - ``uavcan.loopback``
- ``bit[1]``
- If True, a loopback transport will be constructed. This is intended for testing only.
:param registers:
A mutable mapping of :class:`str` to :class:`pyuavcan.application.register.ValueProxy`.
Normally, it should be constructed by :func:`pyuavcan.application.make_registry`.
:param reconfigurable:
If False (default), the return value is:
- None if the registers do not encode a valid transport configuration.
- A single transport instance if a non-redundant configuration is defined.
- An instance of :class:`pyuavcan.transport.RedundantTransport` if more than one transport
configuration is defined.
If True, then the returned instance is always of type :class:`pyuavcan.transport.RedundantTransport`,
where the set of inferiors is empty if no transport configuration is defined.
This case is intended for applications that may want to change the transport configuration afterwards.
:return:
None if no transport is configured AND ``reconfigurable`` is False.
Otherwise, a functional transport instance is returned.
:raises:
- :class:`pyuavcan.application.register.MissingRegisterError` if a register is expected but cannot be found.
- :class:`pyuavcan.application.register.ValueConversionError` if a register is found but its value
cannot be converted to the correct type.
.. doctest::
:hide:
>>> import tests
>>> tests.asyncio_allow_event_loop_access_from_top_level()
>>> from pyuavcan.application.register import ValueProxy, Natural16, Natural32
>>> reg = {
... "uavcan.udp.iface": ValueProxy("127.99.0.0"),
... "uavcan.node.id": ValueProxy(Natural16([257])),
... }
>>> tr = make_transport(reg)
>>> tr
UDPTransport('127.99.1.1', local_node_id=257, ...)
>>> tr.close()
>>> tr = make_transport(reg, reconfigurable=True) # Same but reconfigurable.
>>> tr # Wrapped into RedundantTransport.
RedundantTransport(UDPTransport('127.99.1.1', local_node_id=257, ...))
>>> tr.close()
>>> int(reg["uavcan.udp.mtu"]) # Defaults created automatically to expose all configurables.
1200
>>> int(reg["uavcan.can.mtu"])
64
>>> reg["uavcan.can.bitrate"].ints
[1000000, 4000000]
>>> reg = { # Triply-redundant heterogeneous transport:
... "uavcan.udp.iface": ValueProxy("127.99.0.15 127.111.0.15"), # Double UDP transport
... "uavcan.serial.iface": ValueProxy("socket://127.0.0.1:50905"), # Serial transport
... }
>>> tr = make_transport(reg) # The node-ID was not set, so the transport is anonymous.
>>> tr # doctest: +NORMALIZE_WHITESPACE
RedundantTransport(UDPTransport('127.99.0.15', local_node_id=None, ...),
UDPTransport('127.111.0.15', local_node_id=None, ...),
SerialTransport('socket://127.0.0.1:50905', local_node_id=None, ...))
>>> tr.close()
>>> reg = {
... "uavcan.can.iface": ValueProxy("virtual: virtual:"), # Doubly-redundant CAN
... "uavcan.can.mtu": ValueProxy(Natural16([32])),
... "uavcan.can.bitrate": ValueProxy(Natural32([500_000, 2_000_000])),
... "uavcan.node.id": ValueProxy(Natural16([123])),
... }
>>> tr = make_transport(reg)
>>> tr # doctest: +NORMALIZE_WHITESPACE
RedundantTransport(CANTransport(PythonCANMedia('virtual:', mtu=32), local_node_id=123),
CANTransport(PythonCANMedia('virtual:', mtu=32), local_node_id=123))
>>> tr.close()
>>> reg = {
... "uavcan.udp.iface": ValueProxy("127.99.1.1"), # Per the standard register specs,
... "uavcan.node.id": ValueProxy(Natural16([0xFFFF])), # 0xFFFF means unset/anonymous.
... }
>>> tr = make_transport(reg)
>>> tr
UDPTransport('127.99.1.1', local_node_id=None, ...)
>>> tr.close()
>>> tr = make_transport({})
>>> tr is None
True
>>> tr = make_transport({}, reconfigurable=True)
>>> tr # Redundant transport with no inferiors.
RedundantTransport()
"""
def init(name: str, default: RelaxedValue) -> ValueProxy:
return registers.setdefault("uavcan." + name, ValueProxy(default))
# Per Specification, if uavcan.node.id = 65535, the node-ID is unspecified.
node_id: Optional[int] = int(init("node.id", Natural16([0xFFFF])))
# TODO: currently, we raise an error if the node-ID setting exceeds the maximum allowed value for the current
# transport, but the spec recommends that we should handle this as if the node-ID was not set at all.
if node_id is not None and not (0 <= node_id < 0xFFFF):
node_id = None
transports = list(itertools.chain(*(f(registers, node_id) for f in _SPECIALIZATIONS)))
assert all(isinstance(t, pyuavcan.transport.Transport) for t in transports)
if not reconfigurable:
if not transports:
return None
if len(transports) == 1:
return transports[0]
from pyuavcan.transport.redundant import RedundantTransport
red = RedundantTransport()
for tr in transports:
red.attach_inferior(tr)
return red
def _make_udp(
registers: MutableMapping[str, ValueProxy], node_id: Optional[int]
) -> Iterator[pyuavcan.transport.Transport]:
def init(name: str, default: RelaxedValue) -> ValueProxy:
return registers.setdefault("uavcan.udp." + name, ValueProxy(default))
ip_list = str(init("iface", "")).split()
mtu = int(init("mtu", Natural16([1200])))
srv_mult = int(init("duplicate_service_transfers", False)) + 1
if ip_list:
from pyuavcan.transport.udp import UDPTransport
for ip in ip_list:
yield UDPTransport(ip, node_id, mtu=mtu, service_transfer_multiplier=srv_mult)
def _make_serial(
registers: MutableMapping[str, ValueProxy], node_id: Optional[int]
) -> Iterator[pyuavcan.transport.Transport]:
def init(name: str, default: RelaxedValue) -> ValueProxy:
return registers.setdefault("uavcan.serial." + name, ValueProxy(default))
port_list = str(init("iface", "")).split()
srv_mult = int(init("duplicate_service_transfers", False)) + 1
baudrate = int(init("baudrate", Natural32([0]))) or None
if port_list:
from pyuavcan.transport.serial import SerialTransport
for port in port_list:
yield SerialTransport(str(port), node_id, service_transfer_multiplier=srv_mult, baudrate=baudrate)
def _make_can(
registers: MutableMapping[str, ValueProxy], node_id: Optional[int]
) -> Iterator[pyuavcan.transport.Transport]:
def init(name: str, default: RelaxedValue) -> ValueProxy:
return registers.setdefault("uavcan.can." + name, ValueProxy(default))
iface_list = str(init("iface", "")).split()
mtu = int(init("mtu", Natural16([64])))
br_arb, br_data = init("bitrate", Natural32([1_000_000, 4_000_000])).ints
if iface_list:
from pyuavcan.transport.can import CANTransport
for iface in iface_list:
media: pyuavcan.transport.can.media.Media
if iface.lower().startswith("socketcan:"):
from pyuavcan.transport.can.media.socketcan import SocketCANMedia
media = SocketCANMedia(iface.split(":")[-1], mtu=mtu)
else:
from pyuavcan.transport.can.media.pythoncan import PythonCANMedia
media = PythonCANMedia(iface, br_arb if br_arb == br_data else (br_arb, br_data), mtu)
yield CANTransport(media, node_id)
def _make_loopback(
registers: MutableMapping[str, ValueProxy], node_id: Optional[int]
) -> Iterator[pyuavcan.transport.Transport]:
# Not sure if exposing this is a good idea because the loopback transport is hardly useful outside of test envs.
if registers.setdefault("uavcan.loopback", ValueProxy(False)):
from pyuavcan.transport.loopback import LoopbackTransport
yield LoopbackTransport(node_id)
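# Collect every ``_make_*`` factory defined above by scanning the module
# globals; each factory inspects its own register group and yields zero or
# more transport instances, which make_transport() chains together.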
_SPECIALIZATIONS: Sequence[
Callable[[MutableMapping[str, ValueProxy], Optional[int]], Iterator[pyuavcan.transport.Transport]]
] = [v for k, v in globals().items() if callable(v) and k.startswith("_make_")]
assert len(_SPECIALIZATIONS) >= 4
|
|
#!/usr/bin/env python
import json
import sys
from os import path
import re
from markdown.extensions.toc import slugify
if __package__ is None:
sys.path.append( path.dirname( path.dirname( path.abspath(__file__) ) ) )
from apib_extra_parse_utils import get_nested_parameter_values_description
from apib_extra_parse_utils import parse_to_markdown
else:
from ..apib_extra_parse_utils import get_nested_parameter_values_description
from ..apib_extra_parse_utils import parse_to_markdown
from data_structures import parser_json_data_structures
from instantiate_body import instantiate_all_example_body
from instantiate_uri import instantiate_request_uri_templates
from metadata import parse_meta_data
from order_uri import order_uri_template_of_json
def extract_markdown_header_dict(markdown_header):
"""Returns a dict with the elements of a given Markdown header (for resources or actions)"""
markdown_header = markdown_header.lstrip('#').strip()
p = re.compile("(.*) \[(\w*) (.*)\]")
header_dict = {}
if p.match( markdown_header ):
header_groups = p.match(markdown_header).groups()
header_dict['name'] = header_groups[0]
header_dict['method'] = header_groups[1]
header_dict['uriTemplate'] = header_groups[2]
else:
p = re.compile("(.*) \[(.*)\]")
header_groups = p.match( markdown_header ).groups()
header_dict['name'] = header_groups[0]
header_dict['uriTemplate'] = header_groups[1]
return header_dict
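# Illustrative examples (the URI templates are hypothetical):
#
#     extract_markdown_header_dict('## Entity [GET /v2/entities/{id}]')
#     # -> {'name': 'Entity', 'method': 'GET', 'uriTemplate': '/v2/entities/{id}'}
#     extract_markdown_header_dict('## Entities [/v2/entities]')
#     # -> {'name': 'Entities', 'uriTemplate': '/v2/entities'}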
def add_nested_parameter_description_to_json(API_blueprint_file_path, json_content):
"""Extracts all nested description for`parameter values and adds them to the JSON.
Arguments:
API_specification_path -- path to the specification file where all the links will be extracted from.
json_content -- JSON object where all the links will be added.
"""
nested_descriptions_list = get_nested_parameter_values_description(API_blueprint_file_path)
for nested_description in nested_descriptions_list:
for parameter in nested_description["parameters"]:
for value in parameter["values"]:
add_description_to_json_parameter_value(json_content,
nested_description["parent"],
parameter["name"],
value["name"],
value["description"])
def add_description_to_json_parameter_value(json_content, resource_or_action_markdown_header, parameter_name, value_name, value_description):
""""""
wanted_object = extract_markdown_header_dict( resource_or_action_markdown_header)
found_object = None
if 'method' in wanted_object:
for resource_group in json_content['resourceGroups']:
for resource in resource_group['resources']:
for action in resource['actions']:
if( action['name'] == wanted_object['name'] and action['method'] == wanted_object['method'] and action['attributes']['uriTemplate'] == wanted_object['uriTemplate'] ):
found_object = action
break
else:
for resource_group in json_content['resourceGroups']:
for resource in resource_group['resources']:
if resource['name'] == wanted_object['name'] and resource['uriTemplate'] == wanted_object['uriTemplate']:
found_object = resource
break
    if found_object is not None:
add_description_to_json_object_parameter_value(found_object, parameter_name, value_name, value_description)
def add_description_to_json_object_parameter_value(JSON_object, parameter_name, value_name, value_description):
""""""
value_object = None
for object_parameter in JSON_object['parameters']:
if object_parameter['name'] == parameter_name:
for parameter_value in object_parameter['values']:
if parameter_value['value'] == value_name:
value_object = parameter_value
    if value_object is not None:
value_object['description'] = value_description
def get_links_from_description(description):
"""Find via regex all the links in a description string"""
link_regex = re.compile( r"\[(?P<linkText>[^\(\)\[\]]*)\]\((?P<linkRef>[^\(\)\[\]]*)\)" )
auto_link_regex = re.compile(r"\<(?P<linkRef>http[s]?://[^\"]*)\>")
html_link_regex = re.compile(r"\<a href=\"(?P<linkRef>http[s]?://[^\"]*)\"\>(?P<linkText>[^\<]*)\</a>")
links = []
link_matches = link_regex.findall(description)
if link_matches:
for link_match in link_matches:
link = {}
link['title'] = link_match[0]
link['url'] = link_match[1]
links.append(link)
else:
link_matches = auto_link_regex.findall(description)
if link_matches:
for link_match in link_matches:
link = {}
link['title'] = link_match
link['url'] = link_match
links.append(link)
else:
link_matches = html_link_regex.findall(description)
if link_matches:
for link_match in link_matches:
link = {}
link['title'] = link_match[1]
link['url'] = link_match[0]
links.append(link)
return links
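# Illustrative sketch (not called anywhere; the strings are made-up examples):
# the three regexes above are tried in order -- Markdown links first, then
# autolinks, then HTML anchors.
def _example_get_links_from_description():
    markdown = get_links_from_description("See [the spec](http://example.com/spec).")
    # -> [{'title': 'the spec', 'url': 'http://example.com/spec'}]
    auto = get_links_from_description("See <http://example.com/spec>.")
    # -> [{'title': 'http://example.com/spec', 'url': 'http://example.com/spec'}]
    return markdown, auto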
def get_links_api_metadata(section):
"""Recursively get links from the api_metadata json section."""
links = []
links += get_links_from_description(section["body"])
for subsection in section["subsections"]:
links += get_links_api_metadata(subsection)
return links
def parse_json_description(JSON_element, links):
"""Search for a 'decription' key in the current object and parse ti as markdown
Arguments:
JSON_element -- JSON element to iterate and parse
links - List of links gathered from the descriptions
"""
if type(JSON_element) is dict:
for key in JSON_element:
if key == "description":
JSON_element[key] = parse_to_markdown(JSON_element[key])
for link in get_links_from_description(JSON_element[key]):
if link not in links:
links.append(link)
else:
JSON_element[key] = parse_json_description(JSON_element[key], links)
elif type(JSON_element) is list:
for key in range(len(JSON_element)):
JSON_element[key] = parse_json_description(JSON_element[key], links)
return JSON_element
def add_metadata_to_json(metadata, json_content):
"""Adds metadata values to a json object
Arguments:
metadata -- Metadata values in JSON format
json_content -- JSON object
"""
json_content['api_metadata'] = {}
for metadataKey in metadata:
json_content['api_metadata'][metadataKey] = metadata[metadataKey]
def parse_json_descriptions_and_get_links(json_content):
"""Gets the descriptions of resources and actions and parses them as markdown. Saves the result in the same JSON file.
Arguments:
json_content -- JSON object containing the parsed apib.
"""
links = []
for metadatum in json_content['metadata']:
if 'APIARY_PROJECT' == metadatum['name']:
link = {"title": "Apiary project",
"url": "http://docs.{}.apiary.io/#reference".format(metadatum['value'])}
links.append(link)
if 'GITHUB_SOURCE' == metadatum['name']:
link = {"title": "Github source", "url":metadatum['value']}
links.append(link)
# Abstract
for link in get_links_from_description(json_content["description"]):
if link not in links: links.append(link)
# API Metadata
for link in get_links_api_metadata(json_content["api_metadata"]):
if link not in links: links.append(link)
json_content = parse_json_description(json_content, links)
return links
def find_and_mark_empty_resources(json_content):
"""Makes a resource able to be ignored by emprtying its title.
When a resource has only one action and they share names, the APIB declared an action witohut parent resource.
"""
for resource_group in json_content["resourceGroups"]:
for resource in resource_group["resources"]:
if len(resource["actions"]) == 1:
if resource["actions"][0]["name"] == resource["name"]:
resource["ignoreTOC"] = True
else:
resource["ignoreTOC"] = False
def render_description(json_content):
"""Escaping ampersand symbol form URIs.
Arguments:
JSON_file_path -- path to the JSON file where the ampersand will be be escaped in URIs.
"""
json_content["description"] = parse_to_markdown(json_content["description"])
def escape_requests_responses_json(json_content):
"""Identifies when the body of a request or response uses an XML like type and escapes the '<' for browser rendering.
Arguments:
json_content -- JSON content where requests and responses with XML like body will be escaped.
"""
for resource_group in json_content["resourceGroups"]:
for resource in resource_group["resources"]:
for action in resource["actions"]:
for example in action["examples"]:
for request in example["requests"]:
if request["body"]:
request["body"] = request["body"].replace("<", "<")
if not "sections" in request["content"][0]:
request["content"][0]["content"] = request["content"][0]["content"].replace("<", "<")
for response in example["responses"]:
if response["body"]:
response["body"] = response["body"].replace("<", "<")
if not "sections" in response["content"][0]:
response["content"][0]["content"] = response["content"][0]["content"].replace("<", "<")
def escape_ampersand_uri_templates(json_content):
"""Renders the description of the API spscification to display it properly.
Arguments:
json_content - json object containing the content to be replaced.
"""
if(isinstance(json_content, dict)):
for key, value in json_content.iteritems():
if isinstance(value, dict) or isinstance(value, list):
escape_ampersand_uri_templates(value)
elif key == 'uriTemplate':
json_content[key] = json_content[key].replace('&', '&')
elif(isinstance(json_content,list)):
for value in json_content:
if isinstance(value, dict) or isinstance(value, list):
escape_ampersand_uri_templates(value)
def generate_resources_and_action_ids(json_content):
"""Generate an ID for every resource and action in the given JSON file
Arguments:
json_content - JSON object containing the API parsed definition"""
for resource_group in json_content["resourceGroups"]:
for resource in resource_group["resources"]:
if len( resource["name"] ) > 0:
resource["id"] = 'resource_' + slugify( resource["name"], '-' )
else:
resource["id"] = 'resource_' + slugify( resource["uriTemplate"], '-' )
for action in resource["actions"]:
if len( action["name"] ) > 0:
action["id"] = 'action_' + slugify( action["name"],'-' )
else:
if len( action["attributes"]["uriTemplate"] ) > 0:
action["id"] = 'action_' + slugify( action["attributes"]["uriTemplate"], '-' )
else:
if resource["ignoreTOC"] == True:
action["id"] = 'action_' + slugify( resource["uriTemplate"] + action["method"], '-' )
else:
action["id"] = 'action_' + slugify( resource["name"] + action["method"], '-' )
def remove_redundant_spaces(json_content):
"""Remove redundant spaces from names of resources and actions
Arguments:
json_content - a JSON object containing the API parsed definition"""
for resource_group in json_content["resourceGroups"]:
resource_group["name"] = re.sub( " +", " ", resource_group["name"] )
for resource in resource_group["resources"]:
resource["name"] = re.sub( " +", " ", resource["name"] )
for action in resource["actions"]:
action["name"] = re.sub( " +", " ", action["name"] )
def postprocess_drafter_json(JSON_file_path, API_blueprint_file_path, API_extra_sections_file_path, is_PDF):
"""Apply a set of modifications to a JSON file containing an API specification"""
with open(JSON_file_path, 'rU') as json_file:
json_content = json.load(json_file)
add_metadata_to_json(parse_meta_data(API_extra_sections_file_path), json_content)
add_nested_parameter_description_to_json(API_blueprint_file_path, json_content)
links = parse_json_descriptions_and_get_links(json_content)
json_content['reference_links'] = links
instantiate_request_uri_templates(json_content)
    order_uri_template_of_json(json_content)
parser_json_data_structures(json_content)
find_and_mark_empty_resources(json_content)
render_description(json_content)
escape_requests_responses_json(json_content)
escape_ampersand_uri_templates(json_content)
generate_resources_and_action_ids(json_content)
remove_redundant_spaces(json_content)
    instantiate_all_example_body(json_content)
json_content['is_PDF'] = is_PDF
with open(JSON_file_path, 'w') as json_file:
json.dump(json_content, json_file, indent=4)
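# Minimal invocation sketch (the file names are illustrative assumptions;
# this module is normally imported and driven by the surrounding tooling
# rather than run directly):
def _example_postprocess():
    postprocess_drafter_json('spec.json', 'spec.apib', 'spec_extra.apib', is_PDF=False)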
|
|
#!/usr/bin/env python
# Author : Pierre Schnizer
"""
Wrapper for the ode solver of gsl. This solver wraps all features as described
in Chapter 25 of the gsl documentation.
The _odeiv file provides the low level wrapper. Using it directly is at your
own risk.
Here is the pythonic version of the example from the gsl documentation.
import odeiv
mu = 10.0
def func(t, y):
f = Numeric.zeros((2,), Numeric.Float) * 1.0
f[0] = y[1]
f[1] = -y[0] - mu * y[1] * (y[0] ** 2 -1);
return f
def jac(t, y):
dfdy = Numeric.zeros((2,2), Numeric.Float)
dfdy[0, 0] = 0.0
dfdy[0, 1] = 1.0
dfdy[1, 0] = -2.0 * mu * y[0] * y[1] - 1.0
dfdy[1, 1] = -mu * (y[0]**2 - 1.0)
dfdt = Numeric.zeros((2,))
return dfdy, dfdt
dimension = 2
step = odeiv.step_gear1(dimension, func, jac)
control = odeiv.control_y_new(step, 1e-6, 1e-6)
evolve = odeiv.evolve(step, control, dimension)
h = 1
t = 0.0
t1 = 100.0
y = (1.0, 0.0)
while t<t1:
t, h, y = evolve.apply(t, t1, h, y)
print t, y[0], y[1]
"""
#
# Copyright (c) 2002 by Pierre Schnizer
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
#
##
# author: Pierre Schnizer
# created: December 2002
# file: pygsl/src/odeiv/odeiv.py
import _callback
class __step:
"""
The lowest level components are the stepping functions which advance a
solution from time t to t+h for a fixed step-size h and estimate the
resulting local error.
Pure virtual class. Use a derived Object instead.
These objects are:
step_rk2
step_rk4
step_rkf45
step_rkck
step_rk8pd
step_rk2imp
step_rk4imp
step_bsimp
step_gear1
step_gear2
"""
def __init__(self, dims, func, jac=None, args=None):
"""
dimension ... the dimension of the system
        func ... the function describing the system
jac ... the jacobian matrix. optional
args ... additional arguments to pass to the function. optional
"""
self.ptr = None
if not hasattr(self, 'type'):
raise TypeError, """You can not use step directly. You should use
one of the derived classes!"""
self.ptr = _callback.gsl_odeiv_step_alloc(self.type, dims)
self.func = func
        if jac is None:
            if self.need_jacobian >= 1:
                raise ValueError, """This step object requires a Jacobian
                matrix!"""
self.jac = None
else:
self.jac = jac
self.args = args
def __del__(self):
if hasattr(self, 'ptr'):
if self.ptr != None:
_callback.gsl_odeiv_step_free(self.ptr)
def reset(self):
_callback.gsl_odeiv_step_reset(self.ptr)
def apply(self, t, h, y_in, dydt):
"""
Input t, h, y_in, dydt, func, jac:
t ... start time t
h ... step size
y_in ... start vector
dydt ... derivatives of the system at t. If not known supply None
Output:
y, yerr, dydt:
y_out ... vector at t+h
yerr ... the estimate of the absolute errors
dydt ... the derivatives of the system at t
This method applies the stepping function to the system of equations
defined by func and jac, using the step size h to advance the system
from time t and state y to time t+h. The new state of the system is
stored in y_out on output, with an estimate of the absolute error in
        each component stored in yerr. If the argument dydt is not None it
        should provide an array containing the derivatives for the system at
        time t on input. This is optional as the derivatives will be computed
        internally if they are not provided, but it allows the reuse of existing
        derivative information. On output the new derivatives of the system at
        time t+h will be stored in dydt.
"""
return _callback.gsl_odeiv_step_apply(self.ptr, t, h, y_in, dydt,
self.func, self.jac, self.args)
def order(self):
"""
This method returns the order of the stepping function on the previous
step. This order can vary if the stepping function itself is adaptive.
"""
return _callback.gsl_odeiv_step_order(self.ptr)
def name(self):
"""
This function returns the name of the stepping function.
"""
return _callback.gsl_odeiv_step_name(self.ptr)
def _get_ptr(self):
return self.ptr
def _get_func(self):
return self.func
def _get_jac(self):
return self.jac
def _get_args(self):
return self.args
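# Illustrative sketch (never executed in this module): driving a stepper
# directly with a fixed step size, without control or evolve objects. "func"
# is assumed to follow the same signature as in the module docstring above.
def _example_fixed_stepping(func, dims=2):
    stepper = step_rk4(dims, func)
    t, h, y = 0.0, 1.0e-2, (1.0, 0.0)
    while t < 1.0:
        y, yerr, dydt = stepper.apply(t, h, y, None)
        t = t + h
    return t, y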
class step_rk2(__step):
"""
Embedded 2nd order Runge-Kutta with 3rd order error estimate.
"""
type = _callback.cvar.gsl_odeiv_step_rk2
need_jacobian = 0
class step_rk4(__step):
"""
4th order (classical) Runge-Kutta.
"""
type = _callback.cvar.gsl_odeiv_step_rk4
need_jacobian = 0
class step_rkf45(__step):
"""
Embedded 4th order Runge-Kutta-Fehlberg method with 5th order error
estimate. This method is a good general-purpose integrator.
"""
type = _callback.cvar.gsl_odeiv_step_rkf45
need_jacobian = 0
class step_rkck(__step):
"""
Embedded 4th order Runge-Kutta Cash-Karp method with 5th order error
estimate.
"""
type = _callback.cvar.gsl_odeiv_step_rkck
need_jacobian = 0
class step_rk8pd(__step):
"""
Embedded 8th order Runge-Kutta Prince-Dormand method with 9th order error
estimate.
"""
type = _callback.cvar.gsl_odeiv_step_rk8pd
need_jacobian = 0
class step_rk2imp(__step):
"""
Implicit 2nd order Runge-Kutta at Gaussian points
"""
type = _callback.cvar.gsl_odeiv_step_rk2imp
need_jacobian = 0
class step_rk4imp(__step):
"""
Implicit 4th order Runge-Kutta at Gaussian points
"""
type = _callback.cvar.gsl_odeiv_step_rk4imp
need_jacobian = 0
class step_bsimp(__step):
"""
Implicit Bulirsch-Stoer method of Bader and Deuflhard. This algorithm
requires the Jacobian.
"""
type = _callback.cvar.gsl_odeiv_step_bsimp
need_jacobian = 1
class step_gear1(__step):
"""
M=1 implicit Gear method
"""
type = _callback.cvar.gsl_odeiv_step_gear1
need_jacobian = 0
class step_gear2(__step):
"""
M=2 implicit Gear method
"""
type = _callback.cvar.gsl_odeiv_step_gear2
need_jacobian = 0
HADJ_DEC = _callback.gsl_odeiv_hadj_dec
HADJ_INC = _callback.gsl_odeiv_hadj_inc
HADJ_NIL = _callback.gsl_odeiv_hadj_nil
class __control:
"""
The control function examines the proposed change to the solution and its
error estimate produced by a stepping function and attempts to determine
the optimal step-size for a user-specified level of error.
Pure virtual class for the control.
Use either control_standard_new or control_y_new or control_yp_new
"""
def __del__(self):
if hasattr(self, 'ptr'):
if self.ptr != None:
_callback.gsl_odeiv_control_free(self.ptr)
def hadjust(self, y, yerr, dydt, h):
"""
input: y, yerr, dydt
y ...
yerr ... the error estimate
dydt ...
h ... last step size
output: h, msg
h ... new step size
msg ... HADJ_DEC or HADJ_INC or HADJ_NIL. See text.
This method adjusts the step-size h using the current values of y,
yerr and dydt. If the error in the y-values yerr is found to be too
large then the step-size h is reduced and the function returns
HADJ_DEC. If the error is sufficiently small then h may be increased
and HADJ_INC is returned. The function returns HADJ_NIL if the
step-size is unchanged. The goal of the function is to estimate the
largest step-size which satisfies the user-specified accuracy
requirements for the current point.
"""
step = self.step._get_ptr()
h, msg = _callback.gsl_odeiv_control_hadjust(self.ptr, step, y, yerr,
dydt, h)
return h, msg
def name(self):
return _callback.gsl_odeiv_control_name(self.ptr)
def _get_ptr(self):
return self.ptr
class control_standard_new(__control):
"""
The standard control object is a four parameter
heuristic based on absolute and relative errors eps_abs and eps_rel, and
scaling factors a_y and a_dydt for the system state y(t) and derivatives
y'(t) respectively.
The step-size adjustment procedure for this method begins by computing the
desired error level D_i for each component,
D_i = eps_abs + eps_rel * (a_y |y_i| + a_dydt h |y'_i|)
and comparing it with the observed error E_i = |yerr_i|. If the observed
error E exceeds the desired error level D by more than 10% for any
component then the method reduces the step-size by an appropriate factor,
h_new = h_old * S * (D/E)^(1/q)
where q is the consistency order of method (e.g. q=4 for 4(5) embedded
RK), and S is a safety factor of 0.9. The ratio D/E is taken to be the
maximum of the ratios D_i/E_i.
If the observed error E is less than 50% of the desired error level D for
the maximum ratio D_i/E_i then the algorithm takes the opportunity to
increase the step-size to bring the error in line with the desired level,
    h_new = h_old * S * (D/E)^(1/(q+1))
This encompasses all the standard error scaling methods.
"""
def __init__(self, step, eps_abs, eps_rel, a_y, a_dydt):
"""
input : eps_abs, eps_rel, a_y, a_dydt
See the docstring of this class for the meaning of the input.
"""
self.step = step
self.ptr = None
self.ptr = _callback.gsl_odeiv_control_standard_new(eps_abs, eps_rel,
a_y, a_dydt)
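# Worked numeric instance of the heuristic documented above (illustrative
# values): with a 4th order method (q=4), S=0.9, and an observed error
# E=4e-6 against a desired level D=1e-6, the step is reduced to
#     h_new = h_old * 0.9 * (1e-6/4e-6)**(1/4.)  ~  0.64 * h_old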
class control_y_new(__control):
"""
Creates a new control object which will keep the local error on each step
within an absolute error of eps_abs and relative error of eps_rel with
respect to the solution y_i(t). This is equivalent to the standard control
object with a_y=1 and a_dydt=0.
See also the documentation of the control_standard_new class
"""
def __init__(self, step, eps_abs, eps_rel):
"""
input : eps_abs, eps_rel
See the docstring of this class for the meaning of the input.
"""
self.step = step
self.ptr = None
self.ptr = _callback.gsl_odeiv_control_y_new(eps_abs, eps_rel)
class control_yp_new(__control):
"""
This function creates a new control object which will keep the local error
on each step within an absolute error of eps_abs and relative error of
eps_rel with respect to the derivatives of the solution y'_i(t) . This is
equivalent to the standard control object with a_y=0 and a_dydt=1.
"""
def __init__ (self, step, eps_abs, eps_rel):
"""
input : eps_abs, eps_rel
See the docstring of this class for the meaning of the input.
"""
self.step = step
self.ptr = None
self.ptr = _callback.gsl_odeiv_control_yp_new(eps_abs, eps_rel)
class evolve:
"""
The highest level of the system is the evolution function which combines
the results of a stepping function and control function to reliably
advance the solution forward over an interval (t_0, t_1). If the control
function signals that the step-size should be decreased the evolution
function backs out of the current step and tries the proposed smaller
    step-size. This process is continued until an acceptable step-size is
found.
"""
def __init__(self, step, control, dimension):
"""
input: step, control, dimension
step ... a step object
control ... a control object
dimension ... dimension of the problem
"""
        # Keep references to the step and control objects so that their pointers remain valid
self.step = step
self.control = control
self.ptr = None
self.ptr = _callback.gsl_odeiv_evolve_alloc(dimension)
self.func = self.step._get_func()
self.jac = self.step._get_jac()
self.args = self.step._get_args()
tmp = self.step._get_ptr(), self.control._get_ptr(), self.ptr
self._solvers_tuple = tuple(tmp)
def __del__(self):
if hasattr(self, 'ptr'):
if self.ptr != None:
_callback.gsl_odeiv_evolve_free(self.ptr)
def reset(self):
"""
No input. No output
This method resets the evolution. It should be used whenever
the next use will not be a continuation of a previous step.
"""
_callback.gsl_odeiv_evolve_reset(self.ptr)
def apply(self, t, t1, h, y):
"""
input : t, t1, h, y
t ... start time
t1 ... end time
h ... initial step size
y ... start vector
output :
t ... reached time in the calculation
h ... reached step size
y ... end vector
This method advances the system from time t and position y using the
stepping function step. The new time and position are stored in t and
y on output. The initial step-size is taken as h, but this will be
modified to achieve the appropriate error bound if necessary. The
routine may make several calls to the step object in order to
determine the optimum step-size. If the step-size has been changed the
value of h will be modified on output. The maximum time t1 is
guaranteed not to be exceeded by the time-step. On the final
time-step the value of t will be set to t1 exactly.
"""
tmp = _callback.gsl_odeiv_evolve_apply(self._solvers_tuple,
self.func, self.jac, t, t1, h,
y, self.args)
return tmp
def apply_vector(self, t, t1, h, y, nsteps=1, hmax=None):
res = (nsteps,)
if hmax != None:
res = nsteps, hmax
tmp = _callback.gsl_odeiv_evolve_apply_vector(self._solvers_tuple,
self.func, self.jac, t, t1, h,
y, self.args, *res)
return tmp
|
|
# Copyright 2015, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Python tests originally created or extracted from other peoples work. The
# parts were too small to be protected.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import tempfile, sys, os
print "eval 3+3=", eval("3+3")
print "eval 4+4=", eval(" 4+4")
def functionEval1():
return eval(" 5+5")
print "eval in a function with nothing provided", functionEval1()
def functionEval2():
a = [2]
g = {}
r = eval("1+3", g)
return r, g.keys(), a
print "eval in a function with globals provided", functionEval2()
def functionEval3():
result = []
for x in eval("(1,2)"):
result.append(x)
return result
print "eval in a for loop as iterator giver", functionEval3()
print "exec on a global level",
exec( "d=2+2" )
print "2+2=",d # @UndefinedVariable
def functionExec1():
a = 1
code = "a=2"
exec( code )
return a
def functionExec2():
a = 1
code = "a=2"
exec code in globals(), locals()
return a
print "exec in function without and with locals() provided:", functionExec1(), functionExec2()
tmp_filename = tempfile.gettempdir() + "/execfile.py"
f = open(tmp_filename, 'w')
f.write("e=7\nf=8\n")
f.close()
execfile(tmp_filename)
print "execfile with defaults f,g=", e, f # @UndefinedVariable
global_vars = { 'e' : '0', 'f' : 0 }
local_vars = dict(global_vars)
execfile(tmp_filename, global_vars)
print "execfile with globals dict:", global_vars.keys()
execfile(tmp_filename, global_vars, local_vars)
print "execfile with globals and locals dict:", local_vars
def functionExecfile():
e = 0
f = 0
global_vars = { 'e' : '0', 'f' : 0 }
local_vars = dict(global_vars)
print "execfile with globals and locals dict in a function:",
x = execfile(tmp_filename, global_vars, local_vars)
print x,
print global_vars.keys(), local_vars, e, f
functionExecfile()
class classExecfile:
e = 0
f = 0
print "execfile in a class:",
# TODO: Won't work yet, Issue#5
# print execfile( tmp_filename ),
execfile(tmp_filename)
print "execfile changed local values:", e, f
f = 7
def functionExecNonesTuple():
f = 0
exec("f=1", None, None)
print "Exec with None as optimizable tuple args did update locals:", f
def functionExecNonesSyntax():
f = 0
exec "f=2" in None, None
print "Exec with None as optimizable normal args did update locals:", f
functionExecNonesTuple()
functionExecNonesSyntax()
print "Global value is untouched", f
def functionEvalNones2():
f = 11
code = 'f'
g = None
l = None
    f1 = eval(code, l, g)
print "Eval with None arguments from variables did access locals:", f1
functionEvalNones2()
def functionExecNonesTuple2():
f = 0
code = "f=1"
g = None
l = None
exec(code, l, g)
print "Exec with None as tuple args from variable did update locals:", f
def functionExecNonesSyntax2():
f = 0
code = "f=2"
g = None
l = None
exec code in l, g
print "Exec with None as normal args did update locals:", f
functionExecNonesTuple2()
functionExecNonesSyntax2()
print "Exec with a future division definition and one without:"
exec """
from __future__ import division
from __future__ import print_function
print( "3/2 is with future division", 3/2 )
"""
exec """
from __future__ import print_function
print( "3/2 is without future division", 3/2 )
"""
x = 1
y = 1
def functionGlobalsExecShadow():
global x
print "Global x outside is", x
y = 0
print "Local y is initially", y
print "Locals initially", locals()
exec """
from __future__ import print_function
x = 2
print( "Exec local x is", x )
"""
print "Function global x referenced as local x in exec is", x
exec """
from __future__ import print_function
print( "Re-exec local x", x )
"""
print "Locals after exec assigning to local x", locals()
exec """
from __future__ import print_function
global x
x = 3
print( "Exec global x is inside exec", x )
"""
print "Global x referenced as global x in exec is", x
exec """
from __future__ import print_function
def change_y():
global y
y = 4
print( "Exec function global y is", y )
y = 7
change_y()
# TODO: The below will not work
print( "Exec local y is", y )
"""
# print "Local y is afterwards", y
def print_global_y():
global y
# TODO: The below will not work
print "Global y outside", y
print_global_y()
print "Outside y", y
functionGlobalsExecShadow()
def functionWithClosureProvidedByExec():
code = "ValueError = TypeError"
exec code in None, None
def func():
print "Closure from exec not used", ValueError
func()
functionWithClosureProvidedByExec()
x = 2
def functionWithExecAffectingClosure():
x = 4
code = "d=3;x=5"
space = locals()
exec code in space
def closureMaker():
return x
return d, closureMaker() # @UndefinedVariable
print "Closure in a function with exec to not none", functionWithExecAffectingClosure()
def generatorFunctionWithExec():
yield 1
code = "y = 2"
exec code
yield y
print "Exec in a generator function", tuple(generatorFunctionWithExec())
def evalInContractions():
r1 = list( eval(str(s)) for s in range(3) )
r2 = [ eval(str(s)) for s in range(4) ]
return r1, r2
print "Eval in a list contraction or generator expression", evalInContractions()
def execDefinesFunctionToLocalsExplicity():
exec """\
def makeAddPair(a, b):
def addPair(c, d):
return (a + c, b + d)
return addPair
""" in locals()
if sys.version_info < (3,):
assert makeAddPair # @UndefinedVariable
return "yes"
print "Exec adds functions declares in explicit locals() given.", execDefinesFunctionToLocalsExplicity()
os.unlink(tmp_filename)
def execWithShortTuple():
try:
exec("print hey",)
except Exception as e:
return "gives exception: " + repr(e)
print "Exec with too short tuple argument:", execWithShortTuple()
|
|
# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""shell/term utilities, useful to write some python scripts instead of shell
scripts.
"""
__docformat__ = "restructuredtext en"
import os
import glob
import shutil
import stat
import sys
import tempfile
import time
import fnmatch
import errno
import string
import random
from os.path import exists, isdir, islink, basename, join
from logilab.common import STD_BLACKLIST, _handle_blacklist
from logilab.common.compat import raw_input
from logilab.common.compat import str_to_bytes
try:
from logilab.common.proc import ProcInfo, NoSuchProcess
except ImportError:
# windows platform
class NoSuchProcess(Exception): pass
def ProcInfo(pid):
raise NoSuchProcess()
class tempdir(object):
def __enter__(self):
self.path = tempfile.mkdtemp()
return self.path
def __exit__(self, exctype, value, traceback):
# rmtree in all cases
shutil.rmtree(self.path)
return traceback is None
class pushd(object):
def __init__(self, directory):
self.directory = directory
def __enter__(self):
self.cwd = os.getcwd()
os.chdir(self.directory)
return self.directory
def __exit__(self, exctype, value, traceback):
os.chdir(self.cwd)
def chown(path, login=None, group=None):
"""Same as `os.chown` function but accepting user login or group name as
argument. If login or group is omitted, it's left unchanged.
Note: you must own the file to chown it (or be root). Otherwise OSError is raised.
"""
if login is None:
uid = -1
else:
try:
uid = int(login)
except ValueError:
import pwd # Platforms: Unix
uid = pwd.getpwnam(login).pw_uid
if group is None:
gid = -1
else:
try:
gid = int(group)
except ValueError:
import grp
gid = grp.getgrnam(group).gr_gid
os.chown(path, uid, gid)
def mv(source, destination, _action=shutil.move):
"""A shell-like mv, supporting wildcards.
"""
sources = glob.glob(source)
if len(sources) > 1:
assert isdir(destination)
for filename in sources:
_action(filename, join(destination, basename(filename)))
else:
try:
source = sources[0]
except IndexError:
raise OSError('No file matching %s' % source)
if isdir(destination) and exists(destination):
destination = join(destination, basename(source))
try:
_action(source, destination)
except OSError, ex:
raise OSError('Unable to move %r to %r (%s)' % (
source, destination, ex))
def rm(*files):
"""A shell-like rm, supporting wildcards.
"""
for wfile in files:
for filename in glob.glob(wfile):
if islink(filename):
os.remove(filename)
elif isdir(filename):
shutil.rmtree(filename)
else:
os.remove(filename)
def cp(source, destination):
"""A shell-like cp, supporting wildcards.
"""
mv(source, destination, _action=shutil.copy)
def find(directory, exts, exclude=False, blacklist=STD_BLACKLIST):
"""Recursively find files ending with the given extensions from the directory.
:type directory: str
:param directory:
directory where the search should start
:type exts: basestring or list or tuple
:param exts:
        extension or list of extensions to search
    :type exclude: boolean
    :param exclude:
        if this argument is True, return files NOT ending with the given
        extensions
:type blacklist: list or tuple
:param blacklist:
optional list of files or directory to ignore, default to the value of
`logilab.common.STD_BLACKLIST`
:rtype: list
:return:
the list of all matching files
"""
if isinstance(exts, basestring):
exts = (exts,)
if exclude:
def match(filename, exts):
for ext in exts:
if filename.endswith(ext):
return False
return True
else:
def match(filename, exts):
for ext in exts:
if filename.endswith(ext):
return True
return False
files = []
for dirpath, dirnames, filenames in os.walk(directory):
_handle_blacklist(blacklist, dirnames, filenames)
# don't append files if the directory is blacklisted
dirname = basename(dirpath)
if dirname in blacklist:
continue
files.extend([join(dirpath, f) for f in filenames if match(f, exts)])
return files
def globfind(directory, pattern, blacklist=STD_BLACKLIST):
"""Recursively finds files matching glob `pattern` under `directory`.
This is an alternative to `logilab.common.shellutils.find`.
:type directory: str
:param directory:
directory where the search should start
:type pattern: basestring
:param pattern:
the glob pattern (e.g *.py, foo*.py, etc.)
:type blacklist: list or tuple
:param blacklist:
optional list of files or directory to ignore, default to the value of
`logilab.common.STD_BLACKLIST`
:rtype: iterator
:return:
iterator over the list of all matching files
"""
for curdir, dirnames, filenames in os.walk(directory):
_handle_blacklist(blacklist, dirnames, filenames)
for fname in fnmatch.filter(filenames, pattern):
yield join(curdir, fname)
def unzip(archive, destdir):
import zipfile
if not exists(destdir):
os.mkdir(destdir)
zfobj = zipfile.ZipFile(archive)
for name in zfobj.namelist():
if name.endswith('/'):
os.mkdir(join(destdir, name))
else:
outfile = open(join(destdir, name), 'wb')
outfile.write(zfobj.read(name))
outfile.close()
class Execute:
"""This is a deadlock safe version of popen2 (no stdin), that returns
an object with errorlevel, out and err.
"""
def __init__(self, command):
outfile = tempfile.mktemp()
errfile = tempfile.mktemp()
self.status = os.system("( %s ) >%s 2>%s" %
(command, outfile, errfile)) >> 8
self.out = open(outfile, "r").read()
self.err = open(errfile, "r").read()
os.remove(outfile)
os.remove(errfile)
def acquire_lock(lock_file, max_try=10, delay=10, max_delay=3600):
"""Acquire a lock represented by a file on the file system
If the process written in lock file doesn't exist anymore, we remove the
lock file immediately
If age of the lock_file is greater than max_delay, then we raise a UserWarning
"""
count = abs(max_try)
while count:
try:
fd = os.open(lock_file, os.O_EXCL | os.O_RDWR | os.O_CREAT)
os.write(fd, str_to_bytes(str(os.getpid())) )
os.close(fd)
return True
except OSError, e:
if e.errno == errno.EEXIST:
try:
fd = open(lock_file, "r")
pid = int(fd.readline())
pi = ProcInfo(pid)
age = (time.time() - os.stat(lock_file)[stat.ST_MTIME])
                    if age / max_delay > 1:
raise UserWarning("Command '%s' (pid %s) has locked the "
"file '%s' for %s minutes"
% (pi.name(), pid, lock_file, age/60))
except UserWarning:
raise
except NoSuchProcess:
os.remove(lock_file)
except Exception:
                    # This try block is not essential and can be skipped.
                    # Note: ProcInfo is only available on Linux; process
                    # information may not be accessible, or lock_file may
                    # no longer be present.
pass
else:
raise
count -= 1
time.sleep(delay)
else:
raise Exception('Unable to acquire %s' % lock_file)
def release_lock(lock_file):
"""Release a lock represented by a file on the file system."""
os.remove(lock_file)
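# Minimal usage sketch (the lock path is an illustrative assumption):
# acquire_lock blocks by polling, so pair it with release_lock in a
# try/finally block.
def _example_locked_section():
    acquire_lock('/tmp/myscript.lock')
    try:
        pass # work that must not run concurrently goes here
    finally:
        release_lock('/tmp/myscript.lock')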
class ProgressBar(object):
"""A simple text progression bar."""
def __init__(self, nbops, size=20, stream=sys.stdout, title=''):
if title:
self._fstr = '\r%s [%%-%ss]' % (title, int(size))
else:
self._fstr = '\r[%%-%ss]' % int(size)
self._stream = stream
self._total = nbops
self._size = size
self._current = 0
self._progress = 0
self._current_text = None
self._last_text_write_size = 0
def _get_text(self):
return self._current_text
def _set_text(self, text=None):
if text != self._current_text:
self._current_text = text
self.refresh()
def _del_text(self):
self.text = None
text = property(_get_text, _set_text, _del_text)
def update(self):
"""Update the progression bar."""
self._current += 1
progress = int((float(self._current)/float(self._total))*self._size)
if progress > self._progress:
self._progress = progress
self.refresh()
def refresh(self):
"""Refresh the progression bar display."""
self._stream.write(self._fstr % ('.' * min(self._progress, self._size)) )
if self._last_text_write_size or self._current_text:
template = ' %%-%is' % (self._last_text_write_size)
text = self._current_text
if text is None:
text = ''
self._stream.write(template % text)
self._last_text_write_size = len(text.rstrip())
self._stream.flush()
def finish(self):
self._stream.write('\n')
self._stream.flush()
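# Minimal usage sketch (never executed here): one update() call per
# operation, then finish() to terminate the output line.
def _example_progress(nbops=100):
    pb = ProgressBar(nbops, title='copying')
    for _ in xrange(nbops):
        pb.update()
    pb.finish()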
class DummyProgressBar(object):
    __slots__ = ('text',)
def refresh(self):
pass
def update(self):
pass
def finish(self):
pass
_MARKER = object()
class progress(object):
def __init__(self, nbops=_MARKER, size=_MARKER, stream=_MARKER, title=_MARKER, enabled=True):
self.nbops = nbops
self.size = size
self.stream = stream
self.title = title
self.enabled = enabled
def __enter__(self):
if self.enabled:
kwargs = {}
for attr in ('nbops', 'size', 'stream', 'title'):
value = getattr(self, attr)
if value is not _MARKER:
kwargs[attr] = value
self.pb = ProgressBar(**kwargs)
else:
self.pb = DummyProgressBar()
return self.pb
def __exit__(self, exc_type, exc_val, exc_tb):
self.pb.finish()
class RawInput(object):
def __init__(self, input=None, printer=None):
self._input = input or raw_input
self._print = printer
def ask(self, question, options, default):
assert default in options
choices = []
for option in options:
if option == default:
label = option[0].upper()
else:
label = option[0].lower()
if len(option) > 1:
label += '(%s)' % option[1:].lower()
choices.append((option, label))
prompt = "%s [%s]: " % (question,
'/'.join([opt[1] for opt in choices]))
tries = 3
while tries > 0:
answer = self._input(prompt).strip().lower()
if not answer:
return default
possible = [option for option, label in choices
if option.lower().startswith(answer)]
if len(possible) == 1:
return possible[0]
elif len(possible) == 0:
msg = '%s is not an option.' % answer
else:
msg = ('%s is an ambiguous answer, do you mean %s ?' % (
answer, ' or '.join(possible)))
if self._print:
self._print(msg)
else:
print msg
tries -= 1
raise Exception('unable to get a sensible answer')
def confirm(self, question, default_is_yes=True):
default = default_is_yes and 'y' or 'n'
answer = self.ask(question, ('y', 'n'), default)
return answer == 'y'
ASK = RawInput()
def getlogin():
"""avoid using os.getlogin() because of strange tty / stdin problems
(man 3 getlogin)
Another solution would be to use $LOGNAME, $USER or $USERNAME
"""
if sys.platform != 'win32':
import pwd # Platforms: Unix
return pwd.getpwuid(os.getuid())[0]
else:
return os.environ['USERNAME']
def generate_password(length=8, vocab=string.ascii_letters + string.digits):
"""dumb password generation function"""
pwd = ''
for i in xrange(length):
pwd += random.choice(vocab)
return pwd
|
|
from six import binary_type
from typing import Any, AnyStr, Callable, Dict, Iterable, List, MutableMapping, Optional, Text
from django.conf import settings
from django.core.exceptions import DisallowedHost
from django.utils.translation import ugettext as _
from django.utils.deprecation import MiddlewareMixin
from zerver.lib.response import json_error, json_response_from_error
from zerver.lib.subdomains import get_subdomain
from zerver.lib.exceptions import JsonableError, ErrorCode
from django.db import connection
from django.http import HttpRequest, HttpResponse, StreamingHttpResponse
from zerver.lib.utils import statsd
from zerver.lib.queue import queue_json_publish
from zerver.lib.cache import get_remote_cache_time, get_remote_cache_requests
from zerver.lib.bugdown import get_bugdown_time, get_bugdown_requests
from zerver.models import Realm, flush_per_request_caches, get_realm
from zerver.lib.exceptions import RateLimited
from django.contrib.sessions.middleware import SessionMiddleware
from django.views.csrf import csrf_failure as html_csrf_failure
from django.utils.cache import patch_vary_headers
from django.utils.http import cookie_date
from django.shortcuts import redirect, render
import logging
import time
import cProfile
import traceback
logger = logging.getLogger('zulip.requests')
def record_request_stop_data(log_data):
# type: (MutableMapping[str, Any]) -> None
log_data['time_stopped'] = time.time()
log_data['remote_cache_time_stopped'] = get_remote_cache_time()
log_data['remote_cache_requests_stopped'] = get_remote_cache_requests()
log_data['bugdown_time_stopped'] = get_bugdown_time()
log_data['bugdown_requests_stopped'] = get_bugdown_requests()
if settings.PROFILE_ALL_REQUESTS:
log_data["prof"].disable()
def async_request_stop(request):
# type: (HttpRequest) -> None
record_request_stop_data(request._log_data)
def record_request_restart_data(log_data):
# type: (MutableMapping[str, Any]) -> None
if settings.PROFILE_ALL_REQUESTS:
log_data["prof"].enable()
log_data['time_restarted'] = time.time()
log_data['remote_cache_time_restarted'] = get_remote_cache_time()
log_data['remote_cache_requests_restarted'] = get_remote_cache_requests()
log_data['bugdown_time_restarted'] = get_bugdown_time()
log_data['bugdown_requests_restarted'] = get_bugdown_requests()
def async_request_restart(request):
# type: (HttpRequest) -> None
if "time_restarted" in request._log_data:
# Don't destroy data when being called from
# finish_current_handler
return
record_request_restart_data(request._log_data)
def record_request_start_data(log_data):
# type: (MutableMapping[str, Any]) -> None
if settings.PROFILE_ALL_REQUESTS:
log_data["prof"] = cProfile.Profile()
log_data["prof"].enable()
log_data['time_started'] = time.time()
log_data['remote_cache_time_start'] = get_remote_cache_time()
log_data['remote_cache_requests_start'] = get_remote_cache_requests()
log_data['bugdown_time_start'] = get_bugdown_time()
log_data['bugdown_requests_start'] = get_bugdown_requests()
def timedelta_ms(timedelta):
# type: (float) -> float
return timedelta * 1000
def format_timedelta(timedelta):
# type: (float) -> str
if (timedelta >= 1):
return "%.1fs" % (timedelta)
return "%.0fms" % (timedelta_ms(timedelta),)
def is_slow_query(time_delta, path):
# type: (float, Text) -> bool
if time_delta < 1.2:
return False
is_exempt = \
path in ["/activity", "/json/report/error",
"/api/v1/deployments/report_error"] \
or path.startswith("/realm_activity/") \
or path.startswith("/user_activity/")
if is_exempt:
return time_delta >= 5
if 'webathena_kerberos' in path:
return time_delta >= 10
return True
def write_log_line(log_data, path, method, remote_ip, email, client_name,
status_code=200, error_content=None, error_content_iter=None):
# type: (MutableMapping[str, Any], Text, str, str, Text, Text, int, Optional[AnyStr], Optional[Iterable[AnyStr]]) -> None
assert error_content is None or error_content_iter is None
if error_content is not None:
error_content_iter = (error_content,)
# For statsd timer name
if path == '/':
statsd_path = u'webreq'
else:
statsd_path = u"webreq.%s" % (path[1:].replace('/', '.'),)
# Remove non-ascii chars from path (there should be none, if there are it's
# because someone manually entered a nonexistent path), as UTF-8 chars make
# statsd sad when it sends the key name over the socket
statsd_path = statsd_path.encode('ascii', errors='ignore').decode("ascii")
blacklisted_requests = ['do_confirm', 'send_confirm',
'eventslast_event_id', 'webreq.content', 'avatar', 'user_uploads',
'password.reset', 'static', 'json.bots', 'json.users', 'json.streams',
'accounts.unsubscribe', 'apple-touch-icon', 'emoji', 'json.bots',
'upload_file', 'realm_activity', 'user_activity']
suppress_statsd = any((blacklisted in statsd_path for blacklisted in blacklisted_requests))
time_delta = -1
# A time duration of -1 means the StartLogRequests middleware
# didn't run for some reason
optional_orig_delta = ""
if 'time_started' in log_data:
time_delta = time.time() - log_data['time_started']
if 'time_stopped' in log_data:
orig_time_delta = time_delta
time_delta = ((log_data['time_stopped'] - log_data['time_started']) +
(time.time() - log_data['time_restarted']))
optional_orig_delta = " (lp: %s)" % (format_timedelta(orig_time_delta),)
remote_cache_output = ""
if 'remote_cache_time_start' in log_data:
remote_cache_time_delta = get_remote_cache_time() - log_data['remote_cache_time_start']
remote_cache_count_delta = get_remote_cache_requests() - log_data['remote_cache_requests_start']
if 'remote_cache_requests_stopped' in log_data:
# (now - restarted) + (stopped - start) = (now - start) + (stopped - restarted)
remote_cache_time_delta += (log_data['remote_cache_time_stopped'] -
log_data['remote_cache_time_restarted'])
remote_cache_count_delta += (log_data['remote_cache_requests_stopped'] -
log_data['remote_cache_requests_restarted'])
if (remote_cache_time_delta > 0.005):
remote_cache_output = " (mem: %s/%s)" % (format_timedelta(remote_cache_time_delta),
remote_cache_count_delta)
if not suppress_statsd:
statsd.timing("%s.remote_cache.time" % (statsd_path,), timedelta_ms(remote_cache_time_delta))
statsd.incr("%s.remote_cache.querycount" % (statsd_path,), remote_cache_count_delta)
startup_output = ""
if 'startup_time_delta' in log_data and log_data["startup_time_delta"] > 0.005:
startup_output = " (+start: %s)" % (format_timedelta(log_data["startup_time_delta"]))
bugdown_output = ""
if 'bugdown_time_start' in log_data:
bugdown_time_delta = get_bugdown_time() - log_data['bugdown_time_start']
bugdown_count_delta = get_bugdown_requests() - log_data['bugdown_requests_start']
if 'bugdown_requests_stopped' in log_data:
# (now - restarted) + (stopped - start) = (now - start) + (stopped - restarted)
bugdown_time_delta += (log_data['bugdown_time_stopped'] -
log_data['bugdown_time_restarted'])
bugdown_count_delta += (log_data['bugdown_requests_stopped'] -
log_data['bugdown_requests_restarted'])
if (bugdown_time_delta > 0.005):
bugdown_output = " (md: %s/%s)" % (format_timedelta(bugdown_time_delta),
bugdown_count_delta)
if not suppress_statsd:
statsd.timing("%s.markdown.time" % (statsd_path,), timedelta_ms(bugdown_time_delta))
statsd.incr("%s.markdown.count" % (statsd_path,), bugdown_count_delta)
# Get the amount of time spent doing database queries
db_time_output = ""
queries = connection.connection.queries if connection.connection is not None else []
if len(queries) > 0:
query_time = sum(float(query.get('time', 0)) for query in queries)
db_time_output = " (db: %s/%sq)" % (format_timedelta(query_time),
len(queries))
if not suppress_statsd:
# Log ms, db ms, and num queries to statsd
statsd.timing("%s.dbtime" % (statsd_path,), timedelta_ms(query_time))
statsd.incr("%s.dbq" % (statsd_path,), len(queries))
statsd.timing("%s.total" % (statsd_path,), timedelta_ms(time_delta))
if 'extra' in log_data:
extra_request_data = " %s" % (log_data['extra'],)
else:
extra_request_data = ""
logger_client = "(%s via %s)" % (email, client_name)
logger_timing = ('%5s%s%s%s%s%s %s' %
(format_timedelta(time_delta), optional_orig_delta,
remote_cache_output, bugdown_output,
db_time_output, startup_output, path))
logger_line = ('%-15s %-7s %3d %s%s %s' %
(remote_ip, method, status_code,
logger_timing, extra_request_data, logger_client))
if (status_code in [200, 304] and method == "GET" and path.startswith("/static")):
logger.debug(logger_line)
else:
logger.info(logger_line)
if (is_slow_query(time_delta, path)):
# Since the slow query worker patches code, we can't directly
# use call_consume_in_tests here without further work.
queue_json_publish("slow_queries", "%s (%s)" % (logger_line, email), lambda e: None)
if settings.PROFILE_ALL_REQUESTS:
log_data["prof"].disable()
profile_path = "/tmp/profile.data.%s.%s" % (path.split("/")[-1], int(time_delta * 1000),)
log_data["prof"].dump_stats(profile_path)
# Log some additional data whenever we return certain 40x errors
if 400 <= status_code < 500 and status_code not in [401, 404, 405]:
assert error_content_iter is not None
error_content_list = list(error_content_iter)
        if not error_content_list:
error_data = u''
elif isinstance(error_content_list[0], Text):
error_data = u''.join(error_content_list)
elif isinstance(error_content_list[0], binary_type):
error_data = repr(b''.join(error_content_list))
if len(error_data) > 100:
error_data = u"[content more than 100 characters]"
logger.info('status=%3d, data=%s, uid=%s' % (status_code, error_data, email))
class LogRequests(MiddlewareMixin):
# We primarily are doing logging using the process_view hook, but
# for some views, process_view isn't run, so we call the start
# method here too
def process_request(self, request):
# type: (HttpRequest) -> None
request._log_data = dict()
record_request_start_data(request._log_data)
if connection.connection is not None:
connection.connection.queries = []
def process_view(self, request, view_func, args, kwargs):
# type: (HttpRequest, Callable[..., HttpResponse], List[str], Dict[str, Any]) -> None
# process_request was already run; we save the initialization
# time (i.e. the time between receiving the request and
# figuring out which view function to call, which is primarily
# importing modules on the first start)
request._log_data["startup_time_delta"] = time.time() - request._log_data["time_started"]
# And then completely reset our tracking to only cover work
# done as part of this request
record_request_start_data(request._log_data)
if connection.connection is not None:
connection.connection.queries = []
def process_response(self, request, response):
# type: (HttpRequest, StreamingHttpResponse) -> StreamingHttpResponse
# The reverse proxy might have sent us the real external IP
remote_ip = request.META.get('HTTP_X_REAL_IP')
if remote_ip is None:
remote_ip = request.META['REMOTE_ADDR']
# Get the requestor's email address and client, if available.
try:
email = request._email
except Exception:
email = "unauth"
try:
client = request.client.name
except Exception:
client = "?"
if response.streaming:
content_iter = response.streaming_content
content = None
else:
content = response.content
content_iter = None
write_log_line(request._log_data, request.path, request.method,
remote_ip, email, client, status_code=response.status_code,
error_content=content, error_content_iter=content_iter)
return response
class JsonErrorHandler(MiddlewareMixin):
def process_exception(self, request, exception):
# type: (HttpRequest, Exception) -> Optional[HttpResponse]
if isinstance(exception, JsonableError):
return json_response_from_error(exception)
if request.error_format == "JSON":
logging.error(traceback.format_exc())
return json_error(_("Internal server error"), status=500)
return None
class TagRequests(MiddlewareMixin):
def process_view(self, request, view_func, args, kwargs):
# type: (HttpRequest, Callable[..., HttpResponse], List[str], Dict[str, Any]) -> None
self.process_request(request)
def process_request(self, request):
# type: (HttpRequest) -> None
if request.path.startswith("/api/") or request.path.startswith("/json/"):
request.error_format = "JSON"
else:
request.error_format = "HTML"
class CsrfFailureError(JsonableError):
http_status_code = 403
code = ErrorCode.CSRF_FAILED
data_fields = ['reason']
def __init__(self, reason):
# type: (Text) -> None
self.reason = reason # type: Text
@staticmethod
def msg_format():
# type: () -> Text
return _("CSRF Error: {reason}")
def csrf_failure(request, reason=""):
# type: (HttpRequest, Text) -> HttpResponse
if request.error_format == "JSON":
return json_response_from_error(CsrfFailureError(reason))
else:
return html_csrf_failure(request, reason)
class RateLimitMiddleware(MiddlewareMixin):
def process_response(self, request, response):
# type: (HttpRequest, HttpResponse) -> HttpResponse
if not settings.RATE_LIMITING:
return response
from zerver.lib.rate_limiter import max_api_calls, RateLimitedUser
# Add X-RateLimit-*** headers
if hasattr(request, '_ratelimit_applied_limits'):
entity = RateLimitedUser(request.user)
response['X-RateLimit-Limit'] = str(max_api_calls(entity))
if hasattr(request, '_ratelimit_secs_to_freedom'):
response['X-RateLimit-Reset'] = str(int(time.time() + request._ratelimit_secs_to_freedom))
if hasattr(request, '_ratelimit_remaining'):
response['X-RateLimit-Remaining'] = str(request._ratelimit_remaining)
return response
def process_exception(self, request, exception):
# type: (HttpRequest, Exception) -> Optional[HttpResponse]
if isinstance(exception, RateLimited):
resp = json_error(
_("API usage exceeded rate limit"),
data={'retry-after': request._ratelimit_secs_to_freedom},
status=429
)
resp['Retry-After'] = request._ratelimit_secs_to_freedom
return resp
return None
class FlushDisplayRecipientCache(MiddlewareMixin):
def process_response(self, request, response):
# type: (HttpRequest, HttpResponse) -> HttpResponse
# We flush the per-request caches after every request, so they
# are not shared at all between requests.
flush_per_request_caches()
return response
class SessionHostDomainMiddleware(SessionMiddleware):
def process_response(self, request, response):
# type: (HttpRequest, HttpResponse) -> HttpResponse
try:
request.get_host()
except DisallowedHost:
# If we get a DisallowedHost exception trying to access
# the host, (1) the request is failed anyway and so the
# below code will do nothing, and (2) the below will
# trigger a recursive exception, breaking things, so we
# just return here.
return response
if (not request.path.startswith("/static/") and not request.path.startswith("/api/") and
not request.path.startswith("/json/")):
subdomain = get_subdomain(request)
if subdomain != Realm.SUBDOMAIN_FOR_ROOT_DOMAIN:
realm = get_realm(subdomain)
if (realm is None):
return render(request, "zerver/invalid_realm.html")
"""
If request.session was modified, or if the configuration is to save the
session every time, save the changes and set a session cookie.
"""
try:
accessed = request.session.accessed
modified = request.session.modified
except AttributeError:
pass
else:
if accessed:
patch_vary_headers(response, ('Cookie',))
if modified or settings.SESSION_SAVE_EVERY_REQUEST:
if request.session.get_expire_at_browser_close():
max_age = None
expires = None
else:
max_age = request.session.get_expiry_age()
expires_time = time.time() + max_age
expires = cookie_date(expires_time)
# Save the session data and refresh the client cookie.
# Skip session save for 500 responses, refs #3881.
if response.status_code != 500:
request.session.save()
host = request.get_host().split(':')[0]
# The subdomains feature overrides the
# SESSION_COOKIE_DOMAIN setting, since the setting
# is a fixed value and with subdomains enabled,
# the session cookie domain has to vary with the
# subdomain.
session_cookie_domain = host
response.set_cookie(settings.SESSION_COOKIE_NAME,
request.session.session_key, max_age=max_age,
expires=expires, domain=session_cookie_domain,
path=settings.SESSION_COOKIE_PATH,
secure=settings.SESSION_COOKIE_SECURE or None,
httponly=settings.SESSION_COOKIE_HTTPONLY or None)
return response
class SetRemoteAddrFromForwardedFor(MiddlewareMixin):
"""
Middleware that sets REMOTE_ADDR based on the HTTP_X_FORWARDED_FOR.
This middleware replicates Django's former SetRemoteAddrFromForwardedFor middleware.
Because Zulip sits behind a NGINX reverse proxy, if the HTTP_X_FORWARDED_FOR
is set in the request, then it has properly been set by NGINX.
Therefore HTTP_X_FORWARDED_FOR's value is trusted.
"""
def process_request(self, request):
# type: (HttpRequest) -> None
try:
real_ip = request.META['HTTP_X_FORWARDED_FOR']
except KeyError:
return None
else:
# HTTP_X_FORWARDED_FOR can be a comma-separated list of IPs.
# For NGINX reverse proxy servers, the client's IP will be the first one.
real_ip = real_ip.split(",")[0].strip()
request.META['REMOTE_ADDR'] = real_ip
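# Illustrative note (the addresses are made up): with NGINX in front, a header
# value like "203.0.113.7, 10.0.0.2" yields REMOTE_ADDR == "203.0.113.7",
# since only the first (leftmost) entry is taken above.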
|
|
import time
from unittest import TestCase
from mock import Mock, PropertyMock, patch
from carbon.cache import (
MetricCache, _MetricCache, DrainStrategy, MaxStrategy, RandomStrategy,
SortedStrategy, TimeSortedStrategy, BucketMaxStrategy
)
class MetricCacheTest(TestCase):
def setUp(self):
settings = {
'MAX_CACHE_SIZE': float('inf'),
'CACHE_SIZE_LOW_WATERMARK': float('inf')
}
self._settings_patch = patch.dict('carbon.conf.settings', settings)
self._settings_patch.start()
self.strategy_mock = Mock(spec=DrainStrategy)
self.metric_cache = _MetricCache(self.strategy_mock)
def tearDown(self):
self._settings_patch.stop()
def test_constructor(self):
settings = {
'CACHE_WRITE_STRATEGY': 'max',
}
settings_patch = patch.dict('carbon.conf.settings', settings)
settings_patch.start()
cache = MetricCache()
self.assertNotEqual(cache, None)
self.assertTrue(isinstance(cache.strategy, MaxStrategy))
def test_cache_is_a_dict(self):
self.assertTrue(issubclass(_MetricCache, dict))
def test_initial_size(self):
self.assertEqual(0, self.metric_cache.size)
def test_store_new_metric(self):
self.metric_cache.store('foo', (123456, 1.0))
self.assertEqual(1, self.metric_cache.size)
self.assertEqual([(123456, 1.0)], list(self.metric_cache['foo'].items()))
def test_store_multiple_datapoints(self):
self.metric_cache.store('foo', (123456, 1.0))
self.metric_cache.store('foo', (123457, 2.0))
self.assertEqual(2, self.metric_cache.size)
result = self.metric_cache['foo'].items()
self.assertTrue((123456, 1.0) in result)
self.assertTrue((123457, 2.0) in result)
def test_store_duplicate_timestamp(self):
self.metric_cache.store('foo', (123456, 1.0))
self.metric_cache.store('foo', (123456, 2.0))
self.assertEqual(1, self.metric_cache.size)
self.assertEqual([(123456, 2.0)], list(self.metric_cache['foo'].items()))
def test_store_checks_fullness(self):
is_full_mock = PropertyMock()
with patch.object(_MetricCache, 'is_full', is_full_mock):
with patch('carbon.cache.events'):
metric_cache = _MetricCache()
metric_cache.store('foo', (123456, 1.0))
self.assertEqual(1, is_full_mock.call_count)
def test_store_on_full_triggers_events(self):
is_full_mock = PropertyMock(return_value=True)
with patch.object(_MetricCache, 'is_full', is_full_mock):
with patch('carbon.cache.events') as events_mock:
self.metric_cache.store('foo', (123456, 1.0))
events_mock.cacheFull.assert_called_with()
def test_pop_multiple_datapoints(self):
self.metric_cache.store('foo', (123456, 1.0))
self.metric_cache.store('foo', (123457, 2.0))
result = self.metric_cache.pop('foo')
self.assertTrue((123456, 1.0) in result)
self.assertTrue((123457, 2.0) in result)
def test_pop_reduces_size(self):
self.metric_cache.store('foo', (123456, 1.0))
self.metric_cache.store('foo', (123457, 2.0))
self.metric_cache.pop('foo')
self.assertEqual(0, self.metric_cache.size)
def test_pop_triggers_space_check(self):
with patch.object(self.metric_cache, '_check_available_space') as check_space_mock:
self.metric_cache.store('foo', (123456, 1.0))
self.metric_cache.pop('foo')
self.assertEqual(1, check_space_mock.call_count)
def test_pop_triggers_space_event(self):
with patch('carbon.state.cacheTooFull', new=Mock(return_value=True)):
with patch('carbon.cache.events') as events_mock:
self.metric_cache.store('foo', (123456, 1.0))
self.metric_cache.pop('foo')
events_mock.cacheSpaceAvailable.assert_called_with()
def test_pop_returns_sorted_timestamps(self):
self.metric_cache.store('foo', (123457, 2.0))
self.metric_cache.store('foo', (123458, 3.0))
self.metric_cache.store('foo', (123456, 1.0))
result = self.metric_cache.pop('foo')
expected = [(123456, 1.0), (123457, 2.0), (123458, 3.0)]
self.assertEqual(expected, result)
def test_pop_raises_on_missing(self):
self.assertRaises(KeyError, self.metric_cache.pop, 'foo')
def test_get_datapoints(self):
self.metric_cache.store('foo', (123456, 1.0))
self.assertEqual([(123456, 1.0)], self.metric_cache.get_datapoints('foo'))
def test_get_datapoints_doesnt_pop(self):
self.metric_cache.store('foo', (123456, 1.0))
self.assertEqual([(123456, 1.0)], self.metric_cache.get_datapoints('foo'))
self.assertEqual(1, self.metric_cache.size)
self.assertEqual([(123456, 1.0)], self.metric_cache.get_datapoints('foo'))
def test_get_datapoints_returns_empty_on_missing(self):
self.assertEqual([], self.metric_cache.get_datapoints('foo'))
def test_get_datapoints_returns_sorted_timestamps(self):
self.metric_cache.store('foo', (123457, 2.0))
self.metric_cache.store('foo', (123458, 3.0))
self.metric_cache.store('foo', (123456, 1.0))
result = self.metric_cache.get_datapoints('foo')
expected = [(123456, 1.0), (123457, 2.0), (123458, 3.0)]
self.assertEqual(expected, result)
def test_drain_metric_respects_strategy(self):
self.metric_cache.store('foo', (123456, 1.0))
self.metric_cache.store('bar', (123456, 1.0))
self.metric_cache.store('baz', (123456, 1.0))
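        # _MetricCache instantiates the strategy class it was given, so
        # the drained order is configured on the mock's return_value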
self.strategy_mock.return_value.choose_item.side_effect = ['bar', 'baz', 'foo']
self.assertEqual('bar', self.metric_cache.drain_metric()[0])
self.assertEqual('baz', self.metric_cache.drain_metric()[0])
self.assertEqual('foo', self.metric_cache.drain_metric()[0])
def test_drain_metric_works_without_strategy(self):
metric_cache = _MetricCache() # No strategy
metric_cache.store('foo', (123456, 1.0))
self.assertEqual('foo', metric_cache.drain_metric()[0])
def test_is_full_short_circuits_on_inf(self):
with patch.object(self.metric_cache, 'size') as size_mock:
self.metric_cache.is_full
size_mock.assert_not_called()
def test_is_full(self):
self._settings_patch.values['MAX_CACHE_SIZE'] = 2.0
self._settings_patch.start()
with patch('carbon.cache.events'):
self.assertFalse(self.metric_cache.is_full)
self.metric_cache.store('foo', (123456, 1.0))
self.assertFalse(self.metric_cache.is_full)
self.metric_cache.store('foo', (123457, 1.0))
self.assertTrue(self.metric_cache.is_full)
def test_counts_one_datapoint(self):
self.metric_cache.store('foo', (123456, 1.0))
self.assertEqual([('foo', 1)], self.metric_cache.counts)
def test_counts_two_datapoints(self):
self.metric_cache.store('foo', (123456, 1.0))
self.metric_cache.store('foo', (123457, 2.0))
self.assertEqual([('foo', 2)], self.metric_cache.counts)
def test_counts_multiple_datapoints(self):
self.metric_cache.store('foo', (123456, 1.0))
self.metric_cache.store('foo', (123457, 2.0))
self.metric_cache.store('bar', (123458, 3.0))
self.assertTrue(('foo', 2) in self.metric_cache.counts)
self.assertTrue(('bar', 1) in self.metric_cache.counts)
class DrainStrategyTest(TestCase):
def setUp(self):
self.metric_cache = _MetricCache()
def test_bucketmax_strategy(self):
bucketmax_strategy = BucketMaxStrategy(self.metric_cache)
self.metric_cache.strategy = bucketmax_strategy
self.metric_cache.store('foo', (123456, 1.0))
self.metric_cache.store('foo', (123457, 2.0))
self.metric_cache.store('foo', (123458, 3.0))
self.metric_cache.store('bar', (123459, 4.0))
self.metric_cache.store('bar', (123460, 5.0))
self.metric_cache.store('baz', (123461, 6.0))
# foo has 3
self.assertEqual('foo', bucketmax_strategy.choose_item())
# add 2 more 'bar' for 4 total
self.metric_cache.store('bar', (123462, 8.0))
self.metric_cache.store('bar', (123463, 9.0))
self.assertEqual('bar', bucketmax_strategy.choose_item())
self.metric_cache.pop('foo')
self.metric_cache.pop('bar')
self.assertEqual('baz', bucketmax_strategy.choose_item())
def test_max_strategy(self):
self.metric_cache.store('foo', (123456, 1.0))
self.metric_cache.store('foo', (123457, 2.0))
self.metric_cache.store('foo', (123458, 3.0))
self.metric_cache.store('bar', (123459, 4.0))
self.metric_cache.store('bar', (123460, 5.0))
self.metric_cache.store('baz', (123461, 6.0))
max_strategy = MaxStrategy(self.metric_cache)
# foo has 3
self.assertEqual('foo', max_strategy.choose_item())
# add 2 more 'bar' for 4 total
self.metric_cache.store('bar', (123462, 8.0))
self.metric_cache.store('bar', (123463, 9.0))
self.assertEqual('bar', max_strategy.choose_item())
self.metric_cache.pop('foo')
self.metric_cache.pop('bar')
self.assertEqual('baz', max_strategy.choose_item())
def test_sorted_strategy_static_cache(self):
self.metric_cache.store('foo', (123456, 1.0))
self.metric_cache.store('foo', (123457, 2.0))
self.metric_cache.store('foo', (123458, 3.0))
self.metric_cache.store('bar', (123459, 4.0))
self.metric_cache.store('bar', (123460, 5.0))
self.metric_cache.store('baz', (123461, 6.0))
sorted_strategy = SortedStrategy(self.metric_cache)
# In order from most to least
self.assertEqual('foo', sorted_strategy.choose_item())
self.assertEqual('bar', sorted_strategy.choose_item())
self.assertEqual('baz', sorted_strategy.choose_item())
def test_sorted_strategy_changing_sizes(self):
self.metric_cache.store('foo', (123456, 1.0))
self.metric_cache.store('foo', (123457, 2.0))
self.metric_cache.store('foo', (123458, 3.0))
self.metric_cache.store('bar', (123459, 4.0))
self.metric_cache.store('bar', (123460, 5.0))
self.metric_cache.store('baz', (123461, 6.0))
sorted_strategy = SortedStrategy(self.metric_cache)
# In order from most to least foo, bar, baz
self.assertEqual('foo', sorted_strategy.choose_item())
# 'baz' gets 2 more, now greater than 'bar'
self.metric_cache.store('baz', (123461, 6.0))
self.metric_cache.store('baz', (123461, 6.0))
# But 'bar' is popped anyway, because sort has already happened
self.assertEqual('bar', sorted_strategy.choose_item())
self.assertEqual('baz', sorted_strategy.choose_item())
# Sort happens again
self.assertEqual('foo', sorted_strategy.choose_item())
self.assertEqual('bar', sorted_strategy.choose_item())
self.assertEqual('baz', sorted_strategy.choose_item())
def test_time_sorted_strategy(self):
self.metric_cache.store('foo', (123456, 1.0))
self.metric_cache.store('foo', (123457, 2.0))
self.metric_cache.store('foo', (123458, 3.0))
self.metric_cache.store('bar', (123459, 4.0))
self.metric_cache.store('bar', (123460, 5.0))
self.metric_cache.store('baz', (123461, 6.0))
time_sorted_strategy = TimeSortedStrategy(self.metric_cache)
# In order: foo, bar, baz
self.assertEqual('foo', time_sorted_strategy.choose_item())
# 'baz' gets older points.
self.metric_cache.store('baz', (123450, 6.0))
self.metric_cache.store('baz', (123451, 6.0))
# But 'bar' is popped anyway, because sort has already happened
self.assertEqual('bar', time_sorted_strategy.choose_item())
self.assertEqual('baz', time_sorted_strategy.choose_item())
# Sort happens again
self.assertEqual('baz', time_sorted_strategy.choose_item())
self.assertEqual('foo', time_sorted_strategy.choose_item())
self.assertEqual('bar', time_sorted_strategy.choose_item())
def test_time_sorted_strategy_min_lag(self):
settings = {
'MIN_TIMESTAMP_LAG': 5,
}
settings_patch = patch.dict('carbon.conf.settings', settings)
settings_patch.start()
now = time.time()
self.metric_cache.store('old', (now - 10, 1.0))
self.metric_cache.store('new', (now, 2.0))
time_sorted_strategy = TimeSortedStrategy(self.metric_cache)
self.assertEqual('old', time_sorted_strategy.choose_item())
self.metric_cache.pop('old')
self.assertEqual(None, time_sorted_strategy.choose_item())
class RandomStrategyTest(TestCase):
def setUp(self):
self.metric_cache = _MetricCache()
def test_random_strategy(self):
self.metric_cache.store('foo', (123456, 1.0))
self.metric_cache.store('bar', (123457, 2.0))
self.metric_cache.store('baz', (123458, 3.0))
strategy = RandomStrategy(self.metric_cache)
for _i in range(3):
item = strategy.choose_item()
self.assertTrue(item in self.metric_cache)
self.metric_cache.pop(item)
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import mock
from oslo_config import cfg
from oslo_policy import policy
from oslo_utils import timeutils
from six.moves.urllib import parse as urlparse
from magnum.api.controllers.v1 import bay as api_bay
from magnum.common import utils
from magnum.conductor import api as rpcapi
from magnum import objects
from magnum.tests import base
from magnum.tests.unit.api import base as api_base
from magnum.tests.unit.api import utils as apiutils
from magnum.tests.unit.objects import utils as obj_utils
class TestBayObject(base.TestCase):
def test_bay_init(self):
bay_dict = apiutils.bay_post_data(baymodel_id=None)
del bay_dict['node_count']
del bay_dict['master_count']
del bay_dict['bay_create_timeout']
bay = api_bay.Bay(**bay_dict)
        self.assertEqual(1, bay.node_count)
        self.assertEqual(1, bay.master_count)
        self.assertEqual(0, bay.bay_create_timeout)
class TestListBay(api_base.FunctionalTest):
def setUp(self):
super(TestListBay, self).setUp()
obj_utils.create_test_baymodel(self.context)
def test_empty(self):
response = self.get_json('/bays')
self.assertEqual([], response['bays'])
def test_one(self):
bay = obj_utils.create_test_bay(self.context)
response = self.get_json('/bays')
self.assertEqual(bay.uuid, response['bays'][0]["uuid"])
for key in ("name", "baymodel_id", "node_count", "status",
"master_count"):
self.assertIn(key, response['bays'][0])
def test_get_one(self):
bay = obj_utils.create_test_bay(self.context)
response = self.get_json('/bays/%s' % bay['uuid'])
self.assertEqual(bay.uuid, response['uuid'])
for key in ("name", "baymodel_id", "node_count", "status",
"api_address", "discovery_url", "node_addresses",
"master_count"):
self.assertIn(key, response)
def test_get_one_by_name(self):
bay = obj_utils.create_test_bay(self.context)
response = self.get_json('/bays/%s' % bay['name'])
self.assertEqual(bay.uuid, response['uuid'])
for key in ("name", "baymodel_id", "node_count", "status",
"api_address", "discovery_url", "node_addresses",
"master_count"):
self.assertIn(key, response)
def test_get_one_by_name_not_found(self):
response = self.get_json(
'/bays/not_found',
expect_errors=True)
self.assertEqual(404, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
def test_get_one_by_name_multiple_bay(self):
obj_utils.create_test_bay(self.context, name='test_bay',
uuid=utils.generate_uuid())
obj_utils.create_test_bay(self.context, name='test_bay',
uuid=utils.generate_uuid())
response = self.get_json('/bays/test_bay', expect_errors=True)
self.assertEqual(409, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
def test_get_all_with_pagination_marker(self):
bay_list = []
for id_ in range(4):
bay = obj_utils.create_test_bay(self.context, id=id_,
uuid=utils.generate_uuid())
bay_list.append(bay)
response = self.get_json('/bays?limit=3&marker=%s'
% bay_list[2].uuid)
self.assertEqual(1, len(response['bays']))
self.assertEqual(bay_list[-1].uuid, response['bays'][0]['uuid'])
def test_detail(self):
bay = obj_utils.create_test_bay(self.context)
response = self.get_json('/bays/detail')
self.assertEqual(bay.uuid, response['bays'][0]["uuid"])
for key in ("name", "baymodel_id", "node_count", "status",
"master_count"):
self.assertIn(key, response['bays'][0])
def test_detail_with_pagination_marker(self):
bay_list = []
for id_ in range(4):
bay = obj_utils.create_test_bay(self.context, id=id_,
uuid=utils.generate_uuid())
bay_list.append(bay)
response = self.get_json('/bays/detail?limit=3&marker=%s'
% bay_list[2].uuid)
self.assertEqual(1, len(response['bays']))
self.assertEqual(bay_list[-1].uuid, response['bays'][0]['uuid'])
for key in ("name", "baymodel_id", "node_count", "status",
"discovery_url", "api_address", "node_addresses"):
self.assertIn(key, response['bays'][0])
def test_detail_against_single(self):
bay = obj_utils.create_test_bay(self.context)
response = self.get_json('/bays/%s/detail' % bay['uuid'],
expect_errors=True)
self.assertEqual(404, response.status_int)
def test_many(self):
bm_list = []
for id_ in range(5):
bay = obj_utils.create_test_bay(self.context, id=id_,
uuid=utils.generate_uuid())
bm_list.append(bay.uuid)
response = self.get_json('/bays')
self.assertEqual(len(bm_list), len(response['bays']))
uuids = [b['uuid'] for b in response['bays']]
self.assertEqual(sorted(bm_list), sorted(uuids))
def test_links(self):
uuid = utils.generate_uuid()
obj_utils.create_test_bay(self.context, id=1, uuid=uuid)
response = self.get_json('/bays/%s' % uuid)
self.assertIn('links', response.keys())
self.assertEqual(2, len(response['links']))
self.assertIn(uuid, response['links'][0]['href'])
for l in response['links']:
bookmark = l['rel'] == 'bookmark'
self.assertTrue(self.validate_link(l['href'], bookmark=bookmark))
def test_collection_links(self):
for id_ in range(5):
obj_utils.create_test_bay(self.context, id=id_,
uuid=utils.generate_uuid())
response = self.get_json('/bays/?limit=3')
self.assertEqual(3, len(response['bays']))
next_marker = response['bays'][-1]['uuid']
self.assertIn(next_marker, response['next'])
def test_collection_links_default_limit(self):
cfg.CONF.set_override('max_limit', 3, 'api')
for id_ in range(5):
obj_utils.create_test_bay(self.context, id=id_,
uuid=utils.generate_uuid())
response = self.get_json('/bays')
self.assertEqual(3, len(response['bays']))
next_marker = response['bays'][-1]['uuid']
self.assertIn(next_marker, response['next'])
class TestPatch(api_base.FunctionalTest):
def setUp(self):
super(TestPatch, self).setUp()
self.baymodel = obj_utils.create_test_baymodel(self.context)
self.bay = obj_utils.create_test_bay(self.context,
name='bay_example_A',
node_count=3)
p = mock.patch.object(rpcapi.API, 'bay_update')
self.mock_bay_update = p.start()
self.mock_bay_update.side_effect = self._simulate_rpc_bay_update
self.addCleanup(p.stop)
def _simulate_rpc_bay_update(self, bay):
bay.save()
return bay
@mock.patch('oslo_utils.timeutils.utcnow')
def test_replace_ok(self, mock_utcnow):
name = 'bay_example_B'
test_time = datetime.datetime(2000, 1, 1, 0, 0)
mock_utcnow.return_value = test_time
response = self.patch_json('/bays/%s' % self.bay.uuid,
[{'path': '/name', 'value': name,
'op': 'replace'}])
self.assertEqual('application/json', response.content_type)
self.assertEqual(200, response.status_code)
response = self.get_json('/bays/%s' % self.bay.uuid)
self.assertEqual(name, response['name'])
return_updated_at = timeutils.parse_isotime(
response['updated_at']).replace(tzinfo=None)
self.assertEqual(test_time, return_updated_at)
# Assert nothing else was changed
self.assertEqual(self.bay.uuid, response['uuid'])
self.assertEqual(self.bay.baymodel_id, response['baymodel_id'])
self.assertEqual(self.bay.node_count, response['node_count'])
@mock.patch('oslo_utils.timeutils.utcnow')
def test_replace_ok_by_name(self, mock_utcnow):
name = 'bay_example_B'
test_time = datetime.datetime(2000, 1, 1, 0, 0)
mock_utcnow.return_value = test_time
response = self.patch_json('/bays/%s' % self.bay.name,
[{'path': '/name', 'value': name,
'op': 'replace'}])
self.assertEqual('application/json', response.content_type)
self.assertEqual(200, response.status_code)
response = self.get_json('/bays/%s' % self.bay.uuid)
self.assertEqual(name, response['name'])
return_updated_at = timeutils.parse_isotime(
response['updated_at']).replace(tzinfo=None)
self.assertEqual(test_time, return_updated_at)
# Assert nothing else was changed
self.assertEqual(self.bay.uuid, response['uuid'])
self.assertEqual(self.bay.baymodel_id, response['baymodel_id'])
self.assertEqual(self.bay.node_count, response['node_count'])
@mock.patch('oslo_utils.timeutils.utcnow')
def test_replace_ok_by_name_not_found(self, mock_utcnow):
name = 'not_found'
test_time = datetime.datetime(2000, 1, 1, 0, 0)
mock_utcnow.return_value = test_time
response = self.patch_json('/bays/%s' % name,
[{'path': '/name', 'value': name,
'op': 'replace'}],
expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(404, response.status_code)
@mock.patch('oslo_utils.timeutils.utcnow')
def test_replace_ok_by_name_multiple_bay(self, mock_utcnow):
test_time = datetime.datetime(2000, 1, 1, 0, 0)
mock_utcnow.return_value = test_time
obj_utils.create_test_bay(self.context, name='test_bay',
uuid=utils.generate_uuid())
obj_utils.create_test_bay(self.context, name='test_bay',
uuid=utils.generate_uuid())
response = self.patch_json('/bays/test_bay',
[{'path': '/name', 'value': 'test_bay',
'op': 'replace'}],
expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(409, response.status_code)
def test_replace_baymodel_id(self):
baymodel = obj_utils.create_test_baymodel(self.context,
uuid=utils.generate_uuid())
response = self.patch_json('/bays/%s' % self.bay.uuid,
[{'path': '/baymodel_id',
'value': baymodel.uuid,
'op': 'replace'}],
expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(200, response.status_code)
def test_replace_non_existent_baymodel_id(self):
response = self.patch_json('/bays/%s' % self.bay.uuid,
[{'path': '/baymodel_id',
'value': utils.generate_uuid(),
'op': 'replace'}],
expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(400, response.status_code)
self.assertTrue(response.json['error_message'])
def test_replace_invalid_node_count(self):
response = self.patch_json('/bays/%s' % self.bay.uuid,
[{'path': '/node_count', 'value': -1,
'op': 'replace'}],
expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(400, response.status_code)
self.assertTrue(response.json['error_message'])
def test_replace_non_existent_bay(self):
response = self.patch_json('/bays/%s' % utils.generate_uuid(),
[{'path': '/name',
'value': 'bay_example_B',
'op': 'replace'}],
expect_errors=True)
self.assertEqual(404, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
def test_add_ok(self):
name = 'bay_example_B'
response = self.patch_json(
'/bays/%s' % self.bay.uuid,
[{'path': '/name', 'value': name, 'op': 'add'}])
self.assertEqual('application/json', response.content_type)
self.assertEqual(200, response.status_int)
response = self.get_json('/bays/%s' % self.bay.uuid)
self.assertEqual(name, response['name'])
# Assert nothing else was changed
self.assertEqual(self.bay.uuid, response['uuid'])
self.assertEqual(self.bay.baymodel_id, response['baymodel_id'])
self.assertEqual(self.bay.node_count, response['node_count'])
def test_add_multi(self):
json = [
{
'path': '/name',
'value': 'bay_example_B',
'op': 'add'
},
{
'path': '/node_count',
'value': 33,
'op': 'add'
}
]
response = self.patch_json('/bays/%s' % self.bay.uuid, json)
self.assertEqual('application/json', response.content_type)
self.assertEqual(200, response.status_code)
response = self.get_json('/bays/%s' % self.bay.uuid)
self.assertEqual('bay_example_B', response['name'])
self.assertEqual(33, response['node_count'])
# Assert nothing else was changed
self.assertEqual(self.bay.uuid, response['uuid'])
self.assertEqual(self.bay.baymodel_id, response['baymodel_id'])
def test_add_non_existent_property(self):
response = self.patch_json(
'/bays/%s' % self.bay.uuid,
[{'path': '/foo', 'value': 'bar', 'op': 'add'}],
expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(400, response.status_int)
self.assertTrue(response.json['error_message'])
def test_remove_ok(self):
response = self.get_json('/bays/%s' % self.bay.uuid)
self.assertIsNotNone(response['name'])
response = self.patch_json('/bays/%s' % self.bay.uuid,
[{'path': '/name', 'op': 'remove'}])
self.assertEqual('application/json', response.content_type)
self.assertEqual(200, response.status_code)
response = self.get_json('/bays/%s' % self.bay.uuid)
self.assertIsNone(response['name'])
# Assert nothing else was changed
self.assertEqual(self.bay.uuid, response['uuid'])
self.assertEqual(self.bay.baymodel_id, response['baymodel_id'])
self.assertEqual(self.bay.node_count, response['node_count'])
self.assertEqual(self.bay.master_count, response['master_count'])
def test_remove_uuid(self):
response = self.patch_json('/bays/%s' % self.bay.uuid,
[{'path': '/uuid', 'op': 'remove'}],
expect_errors=True)
self.assertEqual(400, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
def test_remove_baymodel_id(self):
response = self.patch_json('/bays/%s' % self.bay.uuid,
[{'path': '/baymodel_id', 'op': 'remove'}],
expect_errors=True)
self.assertEqual(400, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
def test_remove_non_existent_property(self):
response = self.patch_json(
'/bays/%s' % self.bay.uuid,
[{'path': '/non-existent', 'op': 'remove'}],
expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(400, response.status_code)
self.assertTrue(response.json['error_message'])
class TestPost(api_base.FunctionalTest):
def setUp(self):
super(TestPost, self).setUp()
self.baymodel = obj_utils.create_test_baymodel(self.context)
p = mock.patch.object(rpcapi.API, 'bay_create')
self.mock_bay_create = p.start()
self.mock_bay_create.side_effect = self._simulate_rpc_bay_create
self.addCleanup(p.stop)
def _simulate_rpc_bay_create(self, bay, bay_create_timeout):
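        # Stand-in for the conductor RPC call: create the bay directly
        # in the DB so the API layer can be tested without a conductor.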
bay.create()
return bay
@mock.patch('oslo_utils.timeutils.utcnow')
def test_create_bay(self, mock_utcnow):
bdict = apiutils.bay_post_data()
test_time = datetime.datetime(2000, 1, 1, 0, 0)
mock_utcnow.return_value = test_time
response = self.post_json('/bays', bdict)
self.assertEqual('application/json', response.content_type)
self.assertEqual(201, response.status_int)
# Check location header
self.assertIsNotNone(response.location)
expected_location = '/v1/bays/%s' % bdict['uuid']
self.assertEqual(urlparse.urlparse(response.location).path,
expected_location)
self.assertEqual(bdict['uuid'], response.json['uuid'])
        self.assertNotIn('updated_at', response.json.keys())
return_created_at = timeutils.parse_isotime(
response.json['created_at']).replace(tzinfo=None)
self.assertEqual(test_time, return_created_at)
def test_create_bay_set_project_id_and_user_id(self):
bdict = apiutils.bay_post_data()
def _simulate_rpc_bay_create(bay, bay_create_timeout):
self.assertEqual(bay.project_id, self.context.project_id)
self.assertEqual(bay.user_id, self.context.user_id)
bay.create()
return bay
self.mock_bay_create.side_effect = _simulate_rpc_bay_create
self.post_json('/bays', bdict)
def test_create_bay_doesnt_contain_id(self):
with mock.patch.object(self.dbapi, 'create_bay',
wraps=self.dbapi.create_bay) as cc_mock:
bdict = apiutils.bay_post_data(name='bay_example_A')
response = self.post_json('/bays', bdict)
self.assertEqual(bdict['name'], response.json['name'])
cc_mock.assert_called_once_with(mock.ANY)
# Check that 'id' is not in first arg of positional args
self.assertNotIn('id', cc_mock.call_args[0][0])
def test_create_bay_generate_uuid(self):
bdict = apiutils.bay_post_data()
del bdict['uuid']
response = self.post_json('/bays', bdict)
self.assertEqual('application/json', response.content_type)
self.assertEqual(201, response.status_int)
self.assertEqual(bdict['name'], response.json['name'])
self.assertTrue(utils.is_uuid_like(response.json['uuid']))
def test_create_bay_no_baymodel_id(self):
bdict = apiutils.bay_post_data()
del bdict['baymodel_id']
response = self.post_json('/bays', bdict, expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(400, response.status_int)
def test_create_bay_with_non_existent_baymodel_id(self):
bdict = apiutils.bay_post_data(baymodel_id=utils.generate_uuid())
response = self.post_json('/bays', bdict, expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(400, response.status_int)
self.assertTrue(response.json['error_message'])
def test_create_bay_with_baymodel_name(self):
bdict = apiutils.bay_post_data(baymodel_id=self.baymodel.name)
response = self.post_json('/bays', bdict, expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(201, response.status_int)
def test_create_bay_with_node_count_zero(self):
bdict = apiutils.bay_post_data()
bdict['node_count'] = 0
response = self.post_json('/bays', bdict, expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(400, response.status_int)
self.assertTrue(response.json['error_message'])
def test_create_bay_with_node_count_negative(self):
bdict = apiutils.bay_post_data()
bdict['node_count'] = -1
response = self.post_json('/bays', bdict, expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(400, response.status_int)
self.assertTrue(response.json['error_message'])
def test_create_bay_with_no_node_count(self):
bdict = apiutils.bay_post_data()
del bdict['node_count']
response = self.post_json('/bays', bdict, expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(201, response.status_int)
self.assertEqual(1, response.json['node_count'])
def test_create_bay_with_master_count_zero(self):
bdict = apiutils.bay_post_data()
bdict['master_count'] = 0
response = self.post_json('/bays', bdict, expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(400, response.status_int)
self.assertTrue(response.json['error_message'])
def test_create_bay_with_no_master_count(self):
bdict = apiutils.bay_post_data()
del bdict['master_count']
response = self.post_json('/bays', bdict)
self.assertEqual('application/json', response.content_type)
self.assertEqual(201, response.status_int)
self.assertEqual(1, response.json['master_count'])
def test_create_bay_with_invalid_long_name(self):
bdict = apiutils.bay_post_data(name='x' * 256)
response = self.post_json('/bays', bdict, expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(400, response.status_int)
self.assertTrue(response.json['error_message'])
def test_create_bay_with_invalid_empty_name(self):
bdict = apiutils.bay_post_data(name='')
response = self.post_json('/bays', bdict, expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(400, response.status_int)
self.assertTrue(response.json['error_message'])
def test_create_bay_with_timeout_none(self):
bdict = apiutils.bay_post_data()
bdict['bay_create_timeout'] = None
response = self.post_json('/bays', bdict, expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(201, response.status_int)
def test_create_bay_with_no_timeout(self):
def _simulate_rpc_bay_create(bay, bay_create_timeout):
self.assertEqual(0, bay_create_timeout)
bay.create()
return bay
self.mock_bay_create.side_effect = _simulate_rpc_bay_create
bdict = apiutils.bay_post_data()
del bdict['bay_create_timeout']
response = self.post_json('/bays', bdict, expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(201, response.status_int)
def test_create_bay_with_timeout_negative(self):
bdict = apiutils.bay_post_data()
bdict['bay_create_timeout'] = -1
response = self.post_json('/bays', bdict, expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(400, response.status_int)
self.assertTrue(response.json['error_message'])
def test_create_bay_with_timeout_zero(self):
bdict = apiutils.bay_post_data()
bdict['bay_create_timeout'] = 0
response = self.post_json('/bays', bdict, expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(201, response.status_int)
class TestDelete(api_base.FunctionalTest):
def setUp(self):
super(TestDelete, self).setUp()
self.baymodel = obj_utils.create_test_baymodel(self.context)
self.bay = obj_utils.create_test_bay(self.context)
p = mock.patch.object(rpcapi.API, 'bay_delete')
self.mock_bay_delete = p.start()
self.mock_bay_delete.side_effect = self._simulate_rpc_bay_delete
self.addCleanup(p.stop)
def _simulate_rpc_bay_delete(self, bay_uuid):
bay = objects.Bay.get_by_uuid(self.context, bay_uuid)
bay.destroy()
def test_delete_bay(self):
self.delete('/bays/%s' % self.bay.uuid)
response = self.get_json('/bays/%s' % self.bay.uuid,
expect_errors=True)
self.assertEqual(404, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
def test_delete_bay_not_found(self):
uuid = utils.generate_uuid()
response = self.delete('/bays/%s' % uuid, expect_errors=True)
self.assertEqual(404, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
def test_delete_bay_with_pods(self):
obj_utils.create_test_pod(self.context, bay_uuid=self.bay.uuid)
response = self.delete('/bays/%s' % self.bay.uuid,
expect_errors=True)
self.assertEqual(204, response.status_int)
def test_delete_bay_with_services(self):
obj_utils.create_test_service(self.context, bay_uuid=self.bay.uuid)
response = self.delete('/bays/%s' % self.bay.uuid,
expect_errors=True)
self.assertEqual(204, response.status_int)
def test_delete_bay_with_replication_controllers(self):
obj_utils.create_test_rc(self.context, bay_uuid=self.bay.uuid)
response = self.delete('/bays/%s' % self.bay.uuid,
expect_errors=True)
self.assertEqual(204, response.status_int)
def test_delete_bay_with_name_not_found(self):
response = self.delete('/bays/not_found', expect_errors=True)
self.assertEqual(404, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
def test_delete_bay_with_name(self):
response = self.delete('/bays/%s' % self.bay.name,
expect_errors=True)
self.assertEqual(204, response.status_int)
def test_delete_multiple_bay_by_name(self):
obj_utils.create_test_bay(self.context, name='test_bay',
uuid=utils.generate_uuid())
obj_utils.create_test_bay(self.context, name='test_bay',
uuid=utils.generate_uuid())
response = self.delete('/bays/test_bay', expect_errors=True)
self.assertEqual(409, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
class TestBayPolicyEnforcement(api_base.FunctionalTest):
def setUp(self):
super(TestBayPolicyEnforcement, self).setUp()
obj_utils.create_test_baymodel(self.context)
def _common_policy_check(self, rule, func, *arg, **kwarg):
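        # Bind the rule to a project the test context does not belong to
        # so that enforcement fails, then assert the wrapped call raises
        # PolicyNotAuthorized naming that rule.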
self.policy.set_rules({rule: "project:non_fake"})
exc = self.assertRaises(policy.PolicyNotAuthorized,
func, *arg, **kwarg)
self.assertTrue(exc.message.startswith(rule))
self.assertTrue(exc.message.endswith("disallowed by policy"))
def test_policy_disallow_get_all(self):
self._common_policy_check(
"bay:get_all", self.get_json, '/bays')
def test_policy_disallow_get_one(self):
self._common_policy_check(
"bay:get", self.get_json, '/bays/111-222-333')
def test_policy_disallow_detail(self):
self._common_policy_check(
"bay:detail", self.get_json, '/bays/111-222-333/detail')
def test_policy_disallow_update(self):
self.bay = obj_utils.create_test_bay(self.context,
name='bay_example_A',
node_count=3)
self._common_policy_check(
"bay:update", self.patch_json, '/bays/%s' % self.bay.name,
[{'path': '/name', 'value': "new_name", 'op': 'replace'}])
def test_policy_disallow_create(self):
bdict = apiutils.bay_post_data(name='bay_example_A')
self._common_policy_check(
"bay:create", self.post_json, '/bays', bdict)
def _simulate_rpc_bay_delete(self, bay_uuid):
bay = objects.Bay.get_by_uuid(self.context, bay_uuid)
bay.destroy()
def test_policy_disallow_delete(self):
p = mock.patch.object(rpcapi.API, 'bay_delete')
self.mock_bay_delete = p.start()
self.mock_bay_delete.side_effect = self._simulate_rpc_bay_delete
self.addCleanup(p.stop)
self._common_policy_check(
"bay:delete", self.delete, '/bays/test_bay')
|
|
import asyncio
import random
import logging
import hashlib
import inspect
from aiohttp import web, hdrs
from sockjs.session import SessionManager
from sockjs.protocol import IFRAME_HTML, json
from sockjs.transports import handlers
from sockjs.transports.utils import session_cookie
from sockjs.transports.utils import cors_headers
from sockjs.transports.utils import cache_headers
from sockjs.transports.rawwebsocket import RawWebSocketTransport
log = logging.getLogger('sockjs')
def get_manager(name, app):
return app['__sockjs_managers__'][name]
def add_endpoint(app, handler, *, name='', prefix='/sockjs',
manager=None, disable_transports=(),
sockjs_cdn='http://cdn.sockjs.org/sockjs-0.3.4.min.js',
cookie_needed=True):
assert callable(handler), handler
if (not asyncio.iscoroutinefunction(handler) and
not inspect.isgeneratorfunction(handler)):
handler = asyncio.coroutine(handler)
router = app.router
# set session manager
if manager is None:
manager = SessionManager(name, app, handler, app.loop)
if manager.name != name:
        raise ValueError(
            'Session manager must have the same name as the sockjs route')
managers = app.setdefault('__sockjs_managers__', {})
if name in managers:
raise ValueError('SockJS "%s" route already registered' % name)
managers[name] = manager
# register routes
route = SockJSRoute(
name, manager, sockjs_cdn,
handlers, disable_transports, cookie_needed)
if prefix.endswith('/'):
prefix = prefix[:-1]
route_name = 'sockjs-url-%s-greeting' % name
router.add_route(
hdrs.METH_GET, prefix, route.greeting, name=route_name)
route_name = 'sockjs-url-%s' % name
router.add_route(
hdrs.METH_GET, '%s/' % prefix,
route.greeting, name=route_name)
route_name = 'sockjs-%s' % name
router.add_route(
hdrs.METH_ANY,
'%s/{server}/{session}/{transport}' % prefix,
route.handler, name=route_name)
route_name = 'sockjs-websocket-%s' % name
router.add_route(
hdrs.METH_GET, '%s/websocket' % prefix,
route.websocket, name=route_name)
router.add_route(
hdrs.METH_GET, '%s/info' % prefix,
route.info, name='sockjs-info-%s' % name)
router.add_route(
hdrs.METH_OPTIONS,
'%s/info' % prefix,
route.info_options, name='sockjs-info-options-%s' % name)
route_name = 'sockjs-iframe-%s' % name
router.add_route(
hdrs.METH_GET,
'%s/iframe.html' % prefix, route.iframe, name=route_name)
route_name = 'sockjs-iframe-ver-%s' % name
router.add_route(
hdrs.METH_GET,
'%s/iframe{version}.html' % prefix, route.iframe, name=route_name)
# start session gc
manager.start()
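# A minimal usage sketch (names follow the sockjs README; MSG_MESSAGE
# lives in sockjs.protocol and is not imported here):
#
#   async def chat(msg, session):
#       if msg.tp == MSG_MESSAGE:
#           session.manager.broadcast(msg.data)
#
#   app = web.Application()
#   add_endpoint(app, chat, name='chat', prefix='/sockjs')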
class SockJSRoute:
def __init__(self, name, manager,
sockjs_cdn, handlers, disable_transports, cookie_needed=True):
self.name = name
self.manager = manager
self.handlers = handlers
self.disable_transports = dict((k, 1) for k in disable_transports)
self.cookie_needed = cookie_needed
self.iframe_html = (IFRAME_HTML % sockjs_cdn).encode('utf-8')
self.iframe_html_hxd = hashlib.md5(self.iframe_html).hexdigest()
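        # The md5 of the iframe body doubles as the ETag served by
        # iframe() below, letting unchanged content be answered with 304.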
def handler(self, request):
info = request.match_info
# lookup transport
tid = info['transport']
if tid not in self.handlers or tid in self.disable_transports:
return web.HTTPNotFound()
create, transport = self.handlers[tid]
# session
manager = self.manager
if not manager.started:
manager.start()
sid = info['session']
if not sid or '.' in sid or '.' in info['server']:
return web.HTTPNotFound()
try:
session = manager.get(sid, create, request=request)
except KeyError:
return web.HTTPNotFound(headers=session_cookie(request))
t = transport(manager, session, request)
try:
return (yield from t.process())
except asyncio.CancelledError:
raise
except web.HTTPException as exc:
return exc
except Exception as exc:
log.exception('Exception in transport: %s' % tid)
if manager.is_acquired(session):
yield from manager.release(session)
return web.HTTPInternalServerError()
def websocket(self, request):
# session
sid = '%0.9d' % random.randint(1, 2147483647)
session = self.manager.get(sid, True, request=request)
# websocket
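        # Requests carrying an Origin header (i.e. browser-initiated) are
        # rejected below; the raw endpoint is meant for non-browser clients.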
if hdrs.ORIGIN in request.headers:
return web.HTTPNotFound()
transport = RawWebSocketTransport(self.manager, session, request)
try:
return (yield from transport.process())
except asyncio.CancelledError:
raise
except web.HTTPException as exc:
return exc
def info(self, request):
resp = web.Response(
content_type='application/json; charset=UTF-8')
resp.headers[hdrs.CACHE_CONTROL] = (
'no-store, no-cache, must-revalidate, max-age=0')
resp.headers.extend(cors_headers(request.headers))
info = {'entropy': random.randint(1, 2147483647),
'websocket': 'websocket' not in self.disable_transports,
'cookie_needed': self.cookie_needed,
'origins': ['*:*']}
resp.text = json.dumps(info)
return resp
def info_options(self, request):
resp = web.Response(
status=204, content_type='application/json; charset=UTF-8')
resp.headers[hdrs.CACHE_CONTROL] = (
'no-store, no-cache, must-revalidate, max-age=0')
resp.headers[hdrs.ACCESS_CONTROL_ALLOW_METHODS] = 'OPTIONS, GET'
resp.headers.extend(cors_headers(request.headers))
resp.headers.extend(cache_headers())
resp.headers.extend(session_cookie(request))
return resp
def iframe(self, request):
cached = request.headers.get(hdrs.IF_NONE_MATCH)
if cached:
response = web.Response(status=304)
response.headers.extend(cache_headers())
return response
return web.Response(
body=self.iframe_html,
content_type='text/html; charset=UTF-8',
headers=((hdrs.ETAG, self.iframe_html_hxd),) + cache_headers())
def greeting(self, request):
return web.Response(body=b'Welcome to SockJS!\n',
content_type='text/plain; charset=UTF-8')
|
|
# -*- coding: utf-8 -*-
"""trying out VAR filtering and multidimensional fft
Note: second half is copy and paste and does not run as script
incomplete definitions of variables, some I created in shell
Created on Thu Jan 07 12:23:40 2010
Author: josef-pktd
update 2010-10-22
2 arrays were not defined, copied from fft_filter.log.py but I did not check
what the results are.
Runs now without raising exception
"""
import numpy as np
from numpy.testing import assert_equal
from scipy import signal, stats
try:
from scipy.signal._signaltools import _centered as trim_centered
except ImportError:
# Must be using SciPy <1.8.0 where this function was moved (it's not a
# public SciPy function, but we need it here)
from scipy.signal.signaltools import _centered as trim_centered
from statsmodels.tsa.filters.filtertools import fftconvolveinv as fftconvolve
x = np.arange(40).reshape((2,20)).T
x = np.arange(60).reshape((3,20)).T
a3f = np.array([[[0.5, 1.], [1., 0.5]],
[[0.5, 1.], [1., 0.5]]])
a3f = np.ones((2,3,3))
nlags = a3f.shape[0]
ntrim = nlags//2
y0 = signal.convolve(x,a3f[:,:,0], mode='valid')
y1 = signal.convolve(x,a3f[:,:,1], mode='valid')
yf = signal.convolve(x[:,:,None],a3f)
y = yf[:,1,:]
yvalid = yf[ntrim:-ntrim,yf.shape[1]//2,:]
#same result with fftconvolve
#signal.fftconvolve(x[:,:,None],a3f).shape
#signal.fftconvolve(x[:,:,None],a3f)[:,1,:]
print(trim_centered(y, x.shape))
# this raises an exception:
#print(trim_centered(yf, (x.shape).shape)
assert_equal(yvalid[:,0], y0.ravel())
assert_equal(yvalid[:,1], y1.ravel())
def arfilter(x, a):
'''apply an autoregressive filter to a series x
x can be 2d, a can be 1d, 2d, or 3d
Parameters
----------
x : array_like
data array, 1d or 2d, if 2d then observations in rows
a : array_like
autoregressive filter coefficients, ar lag polynomial
see Notes
Returns
-------
y : ndarray, 2d
filtered array, number of columns determined by x and a
Notes
-----
In general form this uses the linear filter ::
y = a(L)x
where
x : nobs, nvars
a : nlags, nvars, npoly
Depending on the shape and dimension of a this uses different
Lag polynomial arrays
case 1 : a is 1d or (nlags,1)
one lag polynomial is applied to all variables (columns of x)
case 2 : a is 2d, (nlags, nvars)
each series is independently filtered with its own
lag polynomial, uses loop over nvar
case 3 : a is 3d, (nlags, nvars, npoly)
the ith column of the output array is given by the linear filter
defined by the 2d array a[:,:,i], i.e. ::
y[:,i] = a(.,.,i)(L) * x
y[t,i] = sum_p sum_j a(p,j,i)*x(t-p,j)
for p = 0,...nlags-1, j = 0,...nvars-1,
for all t >= nlags
    Note: maybe convert to axis=1 (not done)
TODO: initial conditions
'''
x = np.asarray(x)
a = np.asarray(a)
if x.ndim == 1:
x = x[:,None]
if x.ndim > 2:
raise ValueError('x array has to be 1d or 2d')
nvar = x.shape[1]
nlags = a.shape[0]
ntrim = nlags//2
# for x is 2d with ncols >1
if a.ndim == 1:
# case: identical ar filter (lag polynomial)
return signal.convolve(x, a[:,None], mode='valid')
# alternative:
#return signal.lfilter(a,[1],x.astype(float),axis=0)
elif a.ndim == 2:
if min(a.shape) == 1:
# case: identical ar filter (lag polynomial)
return signal.convolve(x, a, mode='valid')
# case: independent ar
#(a bit like recserar in gauss, but no x yet)
result = np.zeros((x.shape[0]-nlags+1, nvar))
for i in range(nvar):
            # could also use np.convolve, but this makes switching to fft easier
result[:,i] = signal.convolve(x[:,i], a[:,i], mode='valid')
return result
elif a.ndim == 3:
# case: vector autoregressive with lag matrices
# #not necessary:
# if np.any(a.shape[1:] != nvar):
# raise ValueError('if 3d shape of a has to be (nobs,nvar,nvar)')
yf = signal.convolve(x[:,:,None], a)
yvalid = yf[ntrim:-ntrim, yf.shape[1]//2,:]
return yvalid
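# A quick sanity check of case 1 above (input chosen here purely for
# illustration): one shared lag polynomial [1, -1] applied column-wise
# computes first differences, so an arange input yields all ones.
xdemo = np.arange(10.0)[:, None]
print(arfilter(xdemo, np.array([1.0, -1.0])).ravel())  # nine 1.0's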
a3f = np.ones((2,3,3))
y0ar = arfilter(x,a3f[:,:,0])
print(y0ar, x[1:] + x[:-1])
yres = arfilter(x,a3f[:,:,:2])
print(np.all(yres == (x[1:,:].sum(1) + x[:-1].sum(1))[:,None]))
yff = fftconvolve(x.astype(float)[:,:,None],a3f)
rvs = np.random.randn(500)
ar1fft = fftconvolve(rvs,np.array([1,-0.8]))
#ar1fftp = fftconvolve(np.r_[np.zeros(100),rvs,np.zeros(100)],np.array([1,-0.8]))
ar1fftp = fftconvolve(np.r_[np.zeros(100),rvs],np.array([1,-0.8]))
ar1lf = signal.lfilter([1], [1,-0.8], rvs)
ar1 = np.zeros(501)
for i in range(1,501):
ar1[i] = 0.8*ar1[i-1] + rvs[i-1]
#the previous looks wrong: it is for generating an AR with delayed error,
#or maybe for an MA(1) filter (generating an AR and applying an MA filter
#are the same)
#maybe not, since it replicates lfilter and fftp
#still a strange explanation for convolution
#ok, because this is my fftconvolve, which is an inverse filter (read the
#namespace!): this is an AR filter
errar1 = np.zeros(501)
for i in range(1,500):
errar1[i] = rvs[i] - 0.8*rvs[i-1]
#print(ar1[-10:])
#print(ar1fft[-11:-1])
#print(ar1lf[-10:])
#print(ar1[:10])
#print(ar1fft[1:11])
#print(ar1lf[:10])
#print(ar1[100:110])
#print(ar1fft[100:110])
#print(ar1lf[100:110])
#
#arloop - lfilter - fftp (padded) are the same
print('\n compare: \nerrloop - arloop - fft - lfilter - fftp (padded)')
#print(np.column_stack((ar1[1:31],ar1fft[:30], ar1lf[:30]))
print(np.column_stack((errar1[1:31], ar1[1:31],ar1fft[:30], ar1lf[:30],
ar1fftp[100:130])))
def maxabs(x,y):
return np.max(np.abs(x-y))
print(maxabs(ar1[1:], ar1lf)) #0
print(maxabs(ar1[1:], ar1fftp[100:-1])) # around 1e-15
rvs3 = np.random.randn(500,3)
a3n = np.array([[1,1,1],[-0.8,0.5,0.1]])
a3n = np.array([[1,1,1],[-0.8,0.0,0.0]])
a3n = np.array([[1,-1,-1],[-0.8,0.0,0.0]])
a3n = np.array([[1,0,0],[-0.8,0.0,0.0]])
a3ne = np.r_[np.ones((1,3)),-0.8*np.eye(3)]
ar13fft = fftconvolve(rvs3,a3n)
ar13 = np.zeros((501,3))
for i in range(1,501):
ar13[i] = np.sum(a3n[1,:]*ar13[i-1]) + rvs[i-1]
#changes: imp was not defined, not sure what it is supposed to be;
#copied from a .log file
imp = np.zeros((10,3))
imp[0]=1
a3n = np.array([[1,0,0],[-0.8,0.0,0.0]])
fftconvolve(np.r_[np.zeros((100,3)),imp],a3n)[100:]
a3n = np.array([[1,0,0],[-0.8,-0.50,0.0]])
fftconvolve(np.r_[np.zeros((100,3)),imp],a3n)[100:]
a3n3 = np.array([[[ 1. , 0. , 0. ],
[ 0. , 1. , 0. ],
[ 0. , 0. , 1. ]],
[[-0.8, 0. , 0. ],
[ 0. , -0.8, 0. ],
[ 0. , 0. , -0.8]]])
a3n3 = np.array([[[ 1. , 0.5 , 0. ],
[ 0. , 1. , 0. ],
[ 0. , 0. , 1. ]],
[[-0.8, 0. , 0. ],
[ 0. , -0.8, 0. ],
[ 0. , 0. , -0.8]]])
ttt = fftconvolve(np.r_[np.zeros((100,3)),imp][:,:,None],a3n3.T)[100:]
gftt = ttt/ttt[0,:,:]
a3n3 = np.array([[[ 1. , 0 , 0. ],
[ 0. , 1. , 0. ],
[ 0. , 0. , 1. ]],
[[-0.8, 0.2 , 0. ],
[ 0 , 0.0, 0. ],
[ 0. , 0. , 0.8]]])
ttt = fftconvolve(np.r_[np.zeros((100,3)),imp][:,:,None],a3n3)[100:]
gftt = ttt/ttt[0,:,:]
signal.fftconvolve(np.dstack((imp,imp,imp)),a3n3)[1,:,:]
nobs = 10
imp = np.zeros((nobs,3))
imp[1] = 1.
ar13 = np.zeros((nobs+1,3))
for i in range(1,nobs+1):
ar13[i] = np.dot(a3n3[1,:,:],ar13[i-1]) + imp[i-1]
a3n3inv = np.zeros((nobs+1,3,3))
a3n3inv[0,:,:] = a3n3[0]
a3n3inv[1,:,:] = -a3n3[1]
for i in range(2,nobs+1):
a3n3inv[i,:,:] = np.dot(-a3n3[1],a3n3inv[i-1,:,:])
a3n3sy = np.array([[[ 1. , 0 , 0. ],
[ 0. , 1. , 0. ],
[ 0. , 0. , 1. ]],
[[-0.8, 0.2 , 0. ],
[ 0 , 0.0, 0. ],
[ 0. , 0. , 0.8]]])
nobs = 10
a = np.array([[[ 1. , 0. ],
[ 0. , 1. ]],
[[-0.8, 0.0 ],
[ -0.1 , -0.8]]])
a2n3inv = np.zeros((nobs+1,2,2))
a2n3inv[0,:,:] = a[0]
a2n3inv[1,:,:] = -a[1]
for i in range(2,nobs+1):
a2n3inv[i,:,:] = np.dot(-a[1],a2n3inv[i-1,:,:])
nobs = 10
imp = np.zeros((nobs,2))
imp[0,0] = 1.
#a2 was missing, copied from .log file, not sure if correct
a2 = np.array([[[ 1. , 0. ],
[ 0. , 1. ]],
[[-0.8, 0. ],
[0.1, -0.8]]])
ar12 = np.zeros((nobs+1,2))
for i in range(1,nobs+1):
ar12[i] = np.dot(-a2[1,:,:],ar12[i-1]) + imp[i-1]
u = np.random.randn(10,2)
ar12r = np.zeros((nobs+1,2))
for i in range(1,nobs+1):
ar12r[i] = np.dot(-a2[1,:,:],ar12r[i-1]) + u[i-1]
a2inv = np.zeros((nobs+1,2,2))
a2inv[0,:,:] = a2[0]
a2inv[1,:,:] = -a2[1]
for i in range(2,nobs+1):
a2inv[i,:,:] = np.dot(-a2[1],a2inv[i-1,:,:])
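# equal-probability binning for a gamma distribution: 12 bins of
# probability 1/12 each; ppf of the cumulative sums gives the bin edges
# (e.g. as expected frequencies for a chi-square goodness-of-fit test)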
nbins = 12
binProb = np.zeros(nbins) + 1.0/nbins
binSumProb = np.add.accumulate(binProb)
print(binSumProb)
print(stats.gamma.ppf(binSumProb,0.6379,loc=1.6,scale=39.555))
|
|
#!/usr/bin/env python3
# -*- mode: python -*-
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2016 The Electrum developers
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import threading
from functools import partial
from PyQt5.Qt import QVBoxLayout, QLabel
from electrum.gui.qt.password_dialog import PasswordDialog, PW_PASSPHRASE
from electrum.gui.qt.util import *
from electrum.i18n import _
from electrum.util import PrintError
# The trickiest thing about this handler was getting windows properly
# parented on macOS.
class QtHandlerBase(QObject, PrintError):
    '''An interface between the GUI (here, Qt) and the device handling
    logic, used for I/O with the device.'''
passphrase_signal = pyqtSignal(object, object)
message_signal = pyqtSignal(object, object)
error_signal = pyqtSignal(object, object)
word_signal = pyqtSignal(object)
clear_signal = pyqtSignal()
query_signal = pyqtSignal(object, object)
yes_no_signal = pyqtSignal(object)
status_signal = pyqtSignal(object)
def __init__(self, win, device):
super(QtHandlerBase, self).__init__()
self.clear_signal.connect(self.clear_dialog)
self.error_signal.connect(self.error_dialog)
self.message_signal.connect(self.message_dialog)
self.passphrase_signal.connect(self.passphrase_dialog)
self.word_signal.connect(self.word_dialog)
self.query_signal.connect(self.win_query_choice)
self.yes_no_signal.connect(self.win_yes_no_question)
self.status_signal.connect(self._update_status)
self.win = win
self.device = device
self.dialog = None
self.done = threading.Event()
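        # Handshake used by the blocking getters below: the device thread
        # clears `done`, emits a signal so the GUI thread shows a dialog,
        # then blocks in `done.wait()`; the GUI slot stores the result on
        # `self` and calls `done.set()` to release the device thread.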
def top_level_window(self):
return self.win.top_level_window()
def update_status(self, paired):
self.status_signal.emit(paired)
def _update_status(self, paired):
if hasattr(self, 'button'):
button = self.button
icon = button.icon_paired if paired else button.icon_unpaired
button.setIcon(QIcon(icon))
def query_choice(self, msg, labels):
self.done.clear()
self.query_signal.emit(msg, labels)
self.done.wait()
return self.choice
def yes_no_question(self, msg):
self.done.clear()
self.yes_no_signal.emit(msg)
self.done.wait()
return self.ok
def show_message(self, msg, on_cancel=None):
self.message_signal.emit(msg, on_cancel)
def show_error(self, msg, blocking=False):
self.done.clear()
self.error_signal.emit(msg, blocking)
if blocking:
self.done.wait()
def finished(self):
self.clear_signal.emit()
def get_word(self, msg):
self.done.clear()
self.word_signal.emit(msg)
self.done.wait()
return self.word
def get_passphrase(self, msg, confirm):
self.done.clear()
self.passphrase_signal.emit(msg, confirm)
self.done.wait()
return self.passphrase
def passphrase_dialog(self, msg, confirm):
# If confirm is true, require the user to enter the passphrase twice
parent = self.top_level_window()
if confirm:
d = PasswordDialog(parent, None, msg, PW_PASSPHRASE)
confirmed, p, passphrase = d.run()
else:
d = WindowModalDialog(parent, _("Enter Passphrase"))
pw = QLineEdit()
            pw.setEchoMode(2)  # 2 == QLineEdit.Password (masked input)
pw.setMinimumWidth(200)
vbox = QVBoxLayout()
vbox.addWidget(WWLabel(msg))
vbox.addWidget(pw)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
d.setLayout(vbox)
passphrase = pw.text() if d.exec_() else None
self.passphrase = passphrase
self.done.set()
def word_dialog(self, msg):
dialog = WindowModalDialog(self.top_level_window(), "")
hbox = QHBoxLayout(dialog)
hbox.addWidget(QLabel(msg))
text = QLineEdit()
text.setMaximumWidth(100)
text.returnPressed.connect(dialog.accept)
hbox.addWidget(text)
hbox.addStretch(1)
dialog.exec_() # Firmware cannot handle cancellation
self.word = text.text()
self.done.set()
def message_dialog(self, msg, on_cancel):
# Called more than once during signing, to confirm output and fee
self.clear_dialog()
title = _('Please check your {} device').format(self.device)
self.dialog = dialog = WindowModalDialog(self.top_level_window(), title)
l = QLabel(msg)
vbox = QVBoxLayout(dialog)
vbox.addWidget(l)
if on_cancel:
dialog.rejected.connect(on_cancel)
vbox.addLayout(Buttons(CancelButton(dialog)))
dialog.show()
def error_dialog(self, msg, blocking):
self.win.show_error(msg, parent=self.top_level_window())
if blocking:
self.done.set()
def clear_dialog(self):
if self.dialog:
self.dialog.accept()
self.dialog = None
def win_query_choice(self, msg, labels):
self.choice = self.win.query_choice(msg, labels)
self.done.set()
def win_yes_no_question(self, msg):
self.ok = self.win.question(msg)
self.done.set()
from electrum.plugin import hook
from electrum.util import UserCancelled
from electrum.gui.qt.main_window import StatusBarButton
class QtPluginBase(object):
@hook
def load_wallet(self, wallet, window):
for keystore in wallet.get_keystores():
if not isinstance(keystore, self.keystore_class):
continue
if not self.libraries_available:
message = keystore.plugin.get_library_not_available_message()
window.show_error(message)
return
tooltip = self.device + '\n' + (keystore.label or 'unnamed')
cb = partial(self.show_settings_dialog, window, keystore)
button = StatusBarButton(QIcon(self.icon_unpaired), tooltip, cb)
button.icon_paired = self.icon_paired
button.icon_unpaired = self.icon_unpaired
window.statusBar().addPermanentWidget(button)
handler = self.create_handler(window)
handler.button = button
keystore.handler = handler
keystore.thread = TaskThread(window, window.on_error)
self.add_show_address_on_hw_device_button_for_receive_addr(wallet, keystore, window)
# Trigger a pairing
keystore.thread.add(partial(self.get_client, keystore))
def choose_device(self, window, keystore):
        '''This dialog box should be usable even if the user has
        forgotten their PIN or the device is in bootloader mode.'''
device_id = self.device_manager().xpub_id(keystore.xpub)
if not device_id:
try:
info = self.device_manager().select_device(self, keystore.handler, keystore)
except UserCancelled:
return
device_id = info.device.id_
return device_id
def show_settings_dialog(self, window, keystore):
device_id = self.choose_device(window, keystore)
def add_show_address_on_hw_device_button_for_receive_addr(self, wallet, keystore, main_window):
plugin = keystore.plugin
receive_address_e = main_window.receive_address_e
def show_address():
addr = receive_address_e.text()
keystore.thread.add(partial(plugin.show_address, wallet, addr, keystore))
receive_address_e.addButton(":icons/eye1.png", show_address, _("Show on {}").format(plugin.device))
|
|
import time
import numpy as np
from numpy.lib.function_base import average
import taichi as ti
ti.init(arch=ti.gpu)
res = (800, 800)
color_buffer = ti.Vector.field(3, dtype=ti.f32, shape=res)
count_var = ti.field(ti.i32, shape=(1, ))
tonemapped_buffer = ti.Vector.field(3, dtype=ti.f32, shape=res)
max_ray_depth = 10
eps = 1e-4
inf = 1e10
fov = 0.8
camera_pos = ti.Vector([0.0, 0.6, 3.0])
mat_none = 0
mat_lambertian = 1
mat_specular = 2
mat_glass = 3
mat_light = 4
light_y_pos = 2.0 - eps
light_x_min_pos = -0.25
light_x_range = 0.5
light_z_min_pos = 1.0
light_z_range = 0.12
light_area = light_x_range * light_z_range
light_min_pos = ti.Vector([light_x_min_pos, light_y_pos, light_z_min_pos])
light_max_pos = ti.Vector([
light_x_min_pos + light_x_range, light_y_pos,
light_z_min_pos + light_z_range
])
light_color = ti.Vector(list(np.array([0.9, 0.85, 0.7])))
light_normal = ti.Vector([0.0, -1.0, 0.0])
# No absorption, integrates over a unit hemisphere
lambertian_brdf = 1.0 / np.pi
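# (a Lambertian BRDF is albedo / pi: the cos(theta) factor in the rendering
# equation makes the cosine-weighted hemisphere integral equal the albedo,
# here 1)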
# diamond!
refr_idx = 2.4
# right sphere
sp1_center = ti.Vector([0.4, 0.225, 1.75])
sp1_radius = 0.22
def make_box_transform_matrices():
rad = np.pi / 8.0
c, s = np.cos(rad), np.sin(rad)
rot = np.array([[c, 0, s, 0], [0, 1, 0, 0], [-s, 0, c, 0], [0, 0, 0, 1]])
translate = np.array([
[1, 0, 0, -0.7],
[0, 1, 0, 0],
[0, 0, 1, 0.7],
[0, 0, 0, 1],
])
m = translate @ rot
m_inv = np.linalg.inv(m)
m_inv_t = np.transpose(m_inv)
return ti.Matrix(m_inv), ti.Matrix(m_inv_t)
# left box
box_min = ti.Vector([0.0, 0.0, 0.0])
box_max = ti.Vector([0.55, 1.1, 0.55])
box_m_inv, box_m_inv_t = make_box_transform_matrices()
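# Why the inverse transpose: normals are covectors, so under a transform M a
# normal must be mapped by (M^-1)^T to stay perpendicular to transformed
# tangents. A small NumPy sketch with a hypothetical non-uniform scale (not
# the box transform above):
_m4 = np.diag([2.0, 1.0, 1.0, 1.0])
_tan = np.array([1.0, 1.0, 0.0, 0.0])   # a tangent direction in a plane
_nrm = np.array([1.0, -1.0, 0.0, 0.0])  # its normal: _tan . _nrm == 0
assert abs((_m4 @ _tan)[:3].dot((np.linalg.inv(_m4).T @ _nrm)[:3])) < 1e-12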
@ti.func
def reflect(d, n):
# Assuming |d| and |n| are normalized
return d - 2.0 * d.dot(n) * n
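# Plain NumPy spot-check (a sketch, not part of the renderer): reflecting the
# reflected direction about the same normal recovers the original direction.
_d = np.array([0.6, -0.8, 0.0])
_n = np.array([0.0, 1.0, 0.0])
_refl = _d - 2.0 * _d.dot(_n) * _n
assert np.allclose(_refl - 2.0 * _refl.dot(_n) * _n, _d)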
@ti.func
def refract(d, n, ni_over_nt):
# Assuming |d| and |n| are normalized
has_r, rd = 0, d
dt = d.dot(n)
discr = 1.0 - ni_over_nt * ni_over_nt * (1.0 - dt * dt)
if discr > 0.0:
has_r = 1
rd = (ni_over_nt * (d - n * dt) - n * ti.sqrt(discr)).normalized()
else:
rd *= 0.0
return has_r, rd
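# Hedged numeric aside mirroring the discriminant test above: for a ray
# leaving diamond at 45 degrees (beyond the critical angle asin(1/2.4), about
# 24.6 degrees), the discriminant is negative and no refracted ray exists.
_dt = -np.cos(np.radians(45.0))
assert 1.0 - refr_idx ** 2 * (1.0 - _dt ** 2) < 0.0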
@ti.func
def mat_mul_point(m, p):
hp = ti.Vector([p[0], p[1], p[2], 1.0])
hp = m @ hp
hp /= hp[3]
return ti.Vector([hp[0], hp[1], hp[2]])
@ti.func
def mat_mul_vec(m, v):
hv = ti.Vector([v[0], v[1], v[2], 0.0])
hv = m @ hv
return ti.Vector([hv[0], hv[1], hv[2]])
@ti.func
def intersect_sphere(pos, d, center, radius):
T = pos - center
A = 1.0
B = 2.0 * T.dot(d)
C = T.dot(T) - radius * radius
delta = B * B - 4.0 * A * C
dist = inf
hit_pos = ti.Vector([0.0, 0.0, 0.0])
if delta > -1e-4:
delta = ti.max(delta, 0)
sdelta = ti.sqrt(delta)
ratio = 0.5 / A
ret1 = ratio * (-B - sdelta)
dist = ret1
if dist < inf:
# refinement
old_dist = dist
new_pos = pos + d * dist
T = new_pos - center
A = 1.0
B = 2.0 * T.dot(d)
C = T.dot(T) - radius * radius
delta = B * B - 4 * A * C
if delta > 0:
sdelta = ti.sqrt(delta)
ratio = 0.5 / A
ret1 = ratio * (-B - sdelta) + old_dist
if ret1 > 0:
dist = ret1
hit_pos = new_pos + ratio * (-B - sdelta) * d
else:
dist = inf
return dist, hit_pos
@ti.func
def intersect_plane(pos, d, pt_on_plane, norm):
dist = inf
hit_pos = ti.Vector([0.0, 0.0, 0.0])
denom = d.dot(norm)
if abs(denom) > eps:
dist = norm.dot(pt_on_plane - pos) / denom
hit_pos = pos + d * dist
return dist, hit_pos
@ti.func
def intersect_aabb(box_min, box_max, o, d):
intersect = 1
near_t = -inf
far_t = inf
near_face = 0
near_is_max = 0
for i in ti.static(range(3)):
if d[i] == 0:
if o[i] < box_min[i] or o[i] > box_max[i]:
intersect = 0
else:
i1 = (box_min[i] - o[i]) / d[i]
i2 = (box_max[i] - o[i]) / d[i]
new_far_t = max(i1, i2)
new_near_t = min(i1, i2)
new_near_is_max = i2 < i1
far_t = min(new_far_t, far_t)
if new_near_t > near_t:
near_t = new_near_t
near_face = int(i)
near_is_max = new_near_is_max
near_norm = ti.Vector([0.0, 0.0, 0.0])
if near_t > far_t:
intersect = 0
if intersect:
        for i in ti.static(range(3)):
if near_face == i:
near_norm[i] = -1 + near_is_max * 2
return intersect, near_t, far_t, near_norm
@ti.func
def intersect_aabb_transformed(box_min, box_max, o, d):
# Transform the ray to the box's local space
obj_o = mat_mul_point(box_m_inv, o)
obj_d = mat_mul_vec(box_m_inv, d)
intersect, near_t, _, near_norm = intersect_aabb(box_min, box_max, obj_o,
obj_d)
if intersect and 0 < near_t:
# Transform the normal in the box's local space to world space
near_norm = mat_mul_vec(box_m_inv_t, near_norm)
else:
intersect = 0
return intersect, near_t, near_norm
@ti.func
def intersect_light(pos, d, tmax):
hit, t, far_t, near_norm = intersect_aabb(light_min_pos, light_max_pos,
pos, d)
if hit and 0 < t < tmax:
hit = 1
else:
hit = 0
t = inf
return hit, t
@ti.func
def intersect_scene(pos, ray_dir):
closest, normal = inf, ti.Vector.zero(ti.f32, 3)
c, mat = ti.Vector.zero(ti.f32, 3), mat_none
# right near sphere
cur_dist, hit_pos = intersect_sphere(pos, ray_dir, sp1_center, sp1_radius)
if 0 < cur_dist < closest:
closest = cur_dist
normal = (hit_pos - sp1_center).normalized()
c, mat = ti.Vector([1.0, 1.0, 1.0]), mat_glass
# left box
hit, cur_dist, pnorm = intersect_aabb_transformed(box_min, box_max, pos,
ray_dir)
if hit and 0 < cur_dist < closest:
closest = cur_dist
normal = pnorm
c, mat = ti.Vector([0.8, 0.5, 0.4]), mat_specular
# left
pnorm = ti.Vector([1.0, 0.0, 0.0])
cur_dist, _ = intersect_plane(pos, ray_dir, ti.Vector([-1.1, 0.0, 0.0]),
pnorm)
if 0 < cur_dist < closest:
closest = cur_dist
normal = pnorm
c, mat = ti.Vector([0.65, 0.05, 0.05]), mat_lambertian
# right
pnorm = ti.Vector([-1.0, 0.0, 0.0])
cur_dist, _ = intersect_plane(pos, ray_dir, ti.Vector([1.1, 0.0, 0.0]),
pnorm)
if 0 < cur_dist < closest:
closest = cur_dist
normal = pnorm
c, mat = ti.Vector([0.12, 0.45, 0.15]), mat_lambertian
# bottom
gray = ti.Vector([0.93, 0.93, 0.93])
pnorm = ti.Vector([0.0, 1.0, 0.0])
cur_dist, _ = intersect_plane(pos, ray_dir, ti.Vector([0.0, 0.0, 0.0]),
pnorm)
if 0 < cur_dist < closest:
closest = cur_dist
normal = pnorm
c, mat = gray, mat_lambertian
# top
pnorm = ti.Vector([0.0, -1.0, 0.0])
cur_dist, _ = intersect_plane(pos, ray_dir, ti.Vector([0.0, 2.0, 0.0]),
pnorm)
if 0 < cur_dist < closest:
closest = cur_dist
normal = pnorm
c, mat = gray, mat_lambertian
# far
pnorm = ti.Vector([0.0, 0.0, 1.0])
cur_dist, _ = intersect_plane(pos, ray_dir, ti.Vector([0.0, 0.0, 0.0]),
pnorm)
if 0 < cur_dist < closest:
closest = cur_dist
normal = pnorm
c, mat = gray, mat_lambertian
# light
hit_l, cur_dist = intersect_light(pos, ray_dir, closest)
if hit_l and 0 < cur_dist < closest:
# technically speaking, no need to check the second term
closest = cur_dist
normal = light_normal
c, mat = light_color, mat_light
return closest, normal, c, mat
@ti.func
def visible_to_light(pos, ray_dir):
    # Offsetting the origin by eps * ray_dir is a simple way to avoid
    # self-intersection caused by floating-point rounding error. For a
    # rigorous treatment of managing this precision, see:
    # http://www.pbr-book.org/3ed-2018/Shapes/Managing_Rounding_Error.html
a, b, c, mat = intersect_scene(pos + eps * ray_dir, ray_dir)
return mat == mat_light
@ti.func
def dot_or_zero(n, l):
return max(0.0, n.dot(l))
@ti.func
def mis_power_heuristic(pf, pg):
# Assume 1 sample for each distribution
f = pf**2
g = pg**2
return f / (f + g)
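# Hedged illustration in plain Python (mirror of mis_power_heuristic above):
# with beta = 2 the weights assigned to the two strategies sum to one, which
# is what keeps the combined multiple-importance estimator consistent.
_pf, _pg = 0.3, 0.7
assert abs(_pf ** 2 / (_pf ** 2 + _pg ** 2) +
           _pg ** 2 / (_pg ** 2 + _pf ** 2) - 1.0) < 1e-12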
@ti.func
def compute_area_light_pdf(pos, ray_dir):
hit_l, t = intersect_light(pos, ray_dir, inf)
pdf = 0.0
if hit_l:
l_cos = light_normal.dot(-ray_dir)
if l_cos > eps:
tmp = ray_dir * t
dist_sqr = tmp.dot(tmp)
pdf = dist_sqr / (light_area * l_cos)
return pdf
@ti.func
def compute_brdf_pdf(normal, sample_dir):
return dot_or_zero(normal, sample_dir) / np.pi
@ti.func
def sample_area_light(hit_pos, pos_normal):
# sampling inside the light area
x = ti.random() * light_x_range + light_x_min_pos
z = ti.random() * light_z_range + light_z_min_pos
on_light_pos = ti.Vector([x, light_y_pos, z])
return (on_light_pos - hit_pos).normalized()
@ti.func
def sample_brdf(normal):
# cosine hemisphere sampling
    # Uniformly sample a point on a disk using concentric sampling (r, theta)
# https://www.pbr-book.org/3ed-2018/Monte_Carlo_Integration/2D_Sampling_with_Multidimensional_Transformations#CosineSampleHemisphere
r, theta = 0.0, 0.0
sx = ti.random() * 2.0 - 1.0
sy = ti.random() * 2.0 - 1.0
if sx != 0 or sy != 0:
if abs(sx) > abs(sy):
r = sx
theta = np.pi / 4 * (sy / sx)
else:
r = sy
theta = np.pi / 4 * (2 - sx / sy)
# Apply Malley's method to project disk to hemisphere
u = ti.Vector([1.0, 0.0, 0.0])
if abs(normal[1]) < 1 - eps:
u = normal.cross(ti.Vector([0.0, 1.0, 0.0]))
v = normal.cross(u)
costt, sintt = ti.cos(theta), ti.sin(theta)
xy = (u * costt + v * sintt) * r
zlen = ti.sqrt(max(0.0, 1.0 - xy.dot(xy)))
return xy + zlen * normal
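# NumPy spot-check of Malley's projection above, with hand-picked values:
# xy + zlen * n has unit length whenever u, v, n are orthonormal.
_u = np.array([1.0, 0.0, 0.0])
_v = np.array([0.0, 1.0, 0.0])
_n3 = np.array([0.0, 0.0, 1.0])
_xy = (_u * np.cos(0.7) + _v * np.sin(0.7)) * 0.5
assert abs(np.linalg.norm(_xy + np.sqrt(1.0 - _xy.dot(_xy)) * _n3) - 1.0) < 1e-12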
@ti.func
def sample_direct_light(hit_pos, hit_normal, hit_color):
direct_li = ti.Vector([0.0, 0.0, 0.0])
fl = lambertian_brdf * hit_color * light_color
light_pdf, brdf_pdf = 0.0, 0.0
# sample area light
to_light_dir = sample_area_light(hit_pos, hit_normal)
if to_light_dir.dot(hit_normal) > 0:
light_pdf = compute_area_light_pdf(hit_pos, to_light_dir)
brdf_pdf = compute_brdf_pdf(hit_normal, to_light_dir)
if light_pdf > 0 and brdf_pdf > 0:
l_visible = visible_to_light(hit_pos, to_light_dir)
if l_visible:
w = mis_power_heuristic(light_pdf, brdf_pdf)
nl = dot_or_zero(to_light_dir, hit_normal)
direct_li += fl * w * nl / light_pdf
# sample brdf
brdf_dir = sample_brdf(hit_normal)
brdf_pdf = compute_brdf_pdf(hit_normal, brdf_dir)
if brdf_pdf > 0:
light_pdf = compute_area_light_pdf(hit_pos, brdf_dir)
if light_pdf > 0:
l_visible = visible_to_light(hit_pos, brdf_dir)
if l_visible:
w = mis_power_heuristic(brdf_pdf, light_pdf)
nl = dot_or_zero(brdf_dir, hit_normal)
direct_li += fl * w * nl / brdf_pdf
return direct_li
@ti.func
def schlick(cos, eta):
r0 = (1.0 - eta) / (1.0 + eta)
r0 = r0 * r0
return r0 + (1 - r0) * ((1.0 - cos)**5)
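# Plain-Python mirror of schlick() above, checked at normal incidence, where
# the approximation must reduce to r0 = ((1 - eta) / (1 + eta))**2 exactly.
def _schlick_py(cos_t, eta):
    r0 = ((1.0 - eta) / (1.0 + eta)) ** 2
    return r0 + (1.0 - r0) * (1.0 - cos_t) ** 5
assert abs(_schlick_py(1.0, refr_idx) -
           ((1.0 - refr_idx) / (1.0 + refr_idx)) ** 2) < 1e-12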
@ti.func
def sample_ray_dir(indir, normal, hit_pos, mat):
u = ti.Vector([0.0, 0.0, 0.0])
pdf = 1.0
if mat == mat_lambertian:
u = sample_brdf(normal)
pdf = max(eps, compute_brdf_pdf(normal, u))
elif mat == mat_specular:
u = reflect(indir, normal)
elif mat == mat_glass:
cos = indir.dot(normal)
ni_over_nt = refr_idx
outn = normal
if cos > 0.0:
outn = -normal
cos = refr_idx * cos
else:
ni_over_nt = 1.0 / refr_idx
cos = -cos
has_refr, refr_dir = refract(indir, outn, ni_over_nt)
refl_prob = 1.0
if has_refr:
refl_prob = schlick(cos, refr_idx)
if ti.random() < refl_prob:
u = reflect(indir, normal)
else:
u = refr_dir
return u.normalized(), pdf
stratify_res = 5
inv_stratify = 1.0 / stratify_res
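# Hedged note: count_var cycles through stratify_res**2 = 25 frames, and
# render() maps the frame index to a sub-pixel cell before adding ti.random()
# jitter. A plain-Python mirror (integer division) shows every cell is hit:
assert len({(i // stratify_res, i % stratify_res)
            for i in range(stratify_res ** 2)}) == stratify_res ** 2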
@ti.kernel
def render():
for u, v in color_buffer:
aspect_ratio = res[0] / res[1]
pos = camera_pos
cur_iter = count_var[0]
        str_x, str_y = (cur_iter // stratify_res), (cur_iter % stratify_res)
ray_dir = ti.Vector([
(2 * fov * (u + (str_x + ti.random()) * inv_stratify) / res[1] -
fov * aspect_ratio - 1e-5),
(2 * fov * (v + (str_y + ti.random()) * inv_stratify) / res[1] -
fov - 1e-5),
-1.0,
])
ray_dir = ray_dir.normalized()
acc_color = ti.Vector([0.0, 0.0, 0.0])
throughput = ti.Vector([1.0, 1.0, 1.0])
depth = 0
while depth < max_ray_depth:
closest, hit_normal, hit_color, mat = intersect_scene(pos, ray_dir)
if mat == mat_none:
break
hit_pos = pos + closest * ray_dir
hit_light = (mat == mat_light)
if hit_light:
acc_color += throughput * light_color
break
elif mat == mat_lambertian:
acc_color += throughput * sample_direct_light(
hit_pos, hit_normal, hit_color)
depth += 1
ray_dir, pdf = sample_ray_dir(ray_dir, hit_normal, hit_pos, mat)
pos = hit_pos + 1e-4 * ray_dir
if mat == mat_lambertian:
throughput *= lambertian_brdf * hit_color * dot_or_zero(
hit_normal, ray_dir) / pdf
else:
throughput *= hit_color
color_buffer[u, v] += acc_color
count_var[0] = (count_var[0] + 1) % (stratify_res * stratify_res)
@ti.kernel
def tonemap(accumulated: ti.f32):
for i, j in tonemapped_buffer:
tonemapped_buffer[i, j] = ti.sqrt(color_buffer[i, j] / accumulated *
100.0)
def main():
gui = ti.GUI('Cornell Box', res, fast_gui=True)
gui.fps_limit = 300
last_t = time.time()
i = 0
while gui.running:
render()
interval = 10
if i % interval == 0:
tonemap(i)
print("{:.2f} samples/s ({} iters)".format(
interval / (time.time() - last_t), i))
last_t = time.time()
gui.set_image(tonemapped_buffer)
gui.show()
i += 1
if __name__ == '__main__':
main()
|
|
import operator
from sys import version_info
from unittest import TestCase
from xml.etree import ElementTree
from genologics.entities import StepActions, Researcher, Artifact, \
Step, StepPlacements, Container, Stage, ReagentKit, ReagentLot, Sample, Project
from genologics.lims import Lims
if version_info[0] == 2:
from mock import patch, Mock
else:
from unittest.mock import patch, Mock
url = 'http://testgenologics.com:4040'
########
# Entities in XML
generic_artifact_xml = """<?xml version='1.0' encoding='utf-8'?>
<art:artifact xmlns:art="http://genologics.com/ri/artifact" xmlns:file="http://genologics.com/ri/file" xmlns:udf="http://genologics.com/ri/userdefined" uri="{url}/api/v2/artifacts/a1" limsid="a1">
<name>test_sample1</name>
<type>Analyte</type>
<output-type>Analyte</output-type>
<qc-flag>PASSED</qc-flag>
<location>
<container uri="{url}/api/v2/containers/c1" limsid="c1"/>
<value>A:1</value>
</location>
<working-flag>true</working-flag>
<sample uri="{url}/api/v2/samples/s1" limsid="s1"/>
<udf:field type="Numeric" name="Ave. Conc. (ng/uL)">1</udf:field>
<udf:field type="String" name="Workflow Desired">TruSeq Nano DNA Sample Prep</udf:field>
<workflow-stages>
<workflow-stage status="QUEUED" name="Test workflow s2" uri="{url}/api/v2/configuration/workflows/1/stages/2"/>
<workflow-stage status="COMPLETE" name="Test workflow s1" uri="{url}/api/v2/configuration/workflows/1/stages/1"/>
</workflow-stages>
</art:artifact>"""
generic_step_placements_xml = """<?xml version='1.0' encoding='utf-8'?>
<stp:placements xmlns:stp="http://genologics.com/ri/step" uri="{url}/steps/s1/placements">
<step uri="{url}/steps/s1" />
<configuration uri="{url}/configuration/protocols/1/steps/1">Step name</configuration>
<selected-containers>
<container uri="{url}/containers/{container}" />
</selected-containers>
<output-placements>
<output-placement uri="{url}/artifacts/a1">
<location>
<container limsid="{container}" uri="{url}/containers/{container}" />
<value>{loc1}</value>
</location>
</output-placement>
<output-placement uri="{url}/artifacts/a2">
<location>
<container limsid="{container}" uri="{url}/containers/{container}" />
<value>{loc2}</value>
</location>
</output-placement>
</output-placements>
</stp:placements>"""
generic_reagentkit_xml = """<?xml version='1.0' encoding='utf-8'?>
<kit:reagent-kit xmlns:kit="http://genologics.com/ri/reagentkit" uri="{url}:8080/api/v2/reagentkits/r1">
<name>reagentkitname</name>
<supplier>reagentProvider</supplier>
<website>www.reagentprovider.com</website>
<archived>false</archived>
</kit:reagent-kit>"""
generic_reagentlot_xml = """<?xml version='1.0' encoding='utf-8'?>
<lot:reagent-lot xmlns:lot="http://genologics.com/ri/reagentlot" limsid="l1" uri="{url}/api/v2/reagentlots/l1">
<reagent-kit uri="{url}/api/v2/reagentkits/r1" name="kitname"/>
<name>kitname</name>
<lot-number>100</lot-number>
<created-date>2015-07-16</created-date>
<last-modified-date>2015-08-17</last-modified-date>
<expiry-date>2022-08-16</expiry-date>
<created-by uri="{url}/api/v2/researchers/1"/>
<last-modified-by uri="{url}/api/v2/researchers/1"/>
<status>ARCHIVED</status>
<usage-count>1</usage-count>
</lot:reagent-lot>"""
generic_step_actions_xml = """<stp:actions xmlns:stp="http://genologics.com/ri/step" uri="...">
<step rel="..." uri="{url}/steps/s1">
</step>
<configuration uri="{url}/config/1">...</configuration>
<next-actions>
<next-action artifact-uri="{url}/artifacts/a1" action="requeue" step-uri="..." rework-step-uri="...">
</next-action>
</next-actions>
<escalation>
<request>
<author uri="{url}/researchers/r1">
<first-name>foo</first-name>
<last-name>bar</last-name>
</author>
<reviewer uri="{url}/researchers/r1">
<first-name>foo</first-name>
<last-name>bar</last-name>
</reviewer>
<date>01-01-1970</date>
<comment>no comments</comment>
</request>
<review>
<author uri="{url}/researchers/r1">
<first-name>foo</first-name>
<last-name>bar</last-name>
</author>
<date>01-01-1970</date>
<comment>no comments</comment>
</review>
<escalated-artifacts>
<escalated-artifact uri="{url}/artifacts/r1">
</escalated-artifact>
</escalated-artifacts>
</escalation>
</stp:actions>"""
generic_step_actions_no_escalation_xml = """<stp:actions xmlns:stp="http://genologics.com/ri/step" uri="...">
<step rel="..." uri="{url}/steps/s1">
</step>
<configuration uri="{url}/config/1">...</configuration>
<next-actions>
<next-action artifact-uri="{url}/artifacts/a1" action="requeue" step-uri="{url}/steps/s1" rework-step-uri="{url}/steps/s2">
</next-action>
</next-actions>
</stp:actions>"""
generic_sample_creation_xml = """
<smp:samplecreation xmlns:smp="http://genologics.com/ri/sample" limsid="s1" uri="{url}/api/v2/samples/s1">
<location>
<container limsid="cont1" uri="{url}/api/v2/containers/cont1">
</container>
<value>1:1</value>
</location>
<name>
sample1
</name>
<project uri="{url}/api/v2/projects/p1" limsid="p1">
</project>
</smp:samplecreation>
"""
def elements_equal(e1, e2):
if e1.tag != e2.tag:
print('Tag: %s != %s'%(e1.tag, e2.tag))
return False
if e1.text and e2.text and e1.text.strip() != e2.text.strip():
print('Text: %s != %s' % (e1.text.strip(), e2.text.strip()))
return False
if e1.tail and e2.tail and e1.tail.strip() != e2.tail.strip():
print('Tail: %s != %s' % (e1.tail.strip(), e2.tail.strip()))
return False
if e1.attrib != e2.attrib:
print('Attrib: %s != %s' % (e1.attrib, e2.attrib))
return False
if len(e1) != len(e2):
        print('length %s (%s) != length %s (%s)' % (e1.tag, len(e1), e2.tag, len(e2)))
return False
return all(elements_equal(c1, c2) for c1, c2 in zip(sorted(e1, key=lambda x: x.tag), sorted(e2, key=lambda x: x.tag)))
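# Minimal usage sketch for elements_equal (stdlib ElementTree only): child
# order does not matter, because children are sorted by tag before the
# recursive comparison.
assert elements_equal(ElementTree.fromstring('<a><b/><c/></a>'),
                      ElementTree.fromstring('<a><c/><b/></a>'))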
class TestEntities(TestCase):
dummy_xml = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<dummy></dummy>"""
def setUp(self):
self.lims = Lims(url, username='test', password='password')
def _tostring(self, entity):
return self.lims.tostring(ElementTree.ElementTree(entity.root)).decode("utf-8")
class TestStepActions(TestEntities):
step_actions_xml = generic_step_actions_xml.format(url=url)
step_actions_no_escalation_xml = generic_step_actions_no_escalation_xml.format(url=url)
def test_escalation(self):
s = StepActions(uri=self.lims.get_uri('steps', 'step_id', 'actions'), lims=self.lims)
with patch('requests.Session.get', return_value=Mock(content=self.step_actions_xml, status_code=200)):
with patch('requests.post', return_value=Mock(content=self.dummy_xml, status_code=200)):
r = Researcher(uri='http://testgenologics.com:4040/researchers/r1', lims=self.lims)
a = Artifact(uri='http://testgenologics.com:4040/artifacts/r1', lims=self.lims)
expected_escalation = {
'status': 'Reviewed',
'author': r,
'artifacts': [a], 'request': 'no comments',
'answer': 'no comments',
'reviewer': r}
assert s.escalation == expected_escalation
def test_next_actions(self):
s = StepActions(uri=self.lims.get_uri('steps', 'step_id', 'actions'), lims=self.lims)
with patch('requests.Session.get',
return_value=Mock(content=self.step_actions_no_escalation_xml, status_code=200)):
step1 = Step(self.lims, uri='http://testgenologics.com:4040/steps/s1')
step2 = Step(self.lims, uri='http://testgenologics.com:4040/steps/s2')
artifact = Artifact(self.lims, uri='http://testgenologics.com:4040/artifacts/a1')
expected_next_actions = [{'artifact': artifact, 'action': 'requeue',
'step': step1, 'rework-step': step2}]
assert s.next_actions == expected_next_actions
class TestStepPlacements(TestEntities):
original_step_placements_xml = generic_step_placements_xml.format(url=url, container="c1", loc1='1:1', loc2='2:1')
modloc_step_placements_xml = generic_step_placements_xml.format(url=url, container="c1", loc1='3:1', loc2='4:1')
modcont_step_placements_xml = generic_step_placements_xml.format(url=url, container="c2", loc1='1:1', loc2='1:1')
def test_get_placements_list(self):
s = StepPlacements(uri=self.lims.get_uri('steps', 's1', 'placements'), lims=self.lims)
with patch('requests.Session.get',
return_value=Mock(content=self.original_step_placements_xml, status_code=200)):
a1 = Artifact(uri='http://testgenologics.com:4040/artifacts/a1', lims=self.lims)
a2 = Artifact(uri='http://testgenologics.com:4040/artifacts/a2', lims=self.lims)
c1 = Container(uri='http://testgenologics.com:4040/containers/c1', lims=self.lims)
expected_placements = [[a1, (c1, '1:1')], [a2, (c1, '2:1')]]
assert s.get_placement_list() == expected_placements
def test_set_placements_list(self):
a1 = Artifact(uri='http://testgenologics.com:4040/artifacts/a1', lims=self.lims)
a2 = Artifact(uri='http://testgenologics.com:4040/artifacts/a2', lims=self.lims)
c1 = Container(uri='http://testgenologics.com:4040/containers/c1', lims=self.lims)
c2 = Container(uri='http://testgenologics.com:4040/containers/c2', lims=self.lims)
s = StepPlacements(uri=self.lims.get_uri('steps', 's1', 'placements'), lims=self.lims)
with patch('requests.Session.get',
return_value=Mock(content=self.original_step_placements_xml, status_code=200)):
new_placements = [[a1, (c1, '3:1')], [a2, (c1, '4:1')]]
s.set_placement_list(new_placements)
assert elements_equal(s.root, ElementTree.fromstring(self.modloc_step_placements_xml))
def test_set_placements_list_fail(self):
a1 = Artifact(uri='http://testgenologics.com:4040/artifacts/a1', lims=self.lims)
a2 = Artifact(uri='http://testgenologics.com:4040/artifacts/a2', lims=self.lims)
c2 = Container(uri='http://testgenologics.com:4040/containers/c2', lims=self.lims)
s = StepPlacements(uri=self.lims.get_uri('steps', 's1', 'placements'), lims=self.lims)
with patch('requests.Session.get',
return_value=Mock(content=self.original_step_placements_xml, status_code=200)):
new_placements = [[a1, (c2, '1:1')], [a2, (c2, '1:1')]]
s.set_placement_list(new_placements)
assert elements_equal(s.root, ElementTree.fromstring(self.modcont_step_placements_xml))
class TestArtifacts(TestEntities):
root_artifact_xml = generic_artifact_xml.format(url=url)
def test_input_artifact_list(self):
a = Artifact(uri=self.lims.get_uri('artifacts', 'a1'), lims=self.lims)
with patch('requests.Session.get', return_value=Mock(content=self.root_artifact_xml, status_code=200)):
assert a.input_artifact_list() == []
def test_workflow_stages_and_statuses(self):
a = Artifact(uri=self.lims.get_uri('artifacts', 'a1'), lims=self.lims)
expected_wf_stage = [
(Stage(self.lims, uri=url + '/api/v2/configuration/workflows/1/stages/2'), 'QUEUED', 'Test workflow s2'),
(Stage(self.lims, uri=url + '/api/v2/configuration/workflows/1/stages/1'), 'COMPLETE', 'Test workflow s1')
]
with patch('requests.Session.get', return_value=Mock(content=self.root_artifact_xml, status_code=200)):
assert a.workflow_stages_and_statuses == expected_wf_stage
class TestReagentKits(TestEntities):
url = 'http://testgenologics.com:4040'
reagentkit_xml = generic_reagentkit_xml.format(url=url)
def test_parse_entity(self):
r = ReagentKit(uri=self.lims.get_uri('reagentkits', 'r1'), lims=self.lims)
with patch('requests.Session.get', return_value=Mock(content=self.reagentkit_xml, status_code=200)):
            assert r.name == 'reagentkitname'
assert r.supplier == 'reagentProvider'
assert r.website == 'www.reagentprovider.com'
assert r.archived == False
def test_create_entity(self):
with patch('genologics.lims.requests.post', return_value=Mock(content=self.reagentkit_xml, status_code=201)):
            r = ReagentKit.create(self.lims, name='reagentkitname', supplier='reagentProvider',
                                  website='www.reagentprovider.com', archived=False)
self.assertRaises(TypeError, ReagentKit.create, self.lims, error='test')
class TestReagentLots(TestEntities):
reagentlot_xml = generic_reagentlot_xml.format(url=url)
reagentkit_xml = generic_reagentkit_xml.format(url=url)
def test_parse_entity(self):
        l = ReagentLot(uri=self.lims.get_uri('reagentlots', 'l1'), lims=self.lims)
with patch('requests.Session.get', return_value=Mock(content=self.reagentlot_xml, status_code=200)):
assert l.uri
assert l.name == 'kitname'
assert l.lot_number == '100'
assert l.status == 'ARCHIVED'
def test_create_entity(self):
with patch('requests.Session.get', return_value=Mock(content=self.reagentkit_xml, status_code=200)):
r = ReagentKit(uri=self.lims.get_uri('reagentkits', 'r1'), lims=self.lims)
with patch('genologics.lims.requests.post',
return_value=Mock(content=self.reagentlot_xml, status_code=201)) as patch_post:
l = ReagentLot.create(
self.lims,
reagent_kit=r,
name='kitname',
lot_number='100',
expiry_date='2020-05-01',
status='ACTIVE'
)
assert l.uri
assert l.name == 'kitname'
assert l.lot_number == '100'
class TestSample(TestEntities):
sample_creation = generic_sample_creation_xml.format(url=url)
def test_create_entity(self):
with patch('genologics.lims.requests.post',
return_value=Mock(content=self.sample_creation, status_code=201)) as patch_post:
l = Sample.create(
self.lims,
project=Project(self.lims, uri='project'),
container=Container(self.lims, uri='container'),
position='1:1',
name='s1',
)
data = '''<?xml version=\'1.0\' encoding=\'utf-8\'?>
<smp:samplecreation xmlns:smp="http://genologics.com/ri/sample">
<name>s1</name>
<project uri="project" />
<location>
<container uri="container" />
<value>1:1</value>
</location>
</smp:samplecreation>'''
assert elements_equal(ElementTree.fromstring(patch_post.call_args_list[0][1]['data']), ElementTree.fromstring(data))
|
|
##########################################################################
#
# Copyright (c) 2007-2013, Image Engine Design Inc. All rights reserved.
# Copyright (c) 2012, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
from __future__ import with_statement
import unittest
import os
from IECore import *
import IECoreRI
class SimpleProcedural( Renderer.Procedural ) :
def __init__( self, scale ) :
Renderer.Procedural.__init__( self )
self.__scale = scale
self.__t = StringData( "hello" )
self.__c = CompoundData()
self.__c["a"] = IntData( 4 )
def bound( self ) :
return Box3f( V3f( -self.__scale ), V3f( self.__scale ) )
def render( self, renderer ) :
self.rendererTypeName = renderer.typeName()
self.rendererTypeId = renderer.typeId()
renderer.transformBegin()
m = M44f()
m.scale( V3f( self.__scale ) )
renderer.concatTransform( m )
renderer.transformEnd()
def hash( self ):
h = MurmurHash()
return h
class RendererTest( IECoreRI.TestCase ) :
def loadShader( self, shader ) :
return IECoreRI.SLOReader( os.path.join( os.environ["SHADER_PATH"], shader + ".sdl" ) ).read()
def testTypeId( self ) :
self.assertEqual( IECoreRI.Renderer().typeId(), IECoreRI.Renderer.staticTypeId() )
self.assertNotEqual( IECoreRI.Renderer.staticTypeId(), Renderer.staticTypeId() )
def testTypeName( self ) :
r = IECoreRI.Renderer()
self.assertEqual( r.typeName(), "IECoreRI::Renderer" )
def test( self ) :
r = IECoreRI.Renderer( "test/IECoreRI/output/test.rib" )
r.setOption( "ri:searchpath:shader", StringData( os.environ["SHADER_PATH"] ) )
r.setOption( "ri:render:bucketorder", StringData( "zigzag" ) )
r.setOption( "user:magicNumber", IntData( 42 ) )
r.setOption( "ri:pixelSamples", V2iData( V2i( 8, 8 ) ) )
r.worldBegin()
r.transformBegin()
r.attributeBegin()
self.loadShader( "plastic" ).render( r )
Reader.create( "test/IECoreRI/data/sphere.cob" ).read().render( r )
r.attributeEnd()
r.transformEnd()
r.worldEnd()
l = "".join( file( "test/IECoreRI/output/test.rib" ).readlines() ).replace( "\n", "" )
self.failUnless( 'Option "render" "string bucketorder" [ "zigzag" ]' in l )
self.failUnless( 'Option "user" "int magicNumber"' in l )
self.failUnless( 'PixelSamples 8 8' in l )
def testAttributes( self ) :
tests = [
# format is : name value expectedRib getAttributeShouldWork
( "ri:shadingRate", FloatData( 2 ), "ShadingRate 2", True ),
( "ri:matte", BoolData( 0 ), "Matte 0", True ),
( "ri:matte", BoolData( 1 ), "Matte 1", True ),
( "user:whatever", StringData( "whatever" ), "Attribute \"user\" \"string whatever\" [ \"whatever\" ]", True ),
( "ri:color", Color3fData( Color3f( 0, 1, 1 ) ), "Color [ 0 1 1 ]", False ),
( "color", Color3fData( Color3f( 1, 2, 3 ) ), "Color [ 1 2 3 ]", False ),
( "ri:opacity", Color3fData( Color3f( 1, 1, 1 ) ), "Opacity [ 1 1 1 ]", False ),
( "opacity", Color3fData( Color3f( 0, 1, 0 ) ), "Opacity [ 0 1 0 ]", False ),
( "ri:sides", IntData( 1 ), "Sides 1", False ),
( "ri:geometricApproximation:motionFactor", FloatData( 1 ), "GeometricApproximation \"motionfactor\" 1", False ),
( "ri:geometricApproximation:focusFactor", FloatData( 1 ), "GeometricApproximation \"focusfactor\" 1", False ),
( "ri:cull:hidden", IntData( 0 ), "Attribute \"cull\" \"int hidden\" [ 0 ]", False ),
( "name", StringData( "oioi" ), "Attribute \"identifier\" \"string name\" [ \"oioi\" ]", True ),
( "ri:trace:bias", FloatData( 2 ), "Attribute \"trace\" \"float bias\" [ 2 ]", True ),
]
for t in tests :
r = IECoreRI.Renderer( "test/IECoreRI/output/testAttributes.rib" )
r.worldBegin()
r.setAttribute( t[0], t[1] )
if t[3] :
self.assertEqual( r.getAttribute( t[0] ), t[1] )
r.worldEnd()
l = "".join( file( "test/IECoreRI/output/testAttributes.rib" ).readlines() )
l = " ".join( l.split() )
self.assert_( t[2] in l )
def testCompoundDataAttributes( self ) :
r = IECoreRI.Renderer( "test/IECoreRI/output/testAttributes.rib" )
with WorldBlock( r ) :
r.setAttribute(
"ri:displacementbound",
{
"sphere" : 10.0,
"coordinatesystem" : "shader",
},
)
lines = file( "test/IECoreRI/output/testAttributes.rib" ).readlines()
found = False
for line in lines :
if ( "Attribute \"displacementbound\"" in line and
"\"string coordinatesystem\" [ \"shader\" ]" in line and
"\"float sphere\" [ 10 ]" in line ) :
found = True
self.failUnless( found )
# check that we get appropriate warnings if not providing CompoundData
r = IECoreRI.Renderer( "test/IECoreRI/output/testAttributes.rib" )
with WorldBlock( r ) :
c = CapturingMessageHandler()
with c :
r.setAttribute( "ri:displacementbound", FloatData( 10 ) )
self.assertEqual( len( c.messages ), 1 )
self.assertEqual( c.messages[0].level, Msg.Level.Warning )
def testProcedural( self ) :
r = IECoreRI.Renderer( "test/IECoreRI/output/testProcedural.rib" )
r.worldBegin()
p = SimpleProcedural( 10.5 )
r.procedural( p )
r.worldEnd()
self.assertEqual( p.rendererTypeId, IECoreRI.Renderer.staticTypeId() )
self.assertEqual( p.rendererTypeName, "IECoreRI::Renderer" )
self.assertEqual( p.rendererTypeName, IECoreRI.Renderer.staticTypeName() )
def testGetOption( self ) :
r = IECoreRI.Renderer( "test/IECoreRI/output/testGetOption.rib" )
r.camera( "main", { "resolution" : V2iData( V2i( 1024, 768 ) ) } )
r.setOption( "ri:shutter:offset", FloatData( 10 ) )
r.worldBegin()
s = r.getOption( "shutter" )
self.assertEqual( s, V2fData( V2f( 0 ) ) )
self.assertEqual( r.getOption( "camera:resolution" ), V2iData( V2i( 1024, 768 ) ) )
self.assertEqual( r.getOption( "ri:shutter:offset" ), FloatData( 10 ) )
r.worldEnd()
def testDisplay( self ) :
r = IECoreRI.Renderer( "test/IECoreRI/output/testDisplay.rib" )
r.display( "test.tif", "tiff", "rgba", { "quantize" : FloatVectorData( [ 0, 1, 0, 1 ] ) } )
r.worldBegin()
r.worldEnd()
l = "".join( file( "test/IECoreRI/output/testDisplay.rib" ).readlines() ).replace( "\n", "" )
self.failUnless( 'Display "test.tif" "tiff" "rgba" "float quantize[4]" [ 0 1 0 1 ]' in l )
def testSubDivs( self ) :
r = IECoreRI.Renderer( "test/IECoreRI/output/subdiv.rib" )
r.display( "test", "idisplay", "rgba", {} )
r.worldBegin()
t = M44f()
t.translate( V3f( 0, 0, 10 ) )
r.concatTransform( t )
m = ObjectReader( "test/IECoreRI/data/openSubDivCube.cob" ).read()
m.render( r )
r.worldEnd()
l = "".join( file( "test/IECoreRI/output/subdiv.rib" ).readlines() ).replace( "\n", "" )
self.failUnless( 'SubdivisionMesh "catmull-clark" [ 4 4 4 4 4 ]' in l )
self.failUnless( '[ "interpolateboundary" ] [ 0 0 ] [ ] [ ]' in l )
self.failUnless( 'vertex point P' in l )
self.failUnless( 'facevarying float s' in l )
self.failUnless( 'facevarying float t' in l )
def testSubDivTags( self ) :
r = IECoreRI.Renderer( "test/IECoreRI/output/subdiv.rib" )
r.display( "test", "idisplay", "rgba", {} )
r.worldBegin()
t = M44f()
t.translate( V3f( 0, 0, 10 ) )
r.concatTransform( t )
m = ObjectReader( "test/IECoreRI/data/openSubDivCube.cob" ).read()
m["tags"] = PrimitiveVariable(
PrimitiveVariable.Interpolation.Constant,
CompoundData( {
"names" : StringVectorData( [ "interpolateboundary", "facevaryinginterpolateboundary" ] ),
"nArgs" : IntVectorData( [ 1, 0, 1, 0 ] ),
"floats" : FloatVectorData( [] ),
"integers" : IntVectorData( [ 1, 0 ] ),
} )
)
m.render( r )
r.worldEnd()
l = "".join( file( "test/IECoreRI/output/subdiv.rib" ).readlines() ).replace( "\n", "" )
self.failUnless( 'SubdivisionMesh "catmull-clark" [ 4 4 4 4 4 ]' in l )
self.failUnless( '[ "interpolateboundary" "facevaryinginterpolateboundary" ] [ 1 0 1 0 ] [ 1 0 ] [ ]' in l )
self.failUnless( 'vertex point P' in l )
self.failUnless( 'facevarying float s' in l )
self.failUnless( 'facevarying float t' in l )
def testCommands( self ) :
r = IECoreRI.Renderer( "test/IECoreRI/output/commands.rib" )
r.worldBegin()
r.command( "ri:readArchive", { "name" : StringData( "nameOfArchive" ) } )
r.worldEnd()
l = "".join( file( "test/IECoreRI/output/commands.rib" ).readlines() ).replace( "\n", "" )
self.failUnless( 'ReadArchive "nameOfArchive"' in l )
def testMotion( self ) :
r = IECoreRI.Renderer( "test/IECoreRI/output/motion.rib" )
r.worldBegin()
m = MatrixMotionTransform()
m[0] = M44f.createTranslated( V3f( 0, 1, 0 ) )
m[1] = M44f.createTranslated( V3f( 0, 10, 0 ) )
m.render( r )
r.worldEnd()
l = "".join( file( "test/IECoreRI/output/motion.rib" ).readlines() ).replace( "\n", "" )
self.failUnless( "MotionBegin [ 0 1 ]" in l )
self.assertEqual( l.count( "ConcatTransform" ), 2 )
self.failUnless( "MotionEnd" in l )
self.failUnless( l.index( "MotionBegin" ) < l.index( "ConcatTransform" ) )
self.failUnless( l.index( "ConcatTransform" ) < l.index( "MotionEnd" ) )
def testStringPrimVars( self ) :
r = IECoreRI.Renderer( "test/IECoreRI/output/stringPrimVars.rib" )
r.worldBegin()
m = ObjectReader( "test/IECoreRI/data/stringPrimVars.cob" ).read()
m.render( r )
r.worldEnd()
l = "".join( file( "test/IECoreRI/output/stringPrimVars.rib" ).readlines() ).replace( "\n", "" )
self.failUnless( '"constant string ieGeneric_diffuse_Color_Textures" [ "woodTrain/woodTrainRed_v001_color_LIN.tdl" ]' in l )
self.failUnless( '"constant string ieGeneric_displacement_Textures" [ "woodTrain/woodTrain_v001_bump_LIN.tdl" ]' in l )
self.failUnless( '"constant string ieGeneric_reflection_Textures" [ "woodTrain/woodTrain_v001_bump_LIN.tdl" ]' in l )
def testGetTransform( self ) :
r = IECoreRI.Renderer( "test/IECoreRI/output/transform.rib" )
r.worldBegin()
self.assertEqual( r.getTransform(), M44f() )
self.assertEqual( r.getTransform( "world" ), M44f() )
self.assertEqual( r.getTransform( "object" ), M44f() )
r.transformBegin()
t = M44f.createTranslated( V3f( 1, 2, 3 ) ) * M44f.createScaled( V3f( 2, 1, 0 ) ) * M44f.createRotated( V3f( 20, 0, 90 ) )
r.concatTransform( t )
self.assert_( r.getTransform( "object" ).equalWithAbsError( t, 0.000001 ) )
self.assert_( r.getTransform().equalWithAbsError( t, 0.000001 ) )
r.coordinateSystem( "coordSys" )
self.assert_( r.getTransform( "coordSys" ).equalWithAbsError( t, 0.000001 ) )
r.transformEnd()
self.assertEqual( r.getTransform(), M44f() )
self.assertEqual( r.getTransform( "world" ), M44f() )
self.assertEqual( r.getTransform( "object" ), M44f() )
self.assert_( r.getTransform( "coordSys" ).equalWithAbsError( t, 0.000001 ) )
r.worldEnd()
def testIgnoreOtherAttributesAndOptions( self ) :
with CapturingMessageHandler() as m :
r = IECoreRI.Renderer( "test/IECoreRI/output/transform.rib" )
# this should be silently ignored
r.setOption( "someOthereRenderer:someOtherOption", IntData( 10 ) )
r.worldBegin()
# this should be silently ignored
r.setAttribute( "someOtherRenderer:someOtherAttribute", IntData( 10 ) )
# as should this
self.assertEqual( r.getAttribute( "someOtherRenderer:someOtherAttribute" ), None )
# and this
self.assertEqual( r.getOption( "someOtherRenderer:someOtherOption" ), None )
r.worldEnd()
self.assertEqual( len( m.messages ), 0 )
def testMissingShaders( self ) :
"""Check that missing shaders don't throw an exception but print a message instead."""
with CapturingMessageHandler() as m :
r = IECoreRI.Renderer( "test/IECoreRI/output/missingShaders.rib" )
r.worldBegin()
r.shader( "surface", "aShaderWhichDoesntExist", {} )
r.worldEnd()
self.assertEqual( len( m.messages ), 1 )
self.assert_( "aShaderWhichDoesntExist" in m.messages[0].message )
def testGetUserOption( self ) :
r = IECoreRI.Renderer( "test/IECoreRI/output/getUserOption.rib" )
o = {
"user:f" : FloatData( 10 ),
"user:i" : IntData( 100 ),
"user:s" : StringData( "hello" ),
"user:c" : Color3fData( Color3f( 1, 0, 0 ) ),
"user:v" : V3fData( V3f( 1, 2, 3 ) ),
"user:m" : M44fData( M44f.createTranslated( V3f( 1, 2, 3 ) ) ),
}
for k, v in o.items() :
r.setOption( k, v )
self.assertEqual( r.getOption( k ), v )
def testGetUserAttribute( self ) :
r = IECoreRI.Renderer( "test/IECoreRI/output/getUserAttribute.rib" )
o = {
"user:f" : FloatData( 10 ),
"user:i" : IntData( 100 ),
"user:s" : StringData( "hello" ),
"user:c" : Color3fData( Color3f( 1, 0, 0 ) ),
"user:v" : V3fData( V3f( 1, 2, 3 ) ),
"user:m" : M44fData( M44f.createTranslated( V3f( 1, 2, 3 ) ) ),
}
for k, v in o.items() :
r.setAttribute( k, v )
self.assertEqual( r.getAttribute( k ), v )
def testFloat3ShaderParameters( self ) :
self.assertEqual( os.system( "shaderdl -o test/IECoreRI/shaders/types.sdl test/IECoreRI/shaders/types.sl" ), 0 )
r = IECoreRI.Renderer( "test/IECoreRI/output/test.rib" )
with WorldBlock( r ) :
r.shader( "surface", "test/IECoreRI/shaders/types", { "f3" : V3fData( V3f( 4, 5, 6 ) ) } )
l = "".join( file( "test/IECoreRI/output/test.rib" ).readlines() ).replace( "\n", "" )
self.failUnless( "Surface \"test/IECoreRI/shaders/types\"" in l )
self.failUnless( "\"float[3] f3\" [ 4 5 6 ]" in l )
def testFloat3PrimitiveVariables( self ) :
self.assertEqual( os.system( "shaderdl -o test/IECoreRI/shaders/types.sdl test/IECoreRI/shaders/types.sl" ), 0 )
r = IECoreRI.Renderer( "test/IECoreRI/output/test.rib" )
with WorldBlock( r ) :
r.shader( "surface", "test/IECoreRI/shaders/types", { "f3" : V3fData( V3f( 4, 5, 6 ) ) } )
r.mesh(
IntVectorData( [ 4, 4 ] ),
IntVectorData( [ 0, 1, 2, 3, 3, 2, 4, 5 ] ),
"linear",
{
"P" : PrimitiveVariable(
PrimitiveVariable.Interpolation.Vertex,
V3fVectorData( [ V3f( 0, 0, 0 ), V3f( 0, 1, 0 ), V3f( 1, 1, 0 ), V3f( 1, 0, 0 ), V3f( 2, 1, 0 ), V3f( 2, 0, 0 ) ] )
),
"f3" : PrimitiveVariable(
PrimitiveVariable.Interpolation.Uniform,
V3fVectorData( [ V3f( 0 ), V3f( 1 ) ] ),
)
}
)
l = "".join( file( "test/IECoreRI/output/test.rib" ).readlines() ).replace( "\n", "" )
self.failUnless( "\"uniform float[3] f3\" [ 0 0 0 1 1 1 ]" in l )
def testNullShaderParameters( self ) :
self.assertEqual( os.system( "shaderdl -o test/IECoreRI/shaders/types.sdl test/IECoreRI/shaders/types.sl" ), 0 )
r = IECoreRI.Renderer( "test/IECoreRI/output/test.rib" )
with WorldBlock( r ) :
r.shader( "surface", "test/IECoreRI/shaders/types", { "f3" : None } )
def testErrorsReportedForUnknownRenderManOptions( self ) :
with CapturingMessageHandler() as mh :
r = IECoreRI.Renderer( "test/IECoreRI/output/test.rib" )
r.setOption( "ri:unknownOption", StringData( "whatYouGonnaDo?" ) )
with WorldBlock( r ) :
pass
self.assertEqual( len( mh.messages ), 1 )
self.assertTrue( "ri:unknownOption" in mh.messages[0].message )
def testSetHiderViaOptions( self ) :
with CapturingMessageHandler() as mh :
r = IECoreRI.Renderer( "test/IECoreRI/output/test.rib" )
r.setOption( "ri:hider", "hidden" )
r.setOption( "ri:hider:jitter", True )
r.setOption( "ri:hider:depthfilter", "min" )
with WorldBlock( r ) :
pass
self.assertEqual( len( mh.messages ), 0 )
rib = "".join( file( "test/IECoreRI/output/test.rib" ).readlines() )
self.assertTrue( "Hider" in rib )
self.assertTrue( "hidden" in rib )
self.assertTrue( "jitter" in rib )
self.assertTrue( "depthfilter" in rib )
def testSetBucketSizeViaOptions( self ) :
with CapturingMessageHandler() as mh :
r = IECoreRI.Renderer( "test/IECoreRI/output/test.rib" )
r.setOption( "ri:limits:bucketsize", V2i( 32, 32 ) )
with WorldBlock( r ) :
pass
self.assertEqual( len( mh.messages ), 0 )
rib = "".join( file( "test/IECoreRI/output/test.rib" ).readlines() )
self.assertTrue( 'Option "limits" "integer bucketsize[2]" [ 32 32 ]' in rib )
def testTextureCoordinates( self ) :
r = IECoreRI.Renderer( "test/IECoreRI/output/test.rib" )
with WorldBlock( r ) :
r.setAttribute( "ri:textureCoordinates", FloatVectorData( [ 0, 1, 2, 3, 4, 5, 6, 7 ] ) )
self.assertEqual( r.getAttribute( "ri:textureCoordinates" ), FloatVectorData( [ 0, 1, 2, 3, 4, 5, 6, 7 ] ) )
rib = "".join( file( "test/IECoreRI/output/test.rib" ).readlines() )
self.assertTrue( 'TextureCoordinates 0 1 2 3 4 5 6 7' in rib )
def testMultipleDisplays( self ) :
r = IECoreRI.Renderer( "test/IECoreRI/output/test.rib" )
r.display( "test.exr", "exr", "rgba", { "quantize" : FloatVectorData( [ 0, 0, 0, 0 ] ) } )
r.display( "z.exr", "exr", "z", { "quantize" : FloatVectorData( [ 0, 0, 0, 0 ] ) } )
with WorldBlock( r ) :
pass
rib = "".join( file( "test/IECoreRI/output/test.rib" ).readlines() )
self.assertTrue( "+z.exr" in rib )
def tearDown( self ) :
IECoreRI.TestCase.tearDown( self )
files = [
"test/IECoreRI/shaders/types.sdl",
]
for f in files :
if os.path.exists( f ):
os.remove( f )
if __name__ == "__main__":
unittest.main()
|
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.actionscript
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for ActionScript and MXML.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, bygroups, using, this, words, default
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
__all__ = ['ActionScriptLexer', 'ActionScript3Lexer', 'MxmlLexer']
class ActionScriptLexer(RegexLexer):
"""
For ActionScript source code.
.. versionadded:: 0.9
"""
name = 'ActionScript'
aliases = ['as', 'actionscript']
filenames = ['*.as']
mimetypes = ['application/x-actionscript', 'text/x-actionscript',
'text/actionscript']
flags = re.DOTALL
tokens = {
'root': [
(r'\s+', Text),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
(r'/(\\\\|\\/|[^/\n])*/[gim]*', String.Regex),
(r'[~^*!%&<>|+=:;,/?\\-]+', Operator),
(r'[{}\[\]();.]+', Punctuation),
(words((
'case', 'default', 'for', 'each', 'in', 'while', 'do', 'break',
'return', 'continue', 'if', 'else', 'throw', 'try', 'catch',
'var', 'with', 'new', 'typeof', 'arguments', 'instanceof', 'this',
'switch'), suffix=r'\b'),
Keyword),
(words((
'class', 'public', 'final', 'internal', 'native', 'override', 'private',
'protected', 'static', 'import', 'extends', 'implements', 'interface',
'intrinsic', 'return', 'super', 'dynamic', 'function', 'const', 'get',
'namespace', 'package', 'set'), suffix=r'\b'),
Keyword.Declaration),
(r'(true|false|null|NaN|Infinity|-Infinity|undefined|Void)\b',
Keyword.Constant),
(words((
'Accessibility', 'AccessibilityProperties', 'ActionScriptVersion',
'ActivityEvent', 'AntiAliasType', 'ApplicationDomain', 'AsBroadcaster', 'Array',
'AsyncErrorEvent', 'AVM1Movie', 'BevelFilter', 'Bitmap', 'BitmapData',
'BitmapDataChannel', 'BitmapFilter', 'BitmapFilterQuality', 'BitmapFilterType',
'BlendMode', 'BlurFilter', 'Boolean', 'ByteArray', 'Camera', 'Capabilities', 'CapsStyle',
'Class', 'Color', 'ColorMatrixFilter', 'ColorTransform', 'ContextMenu',
'ContextMenuBuiltInItems', 'ContextMenuEvent', 'ContextMenuItem',
                'ConvolutionFilter', 'CSMSettings', 'DataEvent', 'Date', 'DefinitionError',
                'DeleteObjectSample', 'Dictionary', 'DisplacementMapFilter', 'DisplayObject',
                'DisplacementMapFilterMode', 'DisplayObjectContainer', 'DropShadowFilter',
'Endian', 'EOFError', 'Error', 'ErrorEvent', 'EvalError', 'Event', 'EventDispatcher',
'EventPhase', 'ExternalInterface', 'FileFilter', 'FileReference',
'FileReferenceList', 'FocusDirection', 'FocusEvent', 'Font', 'FontStyle', 'FontType',
'FrameLabel', 'FullScreenEvent', 'Function', 'GlowFilter', 'GradientBevelFilter',
'GradientGlowFilter', 'GradientType', 'Graphics', 'GridFitType', 'HTTPStatusEvent',
                'IBitmapDrawable', 'ID3Info', 'IDataInput', 'IDataOutput', 'IDynamicPropertyOutput',
                'IDynamicPropertyWriter', 'IEventDispatcher', 'IExternalizable',
'IllegalOperationError', 'IME', 'IMEConversionMode', 'IMEEvent', 'int',
'InteractiveObject', 'InterpolationMethod', 'InvalidSWFError', 'InvokeEvent',
'IOError', 'IOErrorEvent', 'JointStyle', 'Key', 'Keyboard', 'KeyboardEvent', 'KeyLocation',
'LineScaleMode', 'Loader', 'LoaderContext', 'LoaderInfo', 'LoadVars', 'LocalConnection',
'Locale', 'Math', 'Matrix', 'MemoryError', 'Microphone', 'MorphShape', 'Mouse', 'MouseEvent',
'MovieClip', 'MovieClipLoader', 'Namespace', 'NetConnection', 'NetStatusEvent',
'NetStream', 'NewObjectSample', 'Number', 'Object', 'ObjectEncoding', 'PixelSnapping',
'Point', 'PrintJob', 'PrintJobOptions', 'PrintJobOrientation', 'ProgressEvent', 'Proxy',
'QName', 'RangeError', 'Rectangle', 'ReferenceError', 'RegExp', 'Responder', 'Sample',
'Scene', 'ScriptTimeoutError', 'Security', 'SecurityDomain', 'SecurityError',
'SecurityErrorEvent', 'SecurityPanel', 'Selection', 'Shape', 'SharedObject',
'SharedObjectFlushStatus', 'SimpleButton', 'Socket', 'Sound', 'SoundChannel',
'SoundLoaderContext', 'SoundMixer', 'SoundTransform', 'SpreadMethod', 'Sprite',
'StackFrame', 'StackOverflowError', 'Stage', 'StageAlign', 'StageDisplayState',
'StageQuality', 'StageScaleMode', 'StaticText', 'StatusEvent', 'String', 'StyleSheet',
'SWFVersion', 'SyncEvent', 'SyntaxError', 'System', 'TextColorType', 'TextField',
'TextFieldAutoSize', 'TextFieldType', 'TextFormat', 'TextFormatAlign',
'TextLineMetrics', 'TextRenderer', 'TextSnapshot', 'Timer', 'TimerEvent', 'Transform',
'TypeError', 'uint', 'URIError', 'URLLoader', 'URLLoaderDataFormat', 'URLRequest',
                'URLRequestHeader', 'URLRequestMethod', 'URLStream', 'URLVariables', 'VerifyError',
'Video', 'XML', 'XMLDocument', 'XMLList', 'XMLNode', 'XMLNodeType', 'XMLSocket',
'XMLUI'), suffix=r'\b'),
Name.Builtin),
(words((
'decodeURI', 'decodeURIComponent', 'encodeURI', 'escape', 'eval', 'isFinite', 'isNaN',
'isXMLName', 'clearInterval', 'fscommand', 'getTimer', 'getURL', 'getVersion',
'parseFloat', 'parseInt', 'setInterval', 'trace', 'updateAfterEvent',
'unescape'), suffix=r'\b'),
Name.Function),
(r'[$a-zA-Z_]\w*', Name.Other),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-f]+', Number.Hex),
(r'[0-9]+', Number.Integer),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r"'(\\\\|\\'|[^'])*'", String.Single),
]
}
class ActionScript3Lexer(RegexLexer):
"""
For ActionScript 3 source code.
.. versionadded:: 0.11
"""
name = 'ActionScript 3'
aliases = ['as3', 'actionscript3']
filenames = ['*.as']
mimetypes = ['application/x-actionscript3', 'text/x-actionscript3',
'text/actionscript3']
identifier = r'[$a-zA-Z_]\w*'
    typeidentifier = identifier + r'(?:\.<\w+>)?'
flags = re.DOTALL | re.MULTILINE
tokens = {
'root': [
(r'\s+', Text),
(r'(function\s+)(' + identifier + r')(\s*)(\()',
bygroups(Keyword.Declaration, Name.Function, Text, Operator),
'funcparams'),
(r'(var|const)(\s+)(' + identifier + r')(\s*)(:)(\s*)(' +
typeidentifier + r')',
bygroups(Keyword.Declaration, Text, Name, Text, Punctuation, Text,
Keyword.Type)),
(r'(import|package)(\s+)((?:' + identifier + r'|\.)+)(\s*)',
bygroups(Keyword, Text, Name.Namespace, Text)),
(r'(new)(\s+)(' + typeidentifier + r')(\s*)(\()',
bygroups(Keyword, Text, Keyword.Type, Text, Operator)),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
(r'/(\\\\|\\/|[^\n])*/[gisx]*', String.Regex),
(r'(\.)(' + identifier + r')', bygroups(Operator, Name.Attribute)),
(r'(case|default|for|each|in|while|do|break|return|continue|if|else|'
r'throw|try|catch|with|new|typeof|arguments|instanceof|this|'
r'switch|import|include|as|is)\b',
Keyword),
(r'(class|public|final|internal|native|override|private|protected|'
r'static|import|extends|implements|interface|intrinsic|return|super|'
r'dynamic|function|const|get|namespace|package|set)\b',
Keyword.Declaration),
(r'(true|false|null|NaN|Infinity|-Infinity|undefined|void)\b',
Keyword.Constant),
(r'(decodeURI|decodeURIComponent|encodeURI|escape|eval|isFinite|isNaN|'
r'isXMLName|clearInterval|fscommand|getTimer|getURL|getVersion|'
r'isFinite|parseFloat|parseInt|setInterval|trace|updateAfterEvent|'
r'unescape)\b', Name.Function),
(identifier, Name),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-f]+', Number.Hex),
(r'[0-9]+', Number.Integer),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r"'(\\\\|\\'|[^'])*'", String.Single),
(r'[~^*!%&<>|+=:;,/?\\{}\[\]().-]+', Operator),
],
'funcparams': [
(r'\s+', Text),
(r'(\s*)(\.\.\.)?(' + identifier + r')(\s*)(:)(\s*)(' +
typeidentifier + r'|\*)(\s*)',
bygroups(Text, Punctuation, Name, Text, Operator, Text,
Keyword.Type, Text), 'defval'),
(r'\)', Operator, 'type')
],
'type': [
(r'(\s*)(:)(\s*)(' + typeidentifier + r'|\*)',
bygroups(Text, Operator, Text, Keyword.Type), '#pop:2'),
(r'\s+', Text, '#pop:2'),
default('#pop:2')
],
'defval': [
(r'(=)(\s*)([^(),]+)(\s*)(,?)',
bygroups(Operator, Text, using(this), Text, Operator), '#pop'),
(r',', Operator, '#pop'),
default('#pop')
]
}
def analyse_text(text):
if re.match(r'\w+\s*:\s*\w', text):
return 0.3
return 0
class MxmlLexer(RegexLexer):
"""
For MXML markup.
Nested AS3 in <script> tags is highlighted by the appropriate lexer.
.. versionadded:: 1.1
"""
flags = re.MULTILINE | re.DOTALL
name = 'MXML'
aliases = ['mxml']
filenames = ['*.mxml']
    mimetypes = ['text/xml', 'application/xml']
tokens = {
'root': [
('[^<&]+', Text),
(r'&\S*?;', Name.Entity),
(r'(\<\!\[CDATA\[)(.*?)(\]\]\>)',
bygroups(String, using(ActionScript3Lexer), String)),
('<!--', Comment, 'comment'),
(r'<\?.*?\?>', Comment.Preproc),
('<![^>]*>', Comment.Preproc),
(r'<\s*[\w:.-]+', Name.Tag, 'tag'),
(r'<\s*/\s*[\w:.-]+\s*>', Name.Tag),
],
'comment': [
('[^-]+', Comment),
('-->', Comment, '#pop'),
('-', Comment),
],
'tag': [
(r'\s+', Text),
(r'[\w.:-]+\s*=', Name.Attribute, 'attr'),
(r'/?\s*>', Name.Tag, '#pop'),
],
'attr': [
            (r'\s+', Text),
('".*?"', String, '#pop'),
("'.*?'", String, '#pop'),
(r'[^\s>]+', String, '#pop'),
],
}
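# Hedged usage sketch: exercise the lexers above through the standard
# pygments get_tokens() API. Guarded so importing this module stays free of
# side effects.
if __name__ == '__main__':
    for tok, val in ActionScript3Lexer().get_tokens('var x:int = 0;\n'):
        print('%s %r' % (tok, val))
    for tok, val in MxmlLexer().get_tokens('<root attr="1"/>\n'):
        print('%s %r' % (tok, val))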
|
|
import re
import collections
from enum import Enum
from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION, ANYXML_CLASS
from ydk.errors import YPYError, YPYModelError
from ydk.providers._importer import _yang_ns
_meta_table = {
'SensordatatypeEnum' : _MetaInfoEnum('SensordatatypeEnum', 'ydk.models.cisco_ios_xe.CISCO_ENTITY_SENSOR_MIB',
{
'other':'other',
'unknown':'unknown',
'voltsAC':'voltsAC',
'voltsDC':'voltsDC',
'amperes':'amperes',
'watts':'watts',
'hertz':'hertz',
'celsius':'celsius',
'percentRH':'percentRH',
'rpm':'rpm',
'cmm':'cmm',
'truthvalue':'truthvalue',
'specialEnum':'specialEnum',
'dBm':'dBm',
}, 'CISCO-ENTITY-SENSOR-MIB', _yang_ns._namespaces['CISCO-ENTITY-SENSOR-MIB']),
'SensorthresholdrelationEnum' : _MetaInfoEnum('SensorthresholdrelationEnum', 'ydk.models.cisco_ios_xe.CISCO_ENTITY_SENSOR_MIB',
{
'lessThan':'lessThan',
'lessOrEqual':'lessOrEqual',
'greaterThan':'greaterThan',
'greaterOrEqual':'greaterOrEqual',
'equalTo':'equalTo',
'notEqualTo':'notEqualTo',
}, 'CISCO-ENTITY-SENSOR-MIB', _yang_ns._namespaces['CISCO-ENTITY-SENSOR-MIB']),
'SensordatascaleEnum' : _MetaInfoEnum('SensordatascaleEnum', 'ydk.models.cisco_ios_xe.CISCO_ENTITY_SENSOR_MIB',
{
'yocto':'yocto',
'zepto':'zepto',
'atto':'atto',
'femto':'femto',
'pico':'pico',
'nano':'nano',
'micro':'micro',
'milli':'milli',
'units':'units',
'kilo':'kilo',
'mega':'mega',
'giga':'giga',
'tera':'tera',
'exa':'exa',
'peta':'peta',
'zetta':'zetta',
'yotta':'yotta',
}, 'CISCO-ENTITY-SENSOR-MIB', _yang_ns._namespaces['CISCO-ENTITY-SENSOR-MIB']),
'SensorthresholdseverityEnum' : _MetaInfoEnum('SensorthresholdseverityEnum', 'ydk.models.cisco_ios_xe.CISCO_ENTITY_SENSOR_MIB',
{
'other':'other',
'minor':'minor',
'major':'major',
'critical':'critical',
}, 'CISCO-ENTITY-SENSOR-MIB', _yang_ns._namespaces['CISCO-ENTITY-SENSOR-MIB']),
'SensorstatusEnum' : _MetaInfoEnum('SensorstatusEnum', 'ydk.models.cisco_ios_xe.CISCO_ENTITY_SENSOR_MIB',
{
'ok':'ok',
'unavailable':'unavailable',
'nonoperational':'nonoperational',
}, 'CISCO-ENTITY-SENSOR-MIB', _yang_ns._namespaces['CISCO-ENTITY-SENSOR-MIB']),
'CiscoEntitySensorMib.Entsensorglobalobjects' : {
'meta_info' : _MetaInfoClass('CiscoEntitySensorMib.Entsensorglobalobjects',
False,
[
_MetaInfoClassMember('entSensorThreshNotifGlobalEnable', ATTRIBUTE, 'bool' , None, None,
[], [],
''' This variable enables the generation of
entSensorThresholdNotification globally
on the device. If this object value is
'false', then no entSensorThresholdNotification
will be generated on this device. If this object
                value is 'true', then whether an
entSensorThresholdNotification for a threshold will
be generated or not depends on the instance value of
entSensorThresholdNotificationEnable for that
threshold.
''',
'entsensorthreshnotifglobalenable',
'CISCO-ENTITY-SENSOR-MIB', False),
],
'CISCO-ENTITY-SENSOR-MIB',
'entSensorGlobalObjects',
_yang_ns._namespaces['CISCO-ENTITY-SENSOR-MIB'],
'ydk.models.cisco_ios_xe.CISCO_ENTITY_SENSOR_MIB'
),
},
'CiscoEntitySensorMib.Entsensorvaluetable.Entsensorvalueentry' : {
'meta_info' : _MetaInfoClass('CiscoEntitySensorMib.Entsensorvaluetable.Entsensorvalueentry',
False,
[
_MetaInfoClassMember('entPhysicalIndex', ATTRIBUTE, 'int' , None, None,
[('1', '2147483647')], [],
''' ''',
'entphysicalindex',
'CISCO-ENTITY-SENSOR-MIB', True),
_MetaInfoClassMember('entSensorMeasuredEntity', ATTRIBUTE, 'int' , None, None,
[('0', '2147483647')], [],
''' This object identifies the physical entity for which the
sensor is taking measurements. For example, for a sensor
measuring the voltage output of a power-supply, this object
would be the entPhysicalIndex of that power-supply; for a sensor
measuring the temperature inside one chassis of a multi-chassis
                system, this object would be the entPhysicalIndex of that
chassis.
This object has a value of zero when the physical entity
                for which the sensor is taking measurements cannot be
                represented by any one row in the entPhysicalTable, or when
there is no such physical entity.
''',
'entsensormeasuredentity',
'CISCO-ENTITY-SENSOR-MIB', False),
_MetaInfoClassMember('entSensorPrecision', ATTRIBUTE, 'int' , None, None,
[('-8', '9')], [],
''' This variable indicates the number of decimal
places of precision in fixed-point
sensor values reported by entSensorValue.
This variable is set to 0 when entSensorType
is not a fixed-point type: e.g.'percentRH(9)',
'rpm(10)', 'cmm(11)', or 'truthvalue(12)'.
This variable is set by the agent at start-up
and the value does not change during operation.
''',
'entsensorprecision',
'CISCO-ENTITY-SENSOR-MIB', False),
_MetaInfoClassMember('entSensorScale', REFERENCE_ENUM_CLASS, 'SensordatascaleEnum' , 'ydk.models.cisco_ios_xe.CISCO_ENTITY_SENSOR_MIB', 'SensordatascaleEnum',
[], [],
''' This variable indicates the exponent to apply
to sensor values reported by entSensorValue.
This variable is set by the agent at start-up
and the value does not change during operation.
''',
'entsensorscale',
'CISCO-ENTITY-SENSOR-MIB', False),
_MetaInfoClassMember('entSensorStatus', REFERENCE_ENUM_CLASS, 'SensorstatusEnum' , 'ydk.models.cisco_ios_xe.CISCO_ENTITY_SENSOR_MIB', 'SensorstatusEnum',
[], [],
''' This variable indicates the present operational status
of the sensor.
''',
'entsensorstatus',
'CISCO-ENTITY-SENSOR-MIB', False),
_MetaInfoClassMember('entSensorType', REFERENCE_ENUM_CLASS, 'SensordatatypeEnum' , 'ydk.models.cisco_ios_xe.CISCO_ENTITY_SENSOR_MIB', 'SensordatatypeEnum',
[], [],
''' This variable indicates the type of data
reported by the entSensorValue.
This variable is set by the agent at start-up
and the value does not change during operation.
''',
'entsensortype',
'CISCO-ENTITY-SENSOR-MIB', False),
_MetaInfoClassMember('entSensorValue', ATTRIBUTE, 'int' , None, None,
[('-1000000000', '1073741823')], [],
''' This variable reports the most recent measurement seen
by the sensor.
To correctly display or interpret this variable's value,
you must also know entSensorType, entSensorScale, and
entSensorPrecision.
However, you can compare entSensorValue with the threshold
values given in entSensorThresholdTable without any semantic
knowledge.
''',
'entsensorvalue',
'CISCO-ENTITY-SENSOR-MIB', False),
_MetaInfoClassMember('entSensorValueTimeStamp', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' This variable indicates the age of the value reported by
entSensorValue.
''',
'entsensorvaluetimestamp',
'CISCO-ENTITY-SENSOR-MIB', False),
_MetaInfoClassMember('entSensorValueUpdateRate', ATTRIBUTE, 'int' , None, None,
[('0', '999999999')], [],
''' This variable indicates the rate that the agent
updates entSensorValue.
''',
'entsensorvalueupdaterate',
'CISCO-ENTITY-SENSOR-MIB', False),
],
'CISCO-ENTITY-SENSOR-MIB',
'entSensorValueEntry',
_yang_ns._namespaces['CISCO-ENTITY-SENSOR-MIB'],
'ydk.models.cisco_ios_xe.CISCO_ENTITY_SENSOR_MIB'
),
},
'CiscoEntitySensorMib.Entsensorvaluetable' : {
'meta_info' : _MetaInfoClass('CiscoEntitySensorMib.Entsensorvaluetable',
False,
[
_MetaInfoClassMember('entSensorValueEntry', REFERENCE_LIST, 'Entsensorvalueentry' , 'ydk.models.cisco_ios_xe.CISCO_ENTITY_SENSOR_MIB', 'CiscoEntitySensorMib.Entsensorvaluetable.Entsensorvalueentry',
[], [],
''' An entSensorValueTable entry describes the
present reading of a sensor, the measurement units
and scale, and sensor operational status.
''',
'entsensorvalueentry',
'CISCO-ENTITY-SENSOR-MIB', False),
],
'CISCO-ENTITY-SENSOR-MIB',
'entSensorValueTable',
_yang_ns._namespaces['CISCO-ENTITY-SENSOR-MIB'],
'ydk.models.cisco_ios_xe.CISCO_ENTITY_SENSOR_MIB'
),
},
'CiscoEntitySensorMib.Entsensorthresholdtable.Entsensorthresholdentry' : {
'meta_info' : _MetaInfoClass('CiscoEntitySensorMib.Entsensorthresholdtable.Entsensorthresholdentry',
False,
[
_MetaInfoClassMember('entPhysicalIndex', ATTRIBUTE, 'int' , None, None,
[('1', '2147483647')], [],
''' ''',
'entphysicalindex',
'CISCO-ENTITY-SENSOR-MIB', True),
_MetaInfoClassMember('entSensorThresholdIndex', ATTRIBUTE, 'int' , None, None,
[('1', '99999999')], [],
''' An index that uniquely identifies an entry
in the entSensorThresholdTable. This index
permits the same sensor to have several
different thresholds.
''',
'entsensorthresholdindex',
'CISCO-ENTITY-SENSOR-MIB', True),
_MetaInfoClassMember('entSensorThresholdEvaluation', ATTRIBUTE, 'bool' , None, None,
[], [],
''' This variable indicates the result of the most
recent evaluation of the threshold. If the threshold
condition is true, entSensorThresholdEvaluation
is true(1). If the threshold condition is false,
entSensorThresholdEvaluation is false(2).
Thresholds are evaluated at the rate indicated by
entSensorValueUpdateRate.
''',
'entsensorthresholdevaluation',
'CISCO-ENTITY-SENSOR-MIB', False),
_MetaInfoClassMember('entSensorThresholdNotificationEnable', ATTRIBUTE, 'bool' , None, None,
[], [],
''' This variable controls generation of
entSensorThresholdNotification for this threshold.
When this variable is 'true', generation of
entSensorThresholdNotification is enabled for this
threshold. When this variable is 'false',
generation of entSensorThresholdNotification is
disabled for this threshold.
''',
'entsensorthresholdnotificationenable',
'CISCO-ENTITY-SENSOR-MIB', False),
_MetaInfoClassMember('entSensorThresholdRelation', REFERENCE_ENUM_CLASS, 'SensorthresholdrelationEnum' , 'ydk.models.cisco_ios_xe.CISCO_ENTITY_SENSOR_MIB', 'SensorthresholdrelationEnum',
[], [],
''' This variable indicates the relation between sensor value
(entSensorValue) and threshold value (entSensorThresholdValue),
required to trigger the alarm. When evaluating the relation,
entSensorValue is on the left of entSensorThresholdRelation,
and entSensorThresholdValue is on the right.
In pseudo-code, the evaluation-alarm mechanism is:
...
if (entSensorStatus == ok) then
if (evaluate(entSensorValue, entSensorThresholdRelation,
entSensorThresholdValue))
then
if (entSensorThresholdNotificationEnable == true)
then
raise_alarm(sensor's entPhysicalIndex);
endif
endif
endif
...
''',
'entsensorthresholdrelation',
'CISCO-ENTITY-SENSOR-MIB', False),
_MetaInfoClassMember('entSensorThresholdSeverity', REFERENCE_ENUM_CLASS, 'SensorthresholdseverityEnum' , 'ydk.models.cisco_ios_xe.CISCO_ENTITY_SENSOR_MIB', 'SensorthresholdseverityEnum',
[], [],
''' This variable indicates the severity of this threshold.
''',
'entsensorthresholdseverity',
'CISCO-ENTITY-SENSOR-MIB', False),
_MetaInfoClassMember('entSensorThresholdValue', ATTRIBUTE, 'int' , None, None,
[('-1000000000', '1073741823')], [],
''' This variable indicates the value of the threshold.
To correctly display or interpret this variable's value,
you must also know entSensorType, entSensorScale, and
entSensorPrecision.
However, you can directly compare entSensorValue
with the threshold values given in entSensorThresholdTable
without any semantic knowledge.
''',
'entsensorthresholdvalue',
'CISCO-ENTITY-SENSOR-MIB', False),
],
'CISCO-ENTITY-SENSOR-MIB',
'entSensorThresholdEntry',
_yang_ns._namespaces['CISCO-ENTITY-SENSOR-MIB'],
'ydk.models.cisco_ios_xe.CISCO_ENTITY_SENSOR_MIB'
),
},
'CiscoEntitySensorMib.Entsensorthresholdtable' : {
'meta_info' : _MetaInfoClass('CiscoEntitySensorMib.Entsensorthresholdtable',
False,
[
_MetaInfoClassMember('entSensorThresholdEntry', REFERENCE_LIST, 'Entsensorthresholdentry' , 'ydk.models.cisco_ios_xe.CISCO_ENTITY_SENSOR_MIB', 'CiscoEntitySensorMib.Entsensorthresholdtable.Entsensorthresholdentry',
[], [],
''' An entSensorThresholdTable entry describes the
thresholds for a sensor: the threshold severity,
the threshold value, the relation, and the
evaluation of the threshold.
Only entities of type sensor(8) are listed in this table.
Only pre-configured thresholds are listed in this table.
Users can create sensor-value monitoring instruments
in different ways, such as RMON alarms, Expression-MIB, etc.
Entries are created by the agent at system startup and
FRU insertion. Entries are deleted by the agent at
FRU removal.
''',
'entsensorthresholdentry',
'CISCO-ENTITY-SENSOR-MIB', False),
],
'CISCO-ENTITY-SENSOR-MIB',
'entSensorThresholdTable',
_yang_ns._namespaces['CISCO-ENTITY-SENSOR-MIB'],
'ydk.models.cisco_ios_xe.CISCO_ENTITY_SENSOR_MIB'
),
},
'CiscoEntitySensorMib' : {
'meta_info' : _MetaInfoClass('CiscoEntitySensorMib',
False,
[
_MetaInfoClassMember('entSensorGlobalObjects', REFERENCE_CLASS, 'Entsensorglobalobjects' , 'ydk.models.cisco_ios_xe.CISCO_ENTITY_SENSOR_MIB', 'CiscoEntitySensorMib.Entsensorglobalobjects',
[], [],
''' ''',
'entsensorglobalobjects',
'CISCO-ENTITY-SENSOR-MIB', False),
_MetaInfoClassMember('entSensorThresholdTable', REFERENCE_CLASS, 'Entsensorthresholdtable' , 'ydk.models.cisco_ios_xe.CISCO_ENTITY_SENSOR_MIB', 'CiscoEntitySensorMib.Entsensorthresholdtable',
[], [],
''' This table lists the threshold severity, relation, and
comparison value, for a sensor listed in the Entity-MIB
entPhysicalTable.
''',
'entsensorthresholdtable',
'CISCO-ENTITY-SENSOR-MIB', False),
_MetaInfoClassMember('entSensorValueTable', REFERENCE_CLASS, 'Entsensorvaluetable' , 'ydk.models.cisco_ios_xe.CISCO_ENTITY_SENSOR_MIB', 'CiscoEntitySensorMib.Entsensorvaluetable',
[], [],
''' This table lists the type, scale, and present value
of a sensor listed in the Entity-MIB entPhysicalTable.
''',
'entsensorvaluetable',
'CISCO-ENTITY-SENSOR-MIB', False),
],
'CISCO-ENTITY-SENSOR-MIB',
'CISCO-ENTITY-SENSOR-MIB',
_yang_ns._namespaces['CISCO-ENTITY-SENSOR-MIB'],
'ydk.models.cisco_ios_xe.CISCO_ENTITY_SENSOR_MIB'
),
},
}
_meta_table['CiscoEntitySensorMib.Entsensorvaluetable.Entsensorvalueentry']['meta_info'].parent = _meta_table['CiscoEntitySensorMib.Entsensorvaluetable']['meta_info']
_meta_table['CiscoEntitySensorMib.Entsensorthresholdtable.Entsensorthresholdentry']['meta_info'].parent = _meta_table['CiscoEntitySensorMib.Entsensorthresholdtable']['meta_info']
_meta_table['CiscoEntitySensorMib.Entsensorglobalobjects']['meta_info'].parent = _meta_table['CiscoEntitySensorMib']['meta_info']
_meta_table['CiscoEntitySensorMib.Entsensorvaluetable']['meta_info'].parent = _meta_table['CiscoEntitySensorMib']['meta_info']
_meta_table['CiscoEntitySensorMib.Entsensorthresholdtable']['meta_info'].parent = _meta_table['CiscoEntitySensorMib']['meta_info']
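# A minimal consumer-side sketch (not part of the generated bindings) of how
# the docstrings above say a reading is interpreted: entSensorValue is a
# fixed-point number, entSensorPrecision gives its number of decimal places,
# and entSensorScale names a power-of-ten exponent for the units. Mapping the
# scale enum to an integer exponent is assumed here for illustration; a real
# consumer would derive it from SensordatascaleEnum.
def _interpret_sensor_value(raw_value, scale_exponent, precision):
    """Return the measurement as a float, e.g. raw 3456 with precision=2 and
    scale_exponent=-3 (milli) -> 0.03456 base units."""
    return (raw_value / float(10 ** precision)) * (10 ** scale_exponent)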
|
|
from django import forms
from django.forms import ModelForm
from tasks.models import Task_Foci_001
class Form_Task_Foci_001_00(ModelForm):
class Meta:
model = Task_Foci_001
fields = ('cell_a0', 'cell_b0', 'cell_c0', 'cell_d0', 'cell_e0',
'cell_f0', 'cell_g0', 'cell_h0', 'cell_i0', 'cell_j0',)
column = 0
cell_a0 = forms.CharField(widget=forms.TextInput(), label = "A", required=False)
cell_b0 = forms.CharField(widget=forms.TextInput(), label = "B", required=False)
cell_c0 = forms.CharField(widget=forms.TextInput(), label = "C", required=False)
cell_d0 = forms.CharField(widget=forms.TextInput(), label = "D", required=False)
cell_e0 = forms.CharField(widget=forms.TextInput(), label = "E", required=False)
cell_f0 = forms.CharField(widget=forms.TextInput(), label = "F", required=False)
cell_g0 = forms.CharField(widget=forms.TextInput(), label = "G", required=False)
cell_h0 = forms.CharField(widget=forms.TextInput(), label = "H", required=False)
cell_i0 = forms.CharField(widget=forms.TextInput(), label = "I", required=False)
cell_j0 = forms.CharField(widget=forms.TextInput(), label = "J", required=False)
class Form_Task_Foci_001_01(ModelForm):
class Meta:
model = Task_Foci_001
fields = ('cell_a1', 'cell_b1', 'cell_c1', 'cell_d1', 'cell_e1',
'cell_f1', 'cell_g1', 'cell_h1', 'cell_i1', 'cell_j1',)
column = 1
cell_a1 = forms.CharField(widget=forms.TextInput(), label = "A", required=False)
cell_b1 = forms.CharField(widget=forms.TextInput(), label = "B", required=False)
cell_c1 = forms.CharField(widget=forms.TextInput(), label = "C", required=False)
cell_d1 = forms.CharField(widget=forms.TextInput(), label = "D", required=False)
cell_e1 = forms.CharField(widget=forms.TextInput(), label = "E", required=False)
cell_f1 = forms.CharField(widget=forms.TextInput(), label = "F", required=False)
cell_g1 = forms.CharField(widget=forms.TextInput(), label = "G", required=False)
cell_h1 = forms.CharField(widget=forms.TextInput(), label = "H", required=False)
cell_i1 = forms.CharField(widget=forms.TextInput(), label = "I", required=False)
cell_j1 = forms.CharField(widget=forms.TextInput(), label = "J", required=False)
class Form_Task_Foci_001_02(ModelForm):
class Meta:
model = Task_Foci_001
fields = ('cell_a2', 'cell_b2', 'cell_c2', 'cell_d2', 'cell_e2',
'cell_f2', 'cell_g2', 'cell_h2', 'cell_i2', 'cell_j2',)
column = 2
cell_a2 = forms.CharField(widget=forms.TextInput(), label = "A", required=False)
cell_b2 = forms.CharField(widget=forms.TextInput(), label = "B", required=False)
cell_c2 = forms.CharField(widget=forms.TextInput(), label = "C", required=False)
cell_d2 = forms.CharField(widget=forms.TextInput(), label = "D", required=False)
cell_e2 = forms.CharField(widget=forms.TextInput(), label = "E", required=False)
cell_f2 = forms.CharField(widget=forms.TextInput(), label = "F", required=False)
cell_g2 = forms.CharField(widget=forms.TextInput(), label = "G", required=False)
cell_h2 = forms.CharField(widget=forms.TextInput(), label = "H", required=False)
cell_i2 = forms.CharField(widget=forms.TextInput(), label = "I", required=False)
cell_j2 = forms.CharField(widget=forms.TextInput(), label = "J", required=False)
class Form_Task_Foci_001_03(ModelForm):
class Meta:
model = Task_Foci_001
fields = ('cell_a3', 'cell_b3', 'cell_c3', 'cell_d3', 'cell_e3',
'cell_f3', 'cell_g3', 'cell_h3', 'cell_i3', 'cell_j3',)
column = 3
cell_a3 = forms.CharField(widget=forms.TextInput(), label = "A", required=False)
cell_b3 = forms.CharField(widget=forms.TextInput(), label = "B", required=False)
cell_c3 = forms.CharField(widget=forms.TextInput(), label = "C", required=False)
cell_d3 = forms.CharField(widget=forms.TextInput(), label = "D", required=False)
cell_e3 = forms.CharField(widget=forms.TextInput(), label = "E", required=False)
cell_f3 = forms.CharField(widget=forms.TextInput(), label = "F", required=False)
cell_g3 = forms.CharField(widget=forms.TextInput(), label = "G", required=False)
cell_h3 = forms.CharField(widget=forms.TextInput(), label = "H", required=False)
cell_i3 = forms.CharField(widget=forms.TextInput(), label = "I", required=False)
cell_j3 = forms.CharField(widget=forms.TextInput(), label = "J", required=False)
class Form_Task_Foci_001_04(ModelForm):
class Meta:
model = Task_Foci_001
fields = ('cell_a4', 'cell_b4', 'cell_c4', 'cell_d4', 'cell_e4',
'cell_f4', 'cell_g4', 'cell_h4', 'cell_i4', 'cell_j4',)
column = 4
cell_a4 = forms.CharField(widget=forms.TextInput(), label = "A", required=False)
cell_b4 = forms.CharField(widget=forms.TextInput(), label = "B", required=False)
cell_c4 = forms.CharField(widget=forms.TextInput(), label = "C", required=False)
cell_d4 = forms.CharField(widget=forms.TextInput(), label = "D", required=False)
cell_e4 = forms.CharField(widget=forms.TextInput(), label = "E", required=False)
cell_f4 = forms.CharField(widget=forms.TextInput(), label = "F", required=False)
cell_g4 = forms.CharField(widget=forms.TextInput(), label = "G", required=False)
cell_h4 = forms.CharField(widget=forms.TextInput(), label = "H", required=False)
cell_i4 = forms.CharField(widget=forms.TextInput(), label = "I", required=False)
cell_j4 = forms.CharField(widget=forms.TextInput(), label = "J", required=False)
class Form_Task_Foci_001_05(ModelForm):
class Meta:
model = Task_Foci_001
fields = ('cell_a5', 'cell_b5', 'cell_c5', 'cell_d5', 'cell_e5',
'cell_f5', 'cell_g5', 'cell_h5', 'cell_i5', 'cell_j5',)
column = 5
cell_a5 = forms.CharField(widget=forms.TextInput(), label = "A", required=False)
cell_b5 = forms.CharField(widget=forms.TextInput(), label = "B", required=False)
cell_c5 = forms.CharField(widget=forms.TextInput(), label = "C", required=False)
cell_d5 = forms.CharField(widget=forms.TextInput(), label = "D", required=False)
cell_e5 = forms.CharField(widget=forms.TextInput(), label = "E", required=False)
cell_f5 = forms.CharField(widget=forms.TextInput(), label = "F", required=False)
cell_g5 = forms.CharField(widget=forms.TextInput(), label = "G", required=False)
cell_h5 = forms.CharField(widget=forms.TextInput(), label = "H", required=False)
cell_i5 = forms.CharField(widget=forms.TextInput(), label = "I", required=False)
cell_j5 = forms.CharField(widget=forms.TextInput(), label = "J", required=False)
class Form_Task_Foci_001_06(ModelForm):
class Meta:
model = Task_Foci_001
fields = ('cell_a6', 'cell_b6', 'cell_c6', 'cell_d6', 'cell_e6',
'cell_f6', 'cell_g6', 'cell_h6', 'cell_i6', 'cell_j6',)
column = 6
cell_a6 = forms.CharField(widget=forms.TextInput(), label = "A", required=False)
cell_b6 = forms.CharField(widget=forms.TextInput(), label = "B", required=False)
cell_c6 = forms.CharField(widget=forms.TextInput(), label = "C", required=False)
cell_d6 = forms.CharField(widget=forms.TextInput(), label = "D", required=False)
cell_e6 = forms.CharField(widget=forms.TextInput(), label = "E", required=False)
cell_f6 = forms.CharField(widget=forms.TextInput(), label = "F", required=False)
cell_g6 = forms.CharField(widget=forms.TextInput(), label = "G", required=False)
cell_h6 = forms.CharField(widget=forms.TextInput(), label = "H", required=False)
cell_i6 = forms.CharField(widget=forms.TextInput(), label = "I", required=False)
cell_j6 = forms.CharField(widget=forms.TextInput(), label = "J", required=False)
class Form_Task_Foci_001_07(ModelForm):
class Meta:
model = Task_Foci_001
fields = ('cell_a7', 'cell_b7', 'cell_c7', 'cell_d7', 'cell_e7',
'cell_f7', 'cell_g7', 'cell_h7', 'cell_i7', 'cell_j7',)
column = 7
cell_a7 = forms.CharField(widget=forms.TextInput(), label = "A", required=False)
cell_b7 = forms.CharField(widget=forms.TextInput(), label = "B", required=False)
cell_c7 = forms.CharField(widget=forms.TextInput(), label = "C", required=False)
cell_d7 = forms.CharField(widget=forms.TextInput(), label = "D", required=False)
cell_e7 = forms.CharField(widget=forms.TextInput(), label = "E", required=False)
cell_f7 = forms.CharField(widget=forms.TextInput(), label = "F", required=False)
cell_g7 = forms.CharField(widget=forms.TextInput(), label = "G", required=False)
cell_h7 = forms.CharField(widget=forms.TextInput(), label = "H", required=False)
cell_i7 = forms.CharField(widget=forms.TextInput(), label = "I", required=False)
cell_j7 = forms.CharField(widget=forms.TextInput(), label = "J", required=False)
class Form_Task_Foci_001_08(ModelForm):
class Meta:
model = Task_Foci_001
fields = ('cell_a8', 'cell_b8', 'cell_c8', 'cell_d8', 'cell_e8',
'cell_f8', 'cell_g8', 'cell_h8', 'cell_i8', 'cell_j8',)
column = 8
cell_a8 = forms.CharField(widget=forms.TextInput(), label = "A", required=False)
cell_b8 = forms.CharField(widget=forms.TextInput(), label = "B", required=False)
cell_c8 = forms.CharField(widget=forms.TextInput(), label = "C", required=False)
cell_d8 = forms.CharField(widget=forms.TextInput(), label = "D", required=False)
cell_e8 = forms.CharField(widget=forms.TextInput(), label = "E", required=False)
cell_f8 = forms.CharField(widget=forms.TextInput(), label = "F", required=False)
cell_g8 = forms.CharField(widget=forms.TextInput(), label = "G", required=False)
cell_h8 = forms.CharField(widget=forms.TextInput(), label = "H", required=False)
cell_i8 = forms.CharField(widget=forms.TextInput(), label = "I", required=False)
cell_j8 = forms.CharField(widget=forms.TextInput(), label = "J", required=False)
class Form_Task_Foci_001_09(ModelForm):
class Meta:
model = Task_Foci_001
fields = ('cell_a9', 'cell_b9', 'cell_c9', 'cell_d9', 'cell_e9',
'cell_f9', 'cell_g9', 'cell_h9', 'cell_i9', 'cell_j9',)
column = 9
cell_a9 = forms.CharField(widget=forms.TextInput(), label = "A", required=False)
cell_b9 = forms.CharField(widget=forms.TextInput(), label = "B", required=False)
cell_c9 = forms.CharField(widget=forms.TextInput(), label = "C", required=False)
cell_d9 = forms.CharField(widget=forms.TextInput(), label = "D", required=False)
cell_e9 = forms.CharField(widget=forms.TextInput(), label = "E", required=False)
cell_f9 = forms.CharField(widget=forms.TextInput(), label = "F", required=False)
cell_g9 = forms.CharField(widget=forms.TextInput(), label = "G", required=False)
cell_h9 = forms.CharField(widget=forms.TextInput(), label = "H", required=False)
cell_i9 = forms.CharField(widget=forms.TextInput(), label = "I", required=False)
cell_j9 = forms.CharField(widget=forms.TextInput(), label = "J", required=False)
class Form_Task_Foci_001_010(ModelForm):
class Meta:
model = Task_Foci_001
fields = ('cell_a10', 'cell_b10', 'cell_c10', 'cell_d10', 'cell_e10',
'cell_f10', 'cell_g10', 'cell_h10', 'cell_i10', 'cell_j10',)
column = 10
cell_a10 = forms.CharField(widget=forms.TextInput(), label = "A", required=False)
cell_b10 = forms.CharField(widget=forms.TextInput(), label = "B", required=False)
cell_c10 = forms.CharField(widget=forms.TextInput(), label = "C", required=False)
cell_d10 = forms.CharField(widget=forms.TextInput(), label = "D", required=False)
cell_e10 = forms.CharField(widget=forms.TextInput(), label = "E", required=False)
cell_f10 = forms.CharField(widget=forms.TextInput(), label = "F", required=False)
cell_g10 = forms.CharField(widget=forms.TextInput(), label = "G", required=False)
cell_h10 = forms.CharField(widget=forms.TextInput(), label = "H", required=False)
cell_i10 = forms.CharField(widget=forms.TextInput(), label = "I", required=False)
cell_j10 = forms.CharField(widget=forms.TextInput(), label = "J", required=False)
class Form_Task_Foci_001_011(ModelForm):
class Meta:
model = Task_Foci_001
fields = ('cell_a11', 'cell_b11', 'cell_c11', 'cell_d11', 'cell_e11',
'cell_f11', 'cell_g11', 'cell_h11', 'cell_i11', 'cell_j11',)
column = 11
cell_a11 = forms.CharField(widget=forms.TextInput(), label = "A", required=False)
cell_b11 = forms.CharField(widget=forms.TextInput(), label = "B", required=False)
cell_c11 = forms.CharField(widget=forms.TextInput(), label = "C", required=False)
cell_d11 = forms.CharField(widget=forms.TextInput(), label = "D", required=False)
cell_e11 = forms.CharField(widget=forms.TextInput(), label = "E", required=False)
cell_f11 = forms.CharField(widget=forms.TextInput(), label = "F", required=False)
cell_g11 = forms.CharField(widget=forms.TextInput(), label = "G", required=False)
cell_h11 = forms.CharField(widget=forms.TextInput(), label = "H", required=False)
cell_i11 = forms.CharField(widget=forms.TextInput(), label = "I", required=False)
cell_j11 = forms.CharField(widget=forms.TextInput(), label = "J", required=False)
class Form_Task_Foci_001_012(ModelForm):
class Meta:
model = Task_Foci_001
fields = ('cell_a12', 'cell_b12', 'cell_c12', 'cell_d12', 'cell_e12',
'cell_f12', 'cell_g12', 'cell_h12', 'cell_i12', 'cell_j12',)
column = 12
cell_a12 = forms.CharField(widget=forms.TextInput(), label = "A", required=False)
cell_b12 = forms.CharField(widget=forms.TextInput(), label = "B", required=False)
cell_c12 = forms.CharField(widget=forms.TextInput(), label = "C", required=False)
cell_d12 = forms.CharField(widget=forms.TextInput(), label = "D", required=False)
cell_e12 = forms.CharField(widget=forms.TextInput(), label = "E", required=False)
cell_f12 = forms.CharField(widget=forms.TextInput(), label = "F", required=False)
cell_g12 = forms.CharField(widget=forms.TextInput(), label = "G", required=False)
cell_h12 = forms.CharField(widget=forms.TextInput(), label = "H", required=False)
cell_i12 = forms.CharField(widget=forms.TextInput(), label = "I", required=False)
cell_j12 = forms.CharField(widget=forms.TextInput(), label = "J", required=False)
class Form_Task_Foci_001_013(ModelForm):
class Meta:
model = Task_Foci_001
fields = ('cell_a13', 'cell_b13', 'cell_c13', 'cell_d13', 'cell_e13',
'cell_f13', 'cell_g13', 'cell_h13', 'cell_i13', 'cell_j13',)
column = 13
cell_a13 = forms.CharField(widget=forms.TextInput(), label = "A", required=False)
cell_b13 = forms.CharField(widget=forms.TextInput(), label = "B", required=False)
cell_c13 = forms.CharField(widget=forms.TextInput(), label = "C", required=False)
cell_d13 = forms.CharField(widget=forms.TextInput(), label = "D", required=False)
cell_e13 = forms.CharField(widget=forms.TextInput(), label = "E", required=False)
cell_f13 = forms.CharField(widget=forms.TextInput(), label = "F", required=False)
cell_g13 = forms.CharField(widget=forms.TextInput(), label = "G", required=False)
cell_h13 = forms.CharField(widget=forms.TextInput(), label = "H", required=False)
cell_i13 = forms.CharField(widget=forms.TextInput(), label = "I", required=False)
cell_j13 = forms.CharField(widget=forms.TextInput(), label = "J", required=False)
class Form_Task_Foci_001_014(ModelForm):
class Meta:
model = Task_Foci_001
fields = ('cell_a14', 'cell_b14', 'cell_c14', 'cell_d14', 'cell_e14',
'cell_f14', 'cell_g14', 'cell_h14', 'cell_i14', 'cell_j14',)
column = 14
cell_a14 = forms.CharField(widget=forms.TextInput(), label = "A", required=False)
cell_b14 = forms.CharField(widget=forms.TextInput(), label = "B", required=False)
cell_c14 = forms.CharField(widget=forms.TextInput(), label = "C", required=False)
cell_d14 = forms.CharField(widget=forms.TextInput(), label = "D", required=False)
cell_e14 = forms.CharField(widget=forms.TextInput(), label = "E", required=False)
cell_f14 = forms.CharField(widget=forms.TextInput(), label = "F", required=False)
cell_g14 = forms.CharField(widget=forms.TextInput(), label = "G", required=False)
cell_h14 = forms.CharField(widget=forms.TextInput(), label = "H", required=False)
cell_i14 = forms.CharField(widget=forms.TextInput(), label = "I", required=False)
cell_j14 = forms.CharField(widget=forms.TextInput(), label = "J", required=False)
class Form_Task_Foci_001_015(ModelForm):
class Meta:
model = Task_Foci_001
fields = ('cell_a15', 'cell_b15', 'cell_c15', 'cell_d15', 'cell_e15',
'cell_f15', 'cell_g15', 'cell_h15', 'cell_i15', 'cell_j15',)
column = 15
cell_a15 = forms.CharField(widget=forms.TextInput(), label = "A", required=False)
cell_b15 = forms.CharField(widget=forms.TextInput(), label = "B", required=False)
cell_c15 = forms.CharField(widget=forms.TextInput(), label = "C", required=False)
cell_d15 = forms.CharField(widget=forms.TextInput(), label = "D", required=False)
cell_e15 = forms.CharField(widget=forms.TextInput(), label = "E", required=False)
cell_f15 = forms.CharField(widget=forms.TextInput(), label = "F", required=False)
cell_g15 = forms.CharField(widget=forms.TextInput(), label = "G", required=False)
cell_h15 = forms.CharField(widget=forms.TextInput(), label = "H", required=False)
cell_i15 = forms.CharField(widget=forms.TextInput(), label = "I", required=False)
cell_j15 = forms.CharField(widget=forms.TextInput(), label = "J", required=False)
class Form_Task_Foci_001_016(ModelForm):
class Meta:
model = Task_Foci_001
fields = ('cell_a16', 'cell_b16', 'cell_c16', 'cell_d16', 'cell_e16',
'cell_f16', 'cell_g16', 'cell_h16', 'cell_i16', 'cell_j16',)
column = 16
cell_a16 = forms.CharField(widget=forms.TextInput(), label = "A", required=False)
cell_b16 = forms.CharField(widget=forms.TextInput(), label = "B", required=False)
cell_c16 = forms.CharField(widget=forms.TextInput(), label = "C", required=False)
cell_d16 = forms.CharField(widget=forms.TextInput(), label = "D", required=False)
cell_e16 = forms.CharField(widget=forms.TextInput(), label = "E", required=False)
cell_f16 = forms.CharField(widget=forms.TextInput(), label = "F", required=False)
cell_g16 = forms.CharField(widget=forms.TextInput(), label = "G", required=False)
cell_h16 = forms.CharField(widget=forms.TextInput(), label = "H", required=False)
cell_i16 = forms.CharField(widget=forms.TextInput(), label = "I", required=False)
cell_j16 = forms.CharField(widget=forms.TextInput(), label = "J", required=False)
class Form_Task_Foci_001_017(ModelForm):
class Meta:
model = Task_Foci_001
fields = ('cell_a17', 'cell_b17', 'cell_c17', 'cell_d17', 'cell_e17',
'cell_f17', 'cell_g17', 'cell_h17', 'cell_i17', 'cell_j17',)
column = 17
cell_a17 = forms.CharField(widget=forms.TextInput(), label = "A", required=False)
cell_b17 = forms.CharField(widget=forms.TextInput(), label = "B", required=False)
cell_c17 = forms.CharField(widget=forms.TextInput(), label = "C", required=False)
cell_d17 = forms.CharField(widget=forms.TextInput(), label = "D", required=False)
cell_e17 = forms.CharField(widget=forms.TextInput(), label = "E", required=False)
cell_f17 = forms.CharField(widget=forms.TextInput(), label = "F", required=False)
cell_g17 = forms.CharField(widget=forms.TextInput(), label = "G", required=False)
cell_h17 = forms.CharField(widget=forms.TextInput(), label = "H", required=False)
cell_i17 = forms.CharField(widget=forms.TextInput(), label = "I", required=False)
cell_j17 = forms.CharField(widget=forms.TextInput(), label = "J", required=False)
class Form_Task_Foci_001_018(ModelForm):
class Meta:
model = Task_Foci_001
fields = ('cell_a18', 'cell_b18', 'cell_c18', 'cell_d18', 'cell_e18',
'cell_f18', 'cell_g18', 'cell_h18', 'cell_i18', 'cell_j18',)
column = 18
cell_a18 = forms.CharField(widget=forms.TextInput(), label = "A", required=False)
cell_b18 = forms.CharField(widget=forms.TextInput(), label = "B", required=False)
cell_c18 = forms.CharField(widget=forms.TextInput(), label = "C", required=False)
cell_d18 = forms.CharField(widget=forms.TextInput(), label = "D", required=False)
cell_e18 = forms.CharField(widget=forms.TextInput(), label = "E", required=False)
cell_f18 = forms.CharField(widget=forms.TextInput(), label = "F", required=False)
cell_g18 = forms.CharField(widget=forms.TextInput(), label = "G", required=False)
cell_h18 = forms.CharField(widget=forms.TextInput(), label = "H", required=False)
cell_i18 = forms.CharField(widget=forms.TextInput(), label = "I", required=False)
cell_j18 = forms.CharField(widget=forms.TextInput(), label = "J", required=False)
class Form_Task_Foci_001_019(ModelForm):
class Meta:
model = Task_Foci_001
fields = ('cell_a19', 'cell_b19', 'cell_c19', 'cell_d19', 'cell_e19',
'cell_f19', 'cell_g19', 'cell_h19', 'cell_i19', 'cell_j19',)
column = 19
cell_a19 = forms.CharField(widget=forms.TextInput(), label = "A", required=False)
cell_b19 = forms.CharField(widget=forms.TextInput(), label = "B", required=False)
cell_c19 = forms.CharField(widget=forms.TextInput(), label = "C", required=False)
cell_d19 = forms.CharField(widget=forms.TextInput(), label = "D", required=False)
cell_e19 = forms.CharField(widget=forms.TextInput(), label = "E", required=False)
cell_f19 = forms.CharField(widget=forms.TextInput(), label = "F", required=False)
cell_g19 = forms.CharField(widget=forms.TextInput(), label = "G", required=False)
cell_h19 = forms.CharField(widget=forms.TextInput(), label = "H", required=False)
cell_i19 = forms.CharField(widget=forms.TextInput(), label = "I", required=False)
cell_j19 = forms.CharField(widget=forms.TextInput(), label = "J", required=False)
class Form_Task_Foci_001_020(ModelForm):
class Meta:
model = Task_Foci_001
fields = ('cell_a20', 'cell_b20', 'cell_c20', 'cell_d20', 'cell_e20',
'cell_f20', 'cell_g20', 'cell_h20', 'cell_i20', 'cell_j20',)
column = 20
cell_a20 = forms.CharField(widget=forms.TextInput(), label = "A", required=False)
cell_b20 = forms.CharField(widget=forms.TextInput(), label = "B", required=False)
cell_c20 = forms.CharField(widget=forms.TextInput(), label = "C", required=False)
cell_d20 = forms.CharField(widget=forms.TextInput(), label = "D", required=False)
cell_e20 = forms.CharField(widget=forms.TextInput(), label = "E", required=False)
cell_f20 = forms.CharField(widget=forms.TextInput(), label = "F", required=False)
cell_g20 = forms.CharField(widget=forms.TextInput(), label = "G", required=False)
cell_h20 = forms.CharField(widget=forms.TextInput(), label = "H", required=False)
cell_i20 = forms.CharField(widget=forms.TextInput(), label = "I", required=False)
cell_j20 = forms.CharField(widget=forms.TextInput(), label = "J", required=False)
class Form_Task_Foci_001_021(ModelForm):
class Meta:
model = Task_Foci_001
fields = ('cell_a21', 'cell_b21', 'cell_c21', 'cell_d21', 'cell_e21',
'cell_f21', 'cell_g21', 'cell_h21', 'cell_i21', 'cell_j21',)
column = 21
cell_a21 = forms.CharField(widget=forms.TextInput(), label = "A", required=False)
cell_b21 = forms.CharField(widget=forms.TextInput(), label = "B", required=False)
cell_c21 = forms.CharField(widget=forms.TextInput(), label = "C", required=False)
cell_d21 = forms.CharField(widget=forms.TextInput(), label = "D", required=False)
cell_e21 = forms.CharField(widget=forms.TextInput(), label = "E", required=False)
cell_f21 = forms.CharField(widget=forms.TextInput(), label = "F", required=False)
cell_g21 = forms.CharField(widget=forms.TextInput(), label = "G", required=False)
cell_h21 = forms.CharField(widget=forms.TextInput(), label = "H", required=False)
cell_i21 = forms.CharField(widget=forms.TextInput(), label = "I", required=False)
cell_j21 = forms.CharField(widget=forms.TextInput(), label = "J", required=False)
class Form_Task_Foci_001_022(ModelForm):
class Meta:
model = Task_Foci_001
fields = ('cell_a22', 'cell_b22', 'cell_c22', 'cell_d22', 'cell_e22',
'cell_f22', 'cell_g22', 'cell_h22', 'cell_i22', 'cell_j22',)
column = 22
cell_a22 = forms.CharField(widget=forms.TextInput(), label = "A", required=False)
cell_b22 = forms.CharField(widget=forms.TextInput(), label = "B", required=False)
cell_c22 = forms.CharField(widget=forms.TextInput(), label = "C", required=False)
cell_d22 = forms.CharField(widget=forms.TextInput(), label = "D", required=False)
cell_e22 = forms.CharField(widget=forms.TextInput(), label = "E", required=False)
cell_f22 = forms.CharField(widget=forms.TextInput(), label = "F", required=False)
cell_g22 = forms.CharField(widget=forms.TextInput(), label = "G", required=False)
cell_h22 = forms.CharField(widget=forms.TextInput(), label = "H", required=False)
cell_i22 = forms.CharField(widget=forms.TextInput(), label = "I", required=False)
cell_j22 = forms.CharField(widget=forms.TextInput(), label = "J", required=False)
class Form_Task_Foci_001_023(ModelForm):
class Meta:
model = Task_Foci_001
fields = ('cell_a23', 'cell_b23', 'cell_c23', 'cell_d23', 'cell_e23',
'cell_f23', 'cell_g23', 'cell_h23', 'cell_i23', 'cell_j23',)
column = 23
cell_a23 = forms.CharField(widget=forms.TextInput(), label = "A", required=False)
cell_b23 = forms.CharField(widget=forms.TextInput(), label = "B", required=False)
cell_c23 = forms.CharField(widget=forms.TextInput(), label = "C", required=False)
cell_d23 = forms.CharField(widget=forms.TextInput(), label = "D", required=False)
cell_e23 = forms.CharField(widget=forms.TextInput(), label = "E", required=False)
cell_f23 = forms.CharField(widget=forms.TextInput(), label = "F", required=False)
cell_g23 = forms.CharField(widget=forms.TextInput(), label = "G", required=False)
cell_h23 = forms.CharField(widget=forms.TextInput(), label = "H", required=False)
cell_i23 = forms.CharField(widget=forms.TextInput(), label = "I", required=False)
cell_j23 = forms.CharField(widget=forms.TextInput(), label = "J", required=False)
class Form_Task_Foci_001_024(ModelForm):
class Meta:
model = Task_Foci_001
fields = ('cell_a24', 'cell_b24', 'cell_c24', 'cell_d24', 'cell_e24',
'cell_f24', 'cell_g24', 'cell_h24', 'cell_i24', 'cell_j24',)
column = 24
cell_a24 = forms.CharField(widget=forms.TextInput(), label = "A", required=False)
cell_b24 = forms.CharField(widget=forms.TextInput(), label = "B", required=False)
cell_c24 = forms.CharField(widget=forms.TextInput(), label = "C", required=False)
cell_d24 = forms.CharField(widget=forms.TextInput(), label = "D", required=False)
cell_e24 = forms.CharField(widget=forms.TextInput(), label = "E", required=False)
cell_f24 = forms.CharField(widget=forms.TextInput(), label = "F", required=False)
cell_g24 = forms.CharField(widget=forms.TextInput(), label = "G", required=False)
cell_h24 = forms.CharField(widget=forms.TextInput(), label = "H", required=False)
cell_i24 = forms.CharField(widget=forms.TextInput(), label = "I", required=False)
cell_j24 = forms.CharField(widget=forms.TextInput(), label = "J", required=False)
class Form_Task_Foci_001_025(ModelForm):
class Meta:
model = Task_Foci_001
fields = ('cell_a25', 'cell_b25', 'cell_c25', 'cell_d25', 'cell_e25',
'cell_f25', 'cell_g25', 'cell_h25', 'cell_i25', 'cell_j25',)
column = 25
cell_a25 = forms.CharField(widget=forms.TextInput(), label = "A", required=False)
cell_b25 = forms.CharField(widget=forms.TextInput(), label = "B", required=False)
cell_c25 = forms.CharField(widget=forms.TextInput(), label = "C", required=False)
cell_d25 = forms.CharField(widget=forms.TextInput(), label = "D", required=False)
cell_e25 = forms.CharField(widget=forms.TextInput(), label = "E", required=False)
cell_f25 = forms.CharField(widget=forms.TextInput(), label = "F", required=False)
cell_g25 = forms.CharField(widget=forms.TextInput(), label = "G", required=False)
cell_h25 = forms.CharField(widget=forms.TextInput(), label = "H", required=False)
cell_i25 = forms.CharField(widget=forms.TextInput(), label = "I", required=False)
cell_j25 = forms.CharField(widget=forms.TextInput(), label = "J", required=False)
class Form_Task_Foci_001_026(ModelForm):
class Meta:
model = Task_Foci_001
fields = ('cell_a26', 'cell_b26', 'cell_c26', 'cell_d26', 'cell_e26',
'cell_f26', 'cell_g26', 'cell_h26', 'cell_i26', 'cell_j26',)
column = 26
cell_a26 = forms.CharField(widget=forms.TextInput(), label = "A", required=False)
cell_b26 = forms.CharField(widget=forms.TextInput(), label = "B", required=False)
cell_c26 = forms.CharField(widget=forms.TextInput(), label = "C", required=False)
cell_d26 = forms.CharField(widget=forms.TextInput(), label = "D", required=False)
cell_e26 = forms.CharField(widget=forms.TextInput(), label = "E", required=False)
cell_f26 = forms.CharField(widget=forms.TextInput(), label = "F", required=False)
cell_g26 = forms.CharField(widget=forms.TextInput(), label = "G", required=False)
cell_h26 = forms.CharField(widget=forms.TextInput(), label = "H", required=False)
cell_i26 = forms.CharField(widget=forms.TextInput(), label = "I", required=False)
cell_j26 = forms.CharField(widget=forms.TextInput(), label = "J", required=False)
class Form_Task_Foci_001_027(ModelForm):
class Meta:
model = Task_Foci_001
fields = ('cell_a27', 'cell_b27', 'cell_c27', 'cell_d27', 'cell_e27',
'cell_f27', 'cell_g27', 'cell_h27', 'cell_i27', 'cell_j27',)
column = 27
cell_a27 = forms.CharField(widget=forms.TextInput(), label = "A", required=False)
cell_b27 = forms.CharField(widget=forms.TextInput(), label = "B", required=False)
cell_c27 = forms.CharField(widget=forms.TextInput(), label = "C", required=False)
cell_d27 = forms.CharField(widget=forms.TextInput(), label = "D", required=False)
cell_e27 = forms.CharField(widget=forms.TextInput(), label = "E", required=False)
cell_f27 = forms.CharField(widget=forms.TextInput(), label = "F", required=False)
cell_g27 = forms.CharField(widget=forms.TextInput(), label = "G", required=False)
cell_h27 = forms.CharField(widget=forms.TextInput(), label = "H", required=False)
cell_i27 = forms.CharField(widget=forms.TextInput(), label = "I", required=False)
cell_j27 = forms.CharField(widget=forms.TextInput(), label = "J", required=False)
class Form_Task_Foci_001_028(ModelForm):
class Meta:
model = Task_Foci_001
fields = ('cell_a28', 'cell_b28', 'cell_c28', 'cell_d28', 'cell_e28',
'cell_f28', 'cell_g28', 'cell_h28', 'cell_i28', 'cell_j28',)
column = 28
cell_a28 = forms.CharField(widget=forms.TextInput(), label = "A", required=False)
cell_b28 = forms.CharField(widget=forms.TextInput(), label = "B", required=False)
cell_c28 = forms.CharField(widget=forms.TextInput(), label = "C", required=False)
cell_d28 = forms.CharField(widget=forms.TextInput(), label = "D", required=False)
cell_e28 = forms.CharField(widget=forms.TextInput(), label = "E", required=False)
cell_f28 = forms.CharField(widget=forms.TextInput(), label = "F", required=False)
cell_g28 = forms.CharField(widget=forms.TextInput(), label = "G", required=False)
cell_h28 = forms.CharField(widget=forms.TextInput(), label = "H", required=False)
cell_i28 = forms.CharField(widget=forms.TextInput(), label = "I", required=False)
cell_j28 = forms.CharField(widget=forms.TextInput(), label = "J", required=False)
class Form_Task_Foci_001_029(ModelForm):
class Meta:
model = Task_Foci_001
fields = ('cell_a29', 'cell_b29', 'cell_c29', 'cell_d29', 'cell_e29',
'cell_f29', 'cell_g29', 'cell_h29', 'cell_i29', 'cell_j29',)
column = 29
cell_a29 = forms.CharField(widget=forms.TextInput(), label = "A", required=False)
cell_b29 = forms.CharField(widget=forms.TextInput(), label = "B", required=False)
cell_c29 = forms.CharField(widget=forms.TextInput(), label = "C", required=False)
cell_d29 = forms.CharField(widget=forms.TextInput(), label = "D", required=False)
cell_e29 = forms.CharField(widget=forms.TextInput(), label = "E", required=False)
cell_f29 = forms.CharField(widget=forms.TextInput(), label = "F", required=False)
cell_g29 = forms.CharField(widget=forms.TextInput(), label = "G", required=False)
cell_h29 = forms.CharField(widget=forms.TextInput(), label = "H", required=False)
cell_i29 = forms.CharField(widget=forms.TextInput(), label = "I", required=False)
cell_j29 = forms.CharField(widget=forms.TextInput(), label = "J", required=False)
class Form_Task_Foci_001_030(ModelForm):
class Meta:
model = Task_Foci_001
fields = ('cell_a30', 'cell_b30', 'cell_c30', 'cell_d30', 'cell_e30',
'cell_f30', 'cell_g30', 'cell_h30', 'cell_i30', 'cell_j30',)
column = 30
cell_a30 = forms.CharField(widget=forms.TextInput(), label = "A", required=False)
cell_b30 = forms.CharField(widget=forms.TextInput(), label = "B", required=False)
cell_c30 = forms.CharField(widget=forms.TextInput(), label = "C", required=False)
cell_d30 = forms.CharField(widget=forms.TextInput(), label = "D", required=False)
cell_e30 = forms.CharField(widget=forms.TextInput(), label = "E", required=False)
cell_f30 = forms.CharField(widget=forms.TextInput(), label = "F", required=False)
cell_g30 = forms.CharField(widget=forms.TextInput(), label = "G", required=False)
cell_h30 = forms.CharField(widget=forms.TextInput(), label = "H", required=False)
cell_i30 = forms.CharField(widget=forms.TextInput(), label = "I", required=False)
cell_j30 = forms.CharField(widget=forms.TextInput(), label = "J", required=False)
class Form_Task_Foci_001_031(ModelForm):
class Meta:
model = Task_Foci_001
fields = ('cell_a31', 'cell_b31', 'cell_c31', 'cell_d31', 'cell_e31',
'cell_f31', 'cell_g31', 'cell_h31', 'cell_i31', 'cell_j31',)
column = 31
cell_a31 = forms.CharField(widget=forms.TextInput(), label = "A", required=False)
cell_b31 = forms.CharField(widget=forms.TextInput(), label = "B", required=False)
cell_c31 = forms.CharField(widget=forms.TextInput(), label = "C", required=False)
cell_d31 = forms.CharField(widget=forms.TextInput(), label = "D", required=False)
cell_e31 = forms.CharField(widget=forms.TextInput(), label = "E", required=False)
cell_f31 = forms.CharField(widget=forms.TextInput(), label = "F", required=False)
cell_g31 = forms.CharField(widget=forms.TextInput(), label = "G", required=False)
cell_h31 = forms.CharField(widget=forms.TextInput(), label = "H", required=False)
cell_i31 = forms.CharField(widget=forms.TextInput(), label = "I", required=False)
cell_j31 = forms.CharField(widget=forms.TextInput(), label = "J", required=False)
class Form_Task_Foci_001_032(ModelForm):
class Meta:
model = Task_Foci_001
fields = ('cell_a32', 'cell_b32', 'cell_c32', 'cell_d32', 'cell_e32',
'cell_f32', 'cell_g32', 'cell_h32', 'cell_i32', 'cell_j32',)
column = 32
cell_a32 = forms.CharField(widget=forms.TextInput(), label = "A", required=False)
cell_b32 = forms.CharField(widget=forms.TextInput(), label = "B", required=False)
cell_c32 = forms.CharField(widget=forms.TextInput(), label = "C", required=False)
cell_d32 = forms.CharField(widget=forms.TextInput(), label = "D", required=False)
cell_e32 = forms.CharField(widget=forms.TextInput(), label = "E", required=False)
cell_f32 = forms.CharField(widget=forms.TextInput(), label = "F", required=False)
cell_g32 = forms.CharField(widget=forms.TextInput(), label = "G", required=False)
cell_h32 = forms.CharField(widget=forms.TextInput(), label = "H", required=False)
cell_i32 = forms.CharField(widget=forms.TextInput(), label = "I", required=False)
cell_j32 = forms.CharField(widget=forms.TextInput(), label = "J", required=False)
class Form_Task_Foci_001_033(ModelForm):
class Meta:
model = Task_Foci_001
fields = ('cell_a33', 'cell_b33', 'cell_c33', 'cell_d33', 'cell_e33',
'cell_f33', 'cell_g33', 'cell_h33', 'cell_i33', 'cell_j33',)
column = 33
cell_a33 = forms.CharField(widget=forms.TextInput(), label = "A", required=False)
cell_b33 = forms.CharField(widget=forms.TextInput(), label = "B", required=False)
cell_c33 = forms.CharField(widget=forms.TextInput(), label = "C", required=False)
cell_d33 = forms.CharField(widget=forms.TextInput(), label = "D", required=False)
cell_e33 = forms.CharField(widget=forms.TextInput(), label = "E", required=False)
cell_f33 = forms.CharField(widget=forms.TextInput(), label = "F", required=False)
cell_g33 = forms.CharField(widget=forms.TextInput(), label = "G", required=False)
cell_h33 = forms.CharField(widget=forms.TextInput(), label = "H", required=False)
cell_i33 = forms.CharField(widget=forms.TextInput(), label = "I", required=False)
cell_j33 = forms.CharField(widget=forms.TextInput(), label = "J", required=False)
class Form_Task_Foci_001_034(ModelForm):
class Meta:
model = Task_Foci_001
fields = ('cell_a34', 'cell_b34', 'cell_c34', 'cell_d34', 'cell_e34',
'cell_f34', 'cell_g34', 'cell_h34', 'cell_i34', 'cell_j34',)
column = 34
cell_a34 = forms.CharField(widget=forms.TextInput(), label = "A", required=False)
cell_b34 = forms.CharField(widget=forms.TextInput(), label = "B", required=False)
cell_c34 = forms.CharField(widget=forms.TextInput(), label = "C", required=False)
cell_d34 = forms.CharField(widget=forms.TextInput(), label = "D", required=False)
cell_e34 = forms.CharField(widget=forms.TextInput(), label = "E", required=False)
cell_f34 = forms.CharField(widget=forms.TextInput(), label = "F", required=False)
cell_g34 = forms.CharField(widget=forms.TextInput(), label = "G", required=False)
cell_h34 = forms.CharField(widget=forms.TextInput(), label = "H", required=False)
cell_i34 = forms.CharField(widget=forms.TextInput(), label = "I", required=False)
cell_j34 = forms.CharField(widget=forms.TextInput(), label = "J", required=False)
class Form_Task_Foci_001_035(ModelForm):
class Meta:
model = Task_Foci_001
fields = ('cell_a35', 'cell_b35', 'cell_c35', 'cell_d35', 'cell_e35',
'cell_f35', 'cell_g35', 'cell_h35', 'cell_i35', 'cell_j35',)
column = 35
cell_a35 = forms.CharField(widget=forms.TextInput(), label = "A", required=False)
cell_b35 = forms.CharField(widget=forms.TextInput(), label = "B", required=False)
cell_c35 = forms.CharField(widget=forms.TextInput(), label = "C", required=False)
cell_d35 = forms.CharField(widget=forms.TextInput(), label = "D", required=False)
cell_e35 = forms.CharField(widget=forms.TextInput(), label = "E", required=False)
cell_f35 = forms.CharField(widget=forms.TextInput(), label = "F", required=False)
cell_g35 = forms.CharField(widget=forms.TextInput(), label = "G", required=False)
cell_h35 = forms.CharField(widget=forms.TextInput(), label = "H", required=False)
cell_i35 = forms.CharField(widget=forms.TextInput(), label = "I", required=False)
cell_j35 = forms.CharField(widget=forms.TextInput(), label = "J", required=False)
class Form_Task_Foci_001_036(ModelForm):
class Meta:
model = Task_Foci_001
fields = ('cell_a36', 'cell_b36', 'cell_c36', 'cell_d36', 'cell_e36',
'cell_f36', 'cell_g36', 'cell_h36', 'cell_i36', 'cell_j36',)
column = 36
cell_a36 = forms.CharField(widget=forms.TextInput(), label = "A", required=False)
cell_b36 = forms.CharField(widget=forms.TextInput(), label = "B", required=False)
cell_c36 = forms.CharField(widget=forms.TextInput(), label = "C", required=False)
cell_d36 = forms.CharField(widget=forms.TextInput(), label = "D", required=False)
cell_e36 = forms.CharField(widget=forms.TextInput(), label = "E", required=False)
cell_f36 = forms.CharField(widget=forms.TextInput(), label = "F", required=False)
cell_g36 = forms.CharField(widget=forms.TextInput(), label = "G", required=False)
cell_h36 = forms.CharField(widget=forms.TextInput(), label = "H", required=False)
cell_i36 = forms.CharField(widget=forms.TextInput(), label = "I", required=False)
cell_j36 = forms.CharField(widget=forms.TextInput(), label = "J", required=False)
class Form_Task_Foci_001_037(ModelForm):
class Meta:
model = Task_Foci_001
fields = ('cell_a37', 'cell_b37', 'cell_c37', 'cell_d37', 'cell_e37',
'cell_f37', 'cell_g37', 'cell_h37', 'cell_i37', 'cell_j37',)
column = 37
cell_a37 = forms.CharField(widget=forms.TextInput(), label = "A", required=False)
cell_b37 = forms.CharField(widget=forms.TextInput(), label = "B", required=False)
cell_c37 = forms.CharField(widget=forms.TextInput(), label = "C", required=False)
cell_d37 = forms.CharField(widget=forms.TextInput(), label = "D", required=False)
cell_e37 = forms.CharField(widget=forms.TextInput(), label = "E", required=False)
cell_f37 = forms.CharField(widget=forms.TextInput(), label = "F", required=False)
cell_g37 = forms.CharField(widget=forms.TextInput(), label = "G", required=False)
cell_h37 = forms.CharField(widget=forms.TextInput(), label = "H", required=False)
cell_i37 = forms.CharField(widget=forms.TextInput(), label = "I", required=False)
cell_j37 = forms.CharField(widget=forms.TextInput(), label = "J", required=False)
class Form_Task_Foci_001_038(ModelForm):
class Meta:
model = Task_Foci_001
fields = ('cell_a38', 'cell_b38', 'cell_c38', 'cell_d38', 'cell_e38',
'cell_f38', 'cell_g38', 'cell_h38', 'cell_i38', 'cell_j38',)
column = 38
cell_a38 = forms.CharField(widget=forms.TextInput(), label = "A", required=False)
cell_b38 = forms.CharField(widget=forms.TextInput(), label = "B", required=False)
cell_c38 = forms.CharField(widget=forms.TextInput(), label = "C", required=False)
cell_d38 = forms.CharField(widget=forms.TextInput(), label = "D", required=False)
cell_e38 = forms.CharField(widget=forms.TextInput(), label = "E", required=False)
cell_f38 = forms.CharField(widget=forms.TextInput(), label = "F", required=False)
cell_g38 = forms.CharField(widget=forms.TextInput(), label = "G", required=False)
cell_h38 = forms.CharField(widget=forms.TextInput(), label = "H", required=False)
cell_i38 = forms.CharField(widget=forms.TextInput(), label = "I", required=False)
cell_j38 = forms.CharField(widget=forms.TextInput(), label = "J", required=False)
class Form_Task_Foci_001_039(ModelForm):
class Meta:
model = Task_Foci_001
fields = ('cell_a39', 'cell_b39', 'cell_c39', 'cell_d39', 'cell_e39',
'cell_f39', 'cell_g39', 'cell_h39', 'cell_i39', 'cell_j39',)
column = 39
cell_a39 = forms.CharField(widget=forms.TextInput(), label = "A", required=False)
cell_b39 = forms.CharField(widget=forms.TextInput(), label = "B", required=False)
cell_c39 = forms.CharField(widget=forms.TextInput(), label = "C", required=False)
cell_d39 = forms.CharField(widget=forms.TextInput(), label = "D", required=False)
cell_e39 = forms.CharField(widget=forms.TextInput(), label = "E", required=False)
cell_f39 = forms.CharField(widget=forms.TextInput(), label = "F", required=False)
cell_g39 = forms.CharField(widget=forms.TextInput(), label = "G", required=False)
cell_h39 = forms.CharField(widget=forms.TextInput(), label = "H", required=False)
cell_i39 = forms.CharField(widget=forms.TextInput(), label = "I", required=False)
cell_j39 = forms.CharField(widget=forms.TextInput(), label = "J", required=False)
class Form_Task_Foci_001_040(ModelForm):
class Meta:
model = Task_Foci_001
fields = ('cell_a40', 'cell_b40', 'cell_c40', 'cell_d40', 'cell_e40',
'cell_f40', 'cell_g40', 'cell_h40', 'cell_i40', 'cell_j40',)
column = 40
cell_a40 = forms.CharField(widget=forms.TextInput(), label = "A", required=False)
cell_b40 = forms.CharField(widget=forms.TextInput(), label = "B", required=False)
cell_c40 = forms.CharField(widget=forms.TextInput(), label = "C", required=False)
cell_d40 = forms.CharField(widget=forms.TextInput(), label = "D", required=False)
cell_e40 = forms.CharField(widget=forms.TextInput(), label = "E", required=False)
cell_f40 = forms.CharField(widget=forms.TextInput(), label = "F", required=False)
cell_g40 = forms.CharField(widget=forms.TextInput(), label = "G", required=False)
cell_h40 = forms.CharField(widget=forms.TextInput(), label = "H", required=False)
cell_i40 = forms.CharField(widget=forms.TextInput(), label = "I", required=False)
cell_j40 = forms.CharField(widget=forms.TextInput(), label = "J", required=False)
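# The 41 form classes above differ only in their column index. The sketch
# below is an illustrative alternative (not the project's actual approach)
# showing how the same classes could be generated with type(), assuming the
# model fields follow the cell_<letter><column> naming scheme used above.
def make_column_form(column):
    letters = 'abcdefghij'
    field_names = tuple('cell_%s%d' % (letter, column) for letter in letters)
    meta = type('Meta', (), {'model': Task_Foci_001, 'fields': field_names})
    attrs = {'Meta': meta, 'column': column}
    for letter in letters:
        attrs['cell_%s%d' % (letter, column)] = forms.CharField(
            widget=forms.TextInput(), label=letter.upper(), required=False)
    return type('Form_Task_Foci_001_%02d' % column, (ModelForm,), attrs)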
|
|
import atexit
import logging
import os
import threading
import cherrypy
import ifcfg
import requests
from django.conf import settings
from django.core.management import call_command
from .system import kill_pid
from .system import pid_exists
from kolibri.content.utils import paths
from kolibri.utils.conf import KOLIBRI_HOME
logger = logging.getLogger(__name__)
# Status codes for kolibri
STATUS_RUNNING = 0
STATUS_STOPPED = 1
STATUS_STARTING_UP = 4
STATUS_NOT_RESPONDING = 5
STATUS_FAILED_TO_START = 6
STATUS_UNCLEAN_SHUTDOWN = 7
STATUS_UNKNOWN_INSTANCE = 8
STATUS_SERVER_CONFIGURATION_ERROR = 9
STATUS_PID_FILE_READ_ERROR = 99
STATUS_PID_FILE_INVALID = 100
STATUS_UNKNOWN = 101
# Used to store PID and port number (both in foreground and daemon mode)
PID_FILE = os.path.join(KOLIBRI_HOME, "server.pid")
# Used to store the PID and port during an exclusive startup phase, before
# we fork to daemon mode
STARTUP_LOCK = os.path.join(KOLIBRI_HOME, "server.lock")
# This is a special file for daemon activity. It logs ALL stderr output,
# some of which might not have made it to the log file!
DAEMON_LOG = os.path.join(KOLIBRI_HOME, "server.log")
# Currently non-configurable until we know how to properly handle this
LISTEN_ADDRESS = "0.0.0.0"
class NotRunning(Exception):
"""
Raised when the server was expected to run, but didn't. Contains a status
code explaining why.
"""
def __init__(self, status_code):
self.status_code = status_code
super(NotRunning, self).__init__()
def start(port=8080):
"""
Starts the server.
:param port: Port number (default: 8080)
"""
# start the pingback thread
PingbackThread.start_command()
# Write the new PID file (PID and listening port) via the shared helper
_write_pid_file(PID_FILE, port)
# This should be run every time the server is started, for now.
# Triggering it from events is hard, because content can change by copying
# a folder into ~/.kolibri or by deleting a channel DB on disk
from kolibri.content.utils.annotation import update_channel_metadata
update_channel_metadata()
# This is also run every time the server is started to clear all the tasks
# in the queue
from kolibri.tasks.client import get_client
get_client().clear(force=True)
def rm_pid_file():
os.unlink(PID_FILE)
atexit.register(rm_pid_file)
run_server(port=port)
class PingbackThread(threading.Thread):
@classmethod
def start_command(cls):
thread = cls()
thread.daemon = True
thread.start()
def run(self):
call_command("ping")
def stop(pid=None, force=False):
"""
Stops the Kolibri server, either by killing the given PID or, when force is
True, by reading the PID from the PID file and killing that process.
:param pid: PID of the server process to kill (used when force is False)
:param force: If True, read the PID from the PID file instead
:raises: NotRunning
"""
if not force:
# Kill the Kolibri server
kill_pid(pid)
else:
try:
pid, __ = _read_pid_file(PID_FILE)
kill_pid(pid)
except ValueError:
logger.error("Could not find PID in .pid file\n")
except OSError:
logger.error("Could not read .pid file\n")
# TODO: Check that server has in fact been killed, otherwise we should
# raise an error...
# Finally, remove the PID file
os.unlink(PID_FILE)
def run_server(port):
# Mount the application
from kolibri.deployment.default.wsgi import application
cherrypy.tree.graft(application, "/")
cherrypy.config.update({"environment": "production"})
serve_static_dir(settings.STATIC_ROOT, settings.STATIC_URL)
serve_static_dir(settings.CONTENT_DATABASE_DIR,
paths.get_content_database_url("/"))
serve_static_dir(settings.CONTENT_STORAGE_DIR,
paths.get_content_storage_url("/"))
# Unsubscribe the default server
cherrypy.server.unsubscribe()
# Instantiate a new server object
server = cherrypy._cpserver.Server()
# Configure the server
server.socket_host = LISTEN_ADDRESS
server.socket_port = port
server.thread_pool = 30
# Subscribe this server
server.subscribe()
# Start the server engine and block until it stops
cherrypy.engine.start()
cherrypy.engine.block()
def serve_static_dir(root, url):
static_handler = cherrypy.tools.staticdir.handler(
section="/",
dir=os.path.split(root)[1],
root=os.path.abspath(os.path.split(root)[0]))
cherrypy.tree.mount(static_handler, url)
def _read_pid_file(filename):
"""
Reads a PID file and returns the contents. PID files have 1 or 2 lines:
- first line is always the PID
- optional second line is the port the server is listening on.
:param filename: Path of the PID file to read
:return: (pid, port): with the PID in the file and the port number
if it exists. If the port number doesn't exist, then
port is None.
"""
with open(filename, "r") as f:
    pid_file_lines = f.readlines()
if len(pid_file_lines) == 2:
pid, port = pid_file_lines
pid, port = int(pid), int(port)
elif len(pid_file_lines) == 1:
# The file only had one line
pid, port = int(pid_file_lines[0]), None
else:
raise ValueError("PID file must have 1 or 2 lines")
return pid, port
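# Illustrative example (hypothetical path): a PID file whose two lines are
# "12345" and "8080" makes _read_pid_file() return (12345, 8080); a file
# containing only "12345" returns (12345, None).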
def _write_pid_file(filename, port):
"""
Writes a PID file in the format Kolibri parses.
:param filename: Path of the file to write
:param port: Listening port number assigned to the server
"""
with open(filename, 'w') as f:
f.write("%d\n%d" % (os.getpid(), port))
def get_status(): # noqa: max-complexity=16
"""
Tries to get the PID of a running server.
The behavior is also quite redundant given that `kolibri start` should
always create a PID file, and if it's been started directly with the
runserver command, then it's up to the developer to know what's happening.
:returns: (PID, address, port), where address is not currently detected in
a valid way because it's not configurable, and we might be
listening on several IPs.
:raises: NotRunning
"""
# There is no PID file (created by server daemon)
if not os.path.isfile(PID_FILE):
# Is there a startup lock?
if os.path.isfile(STARTUP_LOCK):
try:
pid, port = _read_pid_file(STARTUP_LOCK)
# Does the PID in there still exist?
if pid_exists(pid):
raise NotRunning(STATUS_STARTING_UP)
# It's dead, so assume the startup went badly
else:
raise NotRunning(STATUS_FAILED_TO_START)
# Couldn't parse to int or empty PID file
except (TypeError, ValueError):
raise NotRunning(STATUS_STOPPED)
raise NotRunning(STATUS_STOPPED) # Stopped
# PID file exists, check if it is running
try:
pid, port = _read_pid_file(PID_FILE)
except (ValueError, OSError):
raise NotRunning(STATUS_PID_FILE_INVALID) # Invalid PID file
# PID file exists, but process is dead
if pid is None or not pid_exists(pid):
if os.path.isfile(STARTUP_LOCK):
raise NotRunning(STATUS_FAILED_TO_START) # Failed to start
raise NotRunning(STATUS_UNCLEAN_SHUTDOWN) # Unclean shutdown
listen_port = port
try:
# Timeout is 3 seconds, we don't want the status command to be slow
# TODO: Using 127.0.0.1 is a hardcoded default from KA Lite; it could
# be configurable
# TODO: HTTP might not be the protocol if server has SSL
response = requests.get(
"http://{}:{}".format("127.0.0.1", listen_port), timeout=3)
except (requests.exceptions.ReadTimeout,
requests.exceptions.ConnectionError):
raise NotRunning(STATUS_NOT_RESPONDING)
except (requests.exceptions.RequestException):
if os.path.isfile(STARTUP_LOCK):
raise NotRunning(STATUS_STARTING_UP) # Starting up
raise NotRunning(STATUS_UNCLEAN_SHUTDOWN)
if response.status_code == 404:
raise NotRunning(STATUS_UNKNOWN_INSTANCE) # Unknown HTTP server
if response.status_code != 200:
# Probably a mis-configured kolibri
raise NotRunning(STATUS_SERVER_CONFIGURATION_ERROR)
return pid, LISTEN_ADDRESS, listen_port # Correct PID !
# We don't detect this at present:
# Could be detected because we fetch the PID directly via HTTP, but this
# is dumb because kolibri could be running in a worker pool with different
# PID from the PID file..
# raise NotRunning(STATUS_UNKNOWN_INSTANCE)
# This would be the fallback when we know it's not running, but we can't
# give a proper reason...
# raise NotRunning(STATUS_UNKNOW)
def get_urls(listen_port=None):
"""
:param listen_port: if set, will not try to determine the listen port from
other running instances.
"""
try:
if listen_port:
port = listen_port
else:
__, __, port = get_status()
urls = []
interfaces = ifcfg.interfaces()
for interface in filter(lambda i: i['inet'], interfaces.values()):
urls.append("http://{}:{}/".format(interface['inet'], port))
return STATUS_RUNNING, urls
except NotRunning as e:
return e.status_code, []
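# Hedged usage sketch for get_status()/get_urls() (the STATUS_* constants
# are assumed to be defined earlier in this module):
#
#   status, urls = get_urls()
#   if status == STATUS_RUNNING:
#       for url in urls:
#           print(url)
#   else:
#       print("Not running, status code: {}".format(status))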
|
|
"""Translation helper functions."""
import locale
import os
import re
import sys
import warnings
import gettext as gettext_module
from cStringIO import StringIO
from threading import local
from django.utils.importlib import import_module
from django.utils.safestring import mark_safe, SafeData
# Translations are cached in a dictionary for every language+app tuple.
# The active translations are stored by threadid to make them thread local.
_translations = {}
_active = local()
# The default translation is based on the settings file.
_default = None
# This is a cache for normalized accept-header languages to prevent multiple
# file lookups when checking the same locale on repeated requests.
_accepted = {}
# magic gettext number to separate context from message
CONTEXT_SEPARATOR = u"\x04"
# Format of Accept-Language header values. From RFC 2616, section 14.4 and 3.9.
accept_language_re = re.compile(r'''
([A-Za-z]{1,8}(?:-[A-Za-z]{1,8})*|\*) # "en", "en-au", "x-y-z", "*"
(?:;q=(0(?:\.\d{,3})?|1(?:\.0{,3})?))? # Optional "q=1.00", "q=0.8"
(?:\s*,\s*|$) # Multiple accepts per header.
''', re.VERBOSE)
def to_locale(language, to_lower=False):
"""
Turns a language name (en-us) into a locale name (en_US). If 'to_lower' is
True, the last component is lower-cased (en_us).
"""
p = language.find('-')
if p >= 0:
if to_lower:
return language[:p].lower()+'_'+language[p+1:].lower()
else:
# Get correct locale for sr-latn
if len(language[p+1:]) > 2:
return language[:p].lower()+'_'+language[p+1].upper()+language[p+2:].lower()
return language[:p].lower()+'_'+language[p+1:].upper()
else:
return language.lower()
def to_language(locale):
"""Turns a locale name (en_US) into a language name (en-us)."""
p = locale.find('_')
if p >= 0:
return locale[:p].lower()+'-'+locale[p+1:].lower()
else:
return locale.lower()
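def _locale_conversion_examples():
    """
    Minimal demos of the two converters above (illustrative only, not
    called by Django itself).
    """
    assert to_locale('en-us') == 'en_US'
    assert to_locale('en-us', to_lower=True) == 'en_us'
    assert to_locale('sr-latn') == 'sr_Latn'  # variants longer than 2 chars are title-cased
    assert to_language('en_US') == 'en-us'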
class DjangoTranslation(gettext_module.GNUTranslations):
"""
This class sets up the GNUTranslations context with regard to output
charset. Django uses a defined DEFAULT_CHARSET as the output charset on
Python 2.4.
"""
def __init__(self, *args, **kw):
from django.conf import settings
gettext_module.GNUTranslations.__init__(self, *args, **kw)
# Starting with Python 2.4, there's a function to define
# the output charset. Before 2.4, the output charset is
# identical with the translation file charset.
try:
self.set_output_charset('utf-8')
except AttributeError:
pass
self.django_output_charset = 'utf-8'
self.__language = '??'
def merge(self, other):
self._catalog.update(other._catalog)
def set_language(self, language):
self.__language = language
self.__to_language = to_language(language)
def language(self):
return self.__language
def to_language(self):
return self.__to_language
def __repr__(self):
return "<DjangoTranslation lang:%s>" % self.__language
def translation(language):
"""
Returns a translation object.
This translation object will be constructed out of multiple GNUTranslations
objects by merging their catalogs. It will construct an object for the
requested language and add a fallback to the default language, if it's
different from the requested language.
"""
global _translations
t = _translations.get(language, None)
if t is not None:
return t
from django.conf import settings
globalpath = os.path.join(os.path.dirname(sys.modules[settings.__module__].__file__), 'locale')
if settings.SETTINGS_MODULE is not None:
parts = settings.SETTINGS_MODULE.split('.')
project = import_module(parts[0])
projectpath = os.path.join(os.path.dirname(project.__file__), 'locale')
else:
projectpath = None
def _fetch(lang, fallback=None):
global _translations
loc = to_locale(lang)
res = _translations.get(lang, None)
if res is not None:
return res
def _translation(path):
try:
t = gettext_module.translation('django', path, [loc], DjangoTranslation)
t.set_language(lang)
return t
except IOError, e:
return None
res = _translation(globalpath)
# We want to ensure that, for example, "en-gb" and "en-us" don't share
# the same translation object (thus, merging en-us with a local update
# doesn't affect en-gb), even though they will both use the core "en"
# translation. So we have to subvert Python's internal gettext caching.
base_lang = lambda x: x.split('-', 1)[0]
if base_lang(lang) in [base_lang(trans) for trans in _translations]:
res._info = res._info.copy()
res._catalog = res._catalog.copy()
def _merge(path):
t = _translation(path)
if t is not None:
if res is None:
return t
else:
res.merge(t)
return res
for localepath in settings.LOCALE_PATHS:
if os.path.isdir(localepath):
res = _merge(localepath)
for appname in settings.INSTALLED_APPS:
app = import_module(appname)
apppath = os.path.join(os.path.dirname(app.__file__), 'locale')
if os.path.isdir(apppath):
res = _merge(apppath)
if projectpath and os.path.isdir(projectpath):
res = _merge(projectpath)
if res is None:
if fallback is not None:
res = fallback
else:
return gettext_module.NullTranslations()
_translations[lang] = res
return res
default_translation = _fetch(settings.LANGUAGE_CODE)
current_translation = _fetch(language, fallback=default_translation)
return current_translation
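# Hedged usage sketch (requires configured Django settings; the language
# code is illustrative): merged catalogs are cached per language, so
# repeated calls return the same object.
#
#   t1 = translation('de')
#   t2 = translation('de')
#   assert t1 is t2  # served from the _translations cache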
def activate(language):
"""
Fetches the translation object for a given language and installs it as the
current translation object for the current thread.
"""
if isinstance(language, basestring) and language == 'no':
warnings.warn(
"The use of the language code 'no' is deprecated. "
"Please use the 'nb' translation instead.",
DeprecationWarning
)
_active.value = translation(language)
def deactivate():
"""
Uninstalls the currently active translation object so that further _ calls
will resolve against the default translation object again.
"""
if hasattr(_active, "value"):
del _active.value
def deactivate_all():
"""
Makes the active translation object a NullTranslations() instance. This is
useful when we want delayed translations to appear as the original string
for some reason.
"""
_active.value = gettext_module.NullTranslations()
def get_language():
"""Returns the currently selected language."""
t = getattr(_active, "value", None)
if t is not None:
try:
return t.to_language()
except AttributeError:
pass
# If we don't have a real translation object, assume it's the default language.
from django.conf import settings
return settings.LANGUAGE_CODE
def get_language_bidi():
"""
Returns selected language's BiDi layout.
* False = left-to-right layout
* True = right-to-left layout
"""
from django.conf import settings
base_lang = get_language().split('-')[0]
return base_lang in settings.LANGUAGES_BIDI
def catalog():
"""
Returns the current active catalog for further processing.
This can be used if you need to modify the catalog or want to access the
whole message catalog instead of just translating one string.
"""
global _default
t = getattr(_active, "value", None)
if t is not None:
return t
if _default is None:
from django.conf import settings
_default = translation(settings.LANGUAGE_CODE)
return _default
def do_translate(message, translation_function):
"""
Translates 'message' using the given 'translation_function' name -- which
will be either gettext or ugettext. It uses the current thread to find the
translation object to use. If no current translation is activated, the
message will be run through the default translation object.
"""
global _default
eol_message = message.replace('\r\n', '\n').replace('\r', '\n')
t = getattr(_active, "value", None)
if t is not None:
result = getattr(t, translation_function)(eol_message)
else:
if _default is None:
from django.conf import settings
_default = translation(settings.LANGUAGE_CODE)
result = getattr(_default, translation_function)(eol_message)
if isinstance(message, SafeData):
return mark_safe(result)
return result
def gettext(message):
return do_translate(message, 'gettext')
def ugettext(message):
return do_translate(message, 'ugettext')
def pgettext(context, message):
result = do_translate(
u"%s%s%s" % (context, CONTEXT_SEPARATOR, message), 'ugettext')
if CONTEXT_SEPARATOR in result:
# Translation not found
result = message
return result
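# Hedged note on the fallback above: when no translation exists, ugettext
# returns the contextualized string unchanged, so CONTEXT_SEPARATOR is
# still present and the bare message is returned instead, e.g.
# (illustrative call):
#
#   pgettext(u"month name", u"May")  # -> u"May" when untranslated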
def gettext_noop(message):
"""
Marks strings for translation but doesn't translate them now. This can be
used to store strings in global variables that should stay in the base
language (because they might be used externally) and will be translated
later.
"""
return message
def do_ntranslate(singular, plural, number, translation_function):
global _default
t = getattr(_active, "value", None)
if t is not None:
return getattr(t, translation_function)(singular, plural, number)
if _default is None:
from django.conf import settings
_default = translation(settings.LANGUAGE_CODE)
return getattr(_default, translation_function)(singular, plural, number)
def ngettext(singular, plural, number):
"""
Returns a UTF-8 bytestring of the translation of either the singular or
plural, based on the number.
"""
return do_ntranslate(singular, plural, number, 'ngettext')
def ungettext(singular, plural, number):
"""
Returns a unicode string of the translation of either the singular or
plural, based on the number.
"""
return do_ntranslate(singular, plural, number, 'ungettext')
def npgettext(context, singular, plural, number):
result = do_ntranslate(u"%s%s%s" % (context, CONTEXT_SEPARATOR, singular),
u"%s%s%s" % (context, CONTEXT_SEPARATOR, plural),
number, 'ungettext')
if CONTEXT_SEPARATOR in result:
# Translation not found
result = do_ntranslate(singular, plural, number, 'ungettext')
return result
def check_for_language(lang_code):
"""
Checks whether there is a global language file for the given language
code. This is used to decide whether a user-provided language is
available. This is only used for language codes from either the cookies or
session.
"""
from django.conf import settings
globalpath = os.path.join(os.path.dirname(sys.modules[settings.__module__].__file__), 'locale')
if gettext_module.find('django', globalpath, [to_locale(lang_code)]) is not None:
return True
else:
return False
def get_language_from_request(request):
"""
Analyzes the request to find what language the user wants the system to
show. Only languages listed in settings.LANGUAGES are taken into account.
If the user requests a sublanguage where we have a main language, we send
out the main language.
"""
global _accepted
from django.conf import settings
globalpath = os.path.join(os.path.dirname(sys.modules[settings.__module__].__file__), 'locale')
supported = dict(settings.LANGUAGES)
if hasattr(request, 'session'):
lang_code = request.session.get('django_language', None)
if lang_code in supported and lang_code is not None and check_for_language(lang_code):
return lang_code
lang_code = request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME)
if lang_code and lang_code not in supported:
lang_code = lang_code.split('-')[0] # e.g. if fr-ca is not supported, fall back to fr
if lang_code and lang_code in supported and check_for_language(lang_code):
return lang_code
accept = request.META.get('HTTP_ACCEPT_LANGUAGE', '')
for accept_lang, unused in parse_accept_lang_header(accept):
if accept_lang == '*':
break
# We have a very restricted form for our language files (no encoding
# specifier, since they all must be UTF-8, and only one possible
# language each time). So we avoid the overhead of gettext.find() and
# work out the MO file manually.
# 'normalized' is the root name of the locale in POSIX format (which is
# the format used for the directories holding the MO files).
normalized = locale.locale_alias.get(to_locale(accept_lang, True))
if not normalized:
continue
# Remove the default encoding from locale_alias.
normalized = normalized.split('.')[0]
if normalized in _accepted:
# We've seen this locale before and have an MO file for it, so no
# need to check again.
return _accepted[normalized]
for lang, dirname in ((accept_lang, normalized),
(accept_lang.split('-')[0], normalized.split('_')[0])):
if lang.lower() not in supported:
continue
langfile = os.path.join(globalpath, dirname, 'LC_MESSAGES',
'django.mo')
if os.path.exists(langfile):
_accepted[normalized] = lang
return lang
return settings.LANGUAGE_CODE
dot_re = re.compile(r'\S')
def blankout(src, char):
"""
Changes every non-whitespace character to the given char.
Used in the templatize function.
"""
return dot_re.sub(char, src)
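def _blankout_example():
    """Tiny demo of blankout (illustrative only): every non-whitespace
    character becomes the given char, whitespace is preserved."""
    assert blankout('{% if foo %}', 'B') == 'BB BB BBB BB'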
inline_re = re.compile(r"""^\s*trans\s+((?:".*?")|(?:'.*?'))\s*""")
block_re = re.compile(r"""^\s*blocktrans(?:\s+|$)""")
endblock_re = re.compile(r"""^\s*endblocktrans$""")
plural_re = re.compile(r"""^\s*plural$""")
constant_re = re.compile(r"""_\(((?:".*?")|(?:'.*?'))\)""")
def templatize(src, origin=None):
"""
Turns a Django template into something that is understood by xgettext. It
does so by translating the Django translation tags into standard gettext
function invocations.
"""
from django.template import Lexer, TOKEN_TEXT, TOKEN_VAR, TOKEN_BLOCK, TOKEN_COMMENT
out = StringIO()
intrans = False
inplural = False
singular = []
plural = []
incomment = False
comment = []
for t in Lexer(src, origin).tokenize():
if incomment:
if t.token_type == TOKEN_BLOCK and t.contents == 'endcomment':
out.write(' # %s' % ''.join(comment))
incomment = False
comment = []
else:
comment.append(t.contents)
elif intrans:
if t.token_type == TOKEN_BLOCK:
endbmatch = endblock_re.match(t.contents)
pluralmatch = plural_re.match(t.contents)
if endbmatch:
if inplural:
out.write(' ngettext(%r,%r,count) ' % (''.join(singular), ''.join(plural)))
for part in singular:
out.write(blankout(part, 'S'))
for part in plural:
out.write(blankout(part, 'P'))
else:
out.write(' gettext(%r) ' % ''.join(singular))
for part in singular:
out.write(blankout(part, 'S'))
intrans = False
inplural = False
singular = []
plural = []
elif pluralmatch:
inplural = True
else:
filemsg = ''
if origin:
filemsg = 'file %s, ' % origin
raise SyntaxError("Translation blocks must not include other block tags: %s (%sline %d)" % (t.contents, filemsg, t.lineno))
elif t.token_type == TOKEN_VAR:
if inplural:
plural.append('%%(%s)s' % t.contents)
else:
singular.append('%%(%s)s' % t.contents)
elif t.token_type == TOKEN_TEXT:
contents = t.contents.replace('%', '%%')
if inplural:
plural.append(contents)
else:
singular.append(contents)
else:
if t.token_type == TOKEN_BLOCK:
imatch = inline_re.match(t.contents)
bmatch = block_re.match(t.contents)
cmatches = constant_re.findall(t.contents)
if imatch:
g = imatch.group(1)
if g[0] == '"': g = g.strip('"')
elif g[0] == "'": g = g.strip("'")
out.write(' gettext(%r) ' % g)
elif bmatch:
for fmatch in constant_re.findall(t.contents):
out.write(' _(%s) ' % fmatch)
intrans = True
inplural = False
singular = []
plural = []
elif cmatches:
for cmatch in cmatches:
out.write(' _(%s) ' % cmatch)
elif t.contents == 'comment':
incomment = True
else:
out.write(blankout(t.contents, 'B'))
elif t.token_type == TOKEN_VAR:
parts = t.contents.split('|')
cmatch = constant_re.match(parts[0])
if cmatch:
out.write(' _(%s) ' % cmatch.group(1))
for p in parts[1:]:
if p.find(':_(') >= 0:
out.write(' %s ' % p.split(':',1)[1])
else:
out.write(blankout(p, 'F'))
elif t.token_type == TOKEN_COMMENT:
out.write(' # %s' % t.contents)
else:
out.write(blankout(t.contents, 'X'))
return out.getvalue()
def parse_accept_lang_header(lang_string):
"""
Parses the lang_string, which is the body of an HTTP Accept-Language
header, and returns a list of (lang, q-value), ordered by 'q' values.
Any format errors in lang_string result in an empty list being returned.
"""
result = []
pieces = accept_language_re.split(lang_string)
if pieces[-1]:
return []
for i in range(0, len(pieces) - 1, 3):
first, lang, priority = pieces[i : i + 3]
if first:
return []
priority = priority and float(priority) or 1.0
result.append((lang, priority))
result.sort(key=lambda k: k[1], reverse=True)
return result
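def _parse_accept_lang_example():
    """
    Minimal demo (illustrative only): entries are returned sorted by
    descending q-value; a missing q-value defaults to 1.0.
    """
    parsed = parse_accept_lang_header('da, en-gb;q=0.8, en;q=0.7')
    assert parsed == [('da', 1.0), ('en-gb', 0.8), ('en', 0.7)]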
# get_date_formats and get_partial_date_formats aren't used anymore by Django
# and are kept for backward compatibility.
# Note, it's also important to keep format names marked for translation.
# For compatibility we still want to have formats on translation catalogs.
# That makes template code like {{ my_date|date:_('DATE_FORMAT') }} still work
def get_date_formats():
"""
Checks whether translation files provide a translation for some technical
message ID to store date and time formats. If they don't contain one, the
formats provided in the settings will be used.
"""
warnings.warn(
"'django.utils.translation.get_date_formats' is deprecated. "
"Please update your code to use the new i18n aware formatting.",
DeprecationWarning
)
from django.conf import settings
date_format = ugettext('DATE_FORMAT')
datetime_format = ugettext('DATETIME_FORMAT')
time_format = ugettext('TIME_FORMAT')
if date_format == 'DATE_FORMAT':
date_format = settings.DATE_FORMAT
if datetime_format == 'DATETIME_FORMAT':
datetime_format = settings.DATETIME_FORMAT
if time_format == 'TIME_FORMAT':
time_format = settings.TIME_FORMAT
return date_format, datetime_format, time_format
def get_partial_date_formats():
"""
Checks whether translation files provide a translation for some technical
message ID to store partial date formats. If they don't contain one, the
formats provided in the settings will be used.
"""
warnings.warn(
"'django.utils.translation.get_partial_date_formats' is deprecated. "
"Please update your code to use the new i18n aware formatting.",
DeprecationWarning
)
from django.conf import settings
year_month_format = ugettext('YEAR_MONTH_FORMAT')
month_day_format = ugettext('MONTH_DAY_FORMAT')
if year_month_format == 'YEAR_MONTH_FORMAT':
year_month_format = settings.YEAR_MONTH_FORMAT
if month_day_format == 'MONTH_DAY_FORMAT':
month_day_format = settings.MONTH_DAY_FORMAT
return year_month_format, month_day_format
|
|
# Copyright (c) 2003-2010 Sylvain Thenault (thenault@gmail.com).
# Copyright (c) 2003-2012 LOGILAB S.A. (Paris, FRANCE).
# http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
""" %prog [options] module_or_package
Check that a module satisfies a coding standard (and more!).
%prog --help
Display this help message and exit.
%prog --help-msg <msg-id>[,<msg-id>]
Display help messages about given message identifiers and exit.
"""
# import this first to avoid builtin namespace pollution
from pylint.checkers import utils
import sys
import os
import re
import tokenize
from warnings import warn
from logilab.common.configuration import UnsupportedAction, OptionsManagerMixIn
from logilab.common.optik_ext import check_csv
from logilab.common.modutils import load_module_from_name
from logilab.common.interface import implements
from logilab.common.textutils import splitstrip
from logilab.common.ureports import Table, Text, Section
from logilab.common.__pkginfo__ import version as common_version
from logilab.astng import MANAGER, nodes, ASTNGBuildingException
from logilab.astng.__pkginfo__ import version as astng_version
from pylint.utils import PyLintASTWalker, UnknownMessage, MessagesHandlerMixIn,\
ReportsHandlerMixIn, MSG_TYPES, expand_modules
from pylint.interfaces import ILinter, IRawChecker, IASTNGChecker
from pylint.checkers import BaseRawChecker, EmptyReport, \
table_lines_from_stats
from pylint.reporters.text import TextReporter, ParseableTextReporter, \
VSTextReporter, ColorizedTextReporter
from pylint.reporters.html import HTMLReporter
from pylint import config
from pylint.__pkginfo__ import version
OPTION_RGX = re.compile(r'\s*#*\s*pylint:(.*)')
REPORTER_OPT_MAP = {'text': TextReporter,
'parseable': ParseableTextReporter,
'msvs': VSTextReporter,
'colorized': ColorizedTextReporter,
'html': HTMLReporter,}
def _get_python_path(filepath):
dirname = os.path.dirname(os.path.realpath(
os.path.expanduser(filepath)))
while True:
if not os.path.exists(os.path.join(dirname, "__init__.py")):
return dirname
old_dirname = dirname
dirname = os.path.dirname(dirname)
if old_dirname == dirname:
return os.getcwd()
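# Hedged illustration (hypothetical layout): given
# /home/user/project/pkg/sub/mod.py, where pkg/ and pkg/sub/ both contain
# an __init__.py, _get_python_path returns /home/user/project -- the first
# ancestor without an __init__.py, i.e. the directory that must be on
# sys.path for the dotted module name to be importable.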
# Python Linter class #########################################################
MSGS = {
'F0001': ('%s',
'Used when an error occurred preventing the analysis of a \
module (unable to find it for instance).'),
'F0002': ('%s: %s',
'Used when an unexpected error occurred while building the ASTNG \
representation. This is usually accompanied by a traceback. \
Please report such errors!'),
'F0003': ('ignored builtin module %s',
'Used to indicate that the user asked to analyze a builtin module\
which has been skipped.'),
'F0004': ('unexpected inferred value %s',
'Used to indicate that some value of an unexpected type has been \
inferred.'),
'F0010': ('error while code parsing: %s',
'Used when an exception occurred while building the ASTNG \
representation which could be handled by astng.'),
'I0001': ('Unable to run raw checkers on built-in module %s',
'Used to inform that a built-in module has not been checked \
using the raw checkers.'),
'I0010': ('Unable to consider inline option %r',
'Used when an inline option is either badly formatted or can\'t \
be used inside modules.'),
'I0011': ('Locally disabling %s',
'Used when an inline option disables a message or a messages \
category.'),
'I0012': ('Locally enabling %s',
'Used when an inline option enables a message or a messages \
category.'),
'I0013': ('Ignoring entire file',
'Used to inform that the file will not be checked'),
'E0001': ('%s',
'Used when a syntax error is raised for a module.'),
'E0011': ('Unrecognized file option %r',
'Used when an unknown inline option is encountered.'),
'E0012': ('Bad option value %r',
'Used when a bad value for an inline option is encountered.'),
}
class PyLinter(OptionsManagerMixIn, MessagesHandlerMixIn, ReportsHandlerMixIn,
BaseRawChecker):
"""lint Python modules using external checkers.
This is the main checker controlling the other ones and the reports
generation. It is itself both a raw checker and an astng checker in order
to:
* handle message activation / deactivation at the module level
* handle some basic but necessary stats data (number of classes, methods...)
IDE plugin developers: you may have to call
`logilab.astng.builder.MANAGER.astng_cache.clear()` across runs if you want
to ensure the latest code version is actually checked.
"""
__implements__ = (ILinter, IRawChecker)
name = 'master'
priority = 0
level = 0
msgs = MSGS
may_be_disabled = False
@staticmethod
def make_options():
return (('ignore',
{'type' : 'csv', 'metavar' : '<file>[,<file>...]',
'dest' : 'black_list', 'default' : ('CVS',),
'help' : 'Add files or directories to the blacklist. \
They should be base names, not paths.'}),
('persistent',
{'default': True, 'type' : 'yn', 'metavar' : '<y_or_n>',
'level': 1,
'help' : 'Pickle collected data for later comparisons.'}),
('load-plugins',
{'type' : 'csv', 'metavar' : '<modules>', 'default' : (),
'level': 1,
'help' : 'List of plugins (as comma separated values of \
python module names) to load, usually to register additional checkers.'}),
('output-format',
{'default': 'text', 'type': 'choice', 'metavar' : '<format>',
'choices': REPORTER_OPT_MAP.keys(),
'short': 'f',
'group': 'Reports',
'help' : 'Set the output format. Available formats are text,\
parseable, colorized, msvs (visual studio) and html'}),
('include-ids',
{'type' : 'yn', 'metavar' : '<y_or_n>', 'default' : 0,
'short': 'i',
'group': 'Reports',
'help' : 'Include message\'s id in output'}),
('files-output',
{'default': 0, 'type' : 'yn', 'metavar' : '<y_or_n>',
'group': 'Reports', 'level': 1,
'help' : 'Put messages in a separate file for each module / \
package specified on the command line instead of printing them on stdout. \
Reports (if any) will be written in a file named "pylint_global.[txt|html]".'}),
('reports',
{'default': 1, 'type' : 'yn', 'metavar' : '<y_or_n>',
'short': 'r',
'group': 'Reports',
'help' : 'Tells whether to display a full report or only the\
messages'}),
('evaluation',
{'type' : 'string', 'metavar' : '<python_expression>',
'group': 'Reports', 'level': 1,
'default': '10.0 - ((float(5 * error + warning + refactor + \
convention) / statement) * 10)',
'help' : 'Python expression which should return a note less \
than 10 (10 is the highest note). You have access to the variables error, \
warning, refactor, convention and statement, which respectively contain the \
number of messages in each category and the total number of statements \
analyzed. This is used by the global evaluation report (RP0004).'}),
('comment',
{'default': 0, 'type' : 'yn', 'metavar' : '<y_or_n>',
'group': 'Reports', 'level': 1,
'help' : 'Add a comment according to your evaluation note. \
This is used by the global evaluation report (RP0004).'}),
('enable',
{'type' : 'csv', 'metavar': '<msg ids>',
'short': 'e',
'group': 'Messages control',
'help' : 'Enable the message, report, category or checker with the '
'given id(s). You can either give multiple identifiers '
'separated by commas (,) or use this option multiple times.'}),
('disable',
{'type' : 'csv', 'metavar': '<msg ids>',
'short': 'd',
'group': 'Messages control',
'help' : 'Disable the message, report, category or checker '
'with the given id(s). You can either give multiple identifiers'
' separated by commas (,) or use this option multiple times '
'(only on the command line, not in the configuration file '
'where it should appear only once).'}),
)
option_groups = (
('Messages control', 'Options controlling analysis messages'),
('Reports', 'Options related to output formatting and reporting'),
)
def __init__(self, options=(), reporter=None, option_groups=(),
pylintrc=None):
# some stuff has to be done before ancestors initialization...
#
# checkers / reporter / astng manager
self.reporter = None
self._checkers = {}
self._ignore_file = False
# visit variables
self.base_name = None
self.base_file = None
self.current_name = None
self.current_file = None
self.stats = None
# init options
self.options = options + PyLinter.make_options()
self.option_groups = option_groups + PyLinter.option_groups
self._options_methods = {
'enable': self.enable,
'disable': self.disable}
self._bw_options_methods = {'disable-msg': self.disable,
'enable-msg': self.enable}
full_version = '%%prog %s, \nastng %s, common %s\nPython %s' % (
version, astng_version, common_version, sys.version)
OptionsManagerMixIn.__init__(self, usage=__doc__,
version=full_version,
config_file=pylintrc or config.PYLINTRC)
MessagesHandlerMixIn.__init__(self)
ReportsHandlerMixIn.__init__(self)
BaseRawChecker.__init__(self)
# provided reports
self.reports = (('RP0001', 'Messages by category',
report_total_messages_stats),
('RP0002', '% errors / warnings by module',
report_messages_by_module_stats),
('RP0003', 'Messages',
report_messages_stats),
('RP0004', 'Global evaluation',
self.report_evaluation),
)
self.register_checker(self)
self._dynamic_plugins = []
self.load_provider_defaults()
self.set_reporter(reporter or TextReporter(sys.stdout))
def load_default_plugins(self):
from pylint import checkers
checkers.initialize(self)
def load_plugin_modules(self, modnames):
"""take a list of module names which are pylint plugins and load
and register them
"""
for modname in modnames:
if modname in self._dynamic_plugins:
continue
self._dynamic_plugins.append(modname)
module = load_module_from_name(modname)
module.register(self)
def set_reporter(self, reporter):
"""set the reporter used to display messages and reports"""
self.reporter = reporter
reporter.linter = self
def set_option(self, optname, value, action=None, optdict=None):
"""overridden from configuration.OptionsProviderMixin to handle some
special options
"""
if optname in self._options_methods or optname in self._bw_options_methods:
if value:
try:
meth = self._options_methods[optname]
except KeyError:
meth = self._bw_options_methods[optname]
warn('%s is deprecated, replace it by %s' % (
optname, optname.split('-')[0]), DeprecationWarning)
value = check_csv(None, optname, value)
if isinstance(value, (list, tuple)):
for _id in value :
meth(_id)
else :
meth(value)
elif optname == 'output-format':
self.set_reporter(REPORTER_OPT_MAP[value.lower()]())
try:
BaseRawChecker.set_option(self, optname, value, action, optdict)
except UnsupportedAction:
print >> sys.stderr, 'option %s can\'t be read from config file' % \
optname
# checkers manipulation methods ############################################
def register_checker(self, checker):
"""register a new checker
checker is an object implementing IRawChecker and/or IASTNGChecker
"""
assert checker.priority <= 0, 'checker priority can\'t be > 0'
self._checkers.setdefault(checker.name, []).append(checker)
for r_id, r_title, r_cb in checker.reports:
self.register_report(r_id, r_title, r_cb, checker)
self.register_options_provider(checker)
if hasattr(checker, 'msgs'):
self.register_messages(checker)
checker.load_defaults()
def disable_noerror_messages(self):
for msgcat, msgids in self._msgs_by_category.iteritems():
if msgcat == 'E':
for msgid in msgids:
self.enable(msgid)
else:
for msgid in msgids:
self.disable(msgid)
def disable_reporters(self):
"""disable all reporters"""
for reporters in self._reports.values():
for report_id, _title, _cb in reporters:
self.disable_report(report_id)
def error_mode(self):
"""error mode: enable only errors; no reports, no persistent"""
self.disable_noerror_messages()
self.disable('miscellaneous')
self.set_option('reports', False)
self.set_option('persistent', False)
# block level option handling #############################################
#
# see func_block_disable_msg.py test case for expected behaviour
def process_tokens(self, tokens):
"""process tokens from the current module to search for module/block
level options
"""
comment = tokenize.COMMENT
newline = tokenize.NEWLINE
for (tok_type, _, start, _, line) in tokens:
if tok_type not in (comment, newline):
continue
match = OPTION_RGX.search(line)
if match is None:
continue
if match.group(1).strip() == "disable-all":
self.add_message('I0013', line=start[0])
self._ignore_file = True
return
try:
opt, value = match.group(1).split('=', 1)
except ValueError:
self.add_message('I0010', args=match.group(1).strip(),
line=start[0])
continue
opt = opt.strip()
if opt in self._options_methods or opt in self._bw_options_methods:
try:
meth = self._options_methods[opt]
except KeyError:
meth = self._bw_options_methods[opt]
warn('%s is deprecated, replace it by %s (%s, line %s)' % (
opt, opt.split('-')[0], self.current_file, line),
DeprecationWarning)
for msgid in splitstrip(value):
try:
meth(msgid, 'module', start[0])
except UnknownMessage:
self.add_message('E0012', args=msgid, line=start[0])
else:
self.add_message('E0011', args=opt, line=start[0])
def collect_block_lines(self, node, msg_state):
"""walk ast to collect block level options line numbers"""
# recurse on children (depth first)
for child in node.get_children():
self.collect_block_lines(child, msg_state)
first = node.fromlineno
last = node.tolineno
# The first child's line number is used to distinguish disables that are
# the first child of a scoped node from those defined later.
# For instance in the code below:
#
# 1. def meth8(self):
# 2. """test late disabling"""
# 3. # pylint: disable=E1102
# 4. print self.blip
# 5. # pylint: disable=E1101
# 6. print self.bla
#
# E1102 should be disabled from line 1 to 6 while E1101 from line 5 to 6
#
# this is necessary to locally disable messages applying to a class /
# function using their fromlineno
if isinstance(node, (nodes.Module, nodes.Class, nodes.Function)) and node.body:
firstchildlineno = node.body[0].fromlineno
else:
firstchildlineno = last
for msgid, lines in msg_state.iteritems():
for lineno, state in lines.items():
if first <= lineno <= last:
if lineno > firstchildlineno:
state = True
# set state for all lines for this block
first, last = node.block_range(lineno)
for line in xrange(first, last+1):
# do not override existing entries
if line not in self._module_msgs_state.get(msgid, ()):
if line in lines: # state change in the same block
state = lines[line]
try:
self._module_msgs_state[msgid][line] = state
except KeyError:
self._module_msgs_state[msgid] = {line: state}
del lines[lineno]
# code checking methods ###################################################
def get_checkers(self):
"""return all available checkers as a list"""
return [self] + [c for checkers in self._checkers.values()
for c in checkers if c is not self]
def prepare_checkers(self):
"""return checkers needed for activated messages and reports"""
if not self.config.reports:
self.disable_reporters()
# get needed checkers
neededcheckers = [self]
for checker in self.get_checkers()[1:]:
messages = set(msg for msg in checker.msgs
if self.is_message_enabled(msg))
if (messages or
any(self.report_is_enabled(r[0]) for r in checker.reports)):
neededcheckers.append(checker)
checker.active_msgs = messages
return neededcheckers
def check(self, files_or_modules):
"""main checking entry: check a list of files or modules from their
name.
"""
self.reporter.include_ids = self.config.include_ids
if not isinstance(files_or_modules, (list, tuple)):
files_or_modules = (files_or_modules,)
walker = PyLintASTWalker(self)
checkers = self.prepare_checkers()
rawcheckers = [c for c in checkers if implements(c, IRawChecker)
and c is not self]
# notify global begin
for checker in checkers:
checker.open()
if implements(checker, IASTNGChecker):
walker.add_checker(checker)
# build ast and check modules or packages
for descr in self.expand_files(files_or_modules):
modname, filepath = descr['name'], descr['path']
self.set_current_module(modname, filepath)
# get the module representation
astng = self.get_astng(filepath, modname)
if astng is None:
continue
self.base_name = descr['basename']
self.base_file = descr['basepath']
if self.config.files_output:
reportfile = 'pylint_%s.%s' % (modname, self.reporter.extension)
self.reporter.set_output(open(reportfile, 'w'))
self._ignore_file = False
# fix the current file (if the source file was not available or
# if it's actually a c extension)
self.current_file = astng.file
self.check_astng_module(astng, walker, rawcheckers)
# notify global end
self.set_current_module('')
self.stats['statement'] = walker.nbstatements
checkers.reverse()
for checker in checkers:
checker.close()
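# Hedged usage sketch of the programmatic API above (the module name is
# hypothetical):
#
#   linter = PyLinter()
#   linter.load_default_plugins()
#   linter.check(['mypackage'])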
def expand_files(self, modules):
"""get modules and errors from a list of modules and handle errors
"""
result, errors = expand_modules(modules, self.config.black_list)
for error in errors:
message = modname = error["mod"]
key = error["key"]
self.set_current_module(modname)
if key == "F0001":
message = str(error["ex"]).replace(os.getcwd() + os.sep, '')
self.add_message(key, args=message)
return result
def set_current_module(self, modname, filepath=None):
"""set the name of the currently analyzed module and
init statistics for it
"""
if not modname and filepath is None:
return
self.reporter.on_set_current_module(modname, filepath)
self.current_name = modname
self.current_file = filepath or modname
self.stats['by_module'][modname] = {}
self.stats['by_module'][modname]['statement'] = 0
for msg_cat in MSG_TYPES.values():
self.stats['by_module'][modname][msg_cat] = 0
# XXX hack, to be correct we need to keep module_msgs_state
# for every analyzed module (the problem stands with localized
# messages which are only detected in the .close step)
if modname:
self._module_msgs_state = {}
self._module_msg_cats_state = {}
def get_astng(self, filepath, modname):
"""return a astng representation for a module"""
try:
return MANAGER.astng_from_file(filepath, modname, source=True)
except SyntaxError, ex:
self.add_message('E0001', line=ex.lineno, args=ex.msg)
except ASTNGBuildingException, ex:
self.add_message('F0010', args=ex)
except Exception, ex:
import traceback
traceback.print_exc()
self.add_message('F0002', args=(ex.__class__, ex))
def check_astng_module(self, astng, walker, rawcheckers):
"""check a module from its astng representation, real work"""
# call raw checkers if possible
if not astng.pure_python:
self.add_message('I0001', args=astng.name)
else:
#assert astng.file.endswith('.py')
# invoke IRawChecker interface on self to fetch module/block
# level options
self.process_module(astng)
if self._ignore_file:
return False
# walk ast to collect line numbers
orig_state = self._module_msgs_state.copy()
self._module_msgs_state = {}
self.collect_block_lines(astng, orig_state)
for checker in rawcheckers:
checker.process_module(astng)
# generate events to astng checkers
walker.walk(astng)
return True
# IASTNGChecker interface #################################################
def open(self):
"""initialize counters"""
self.stats = { 'by_module' : {},
'by_msg' : {},
}
for msg_cat in MSG_TYPES.values():
self.stats[msg_cat] = 0
def close(self):
"""close the whole package /module, it's time to make reports !
if persistent run, pickle results for later comparison
"""
if self.base_name is not None:
# load previous results if any
previous_stats = config.load_results(self.base_name)
# XXX code below needs refactoring to be more reporter agnostic
self.reporter.on_close(self.stats, previous_stats)
if self.config.reports:
sect = self.make_reports(self.stats, previous_stats)
if self.config.files_output:
filename = 'pylint_global.' + self.reporter.extension
self.reporter.set_output(open(filename, 'w'))
else:
sect = Section()
if self.config.reports or self.config.output_format == 'html':
self.reporter.display_results(sect)
# save results if persistent run
if self.config.persistent:
config.save_results(self.stats, self.base_name)
# specific reports ########################################################
def report_evaluation(self, sect, stats, previous_stats):
"""make the global evaluation report"""
# only report when at least 1 statement was analyzed (usually 0 when
# there is a syntax error preventing pylint from further processing)
if stats['statement'] == 0:
raise EmptyReport()
# get a global note for the code
evaluation = self.config.evaluation
try:
note = eval(evaluation, {}, self.stats)
except Exception, ex:
msg = 'An exception occurred while rating: %s' % ex
else:
stats['global_note'] = note
msg = 'Your code has been rated at %.2f/10' % note
if 'global_note' in previous_stats:
msg += ' (previous run: %.2f/10)' % previous_stats['global_note']
if self.config.comment:
msg = '%s\n%s' % (msg, config.get_note_message(note))
sect.append(Text(msg))
# some reporting functions ####################################################
def report_total_messages_stats(sect, stats, previous_stats):
"""make total errors / warnings report"""
lines = ['type', 'number', 'previous', 'difference']
lines += table_lines_from_stats(stats, previous_stats,
('convention', 'refactor',
'warning', 'error'))
sect.append(Table(children=lines, cols=4, rheaders=1))
def report_messages_stats(sect, stats, _):
"""make messages type report"""
if not stats['by_msg']:
# don't print this report when we didn't detect any messages
raise EmptyReport()
in_order = sorted([(value, msg_id)
for msg_id, value in stats['by_msg'].items()
if not msg_id.startswith('I')])
in_order.reverse()
lines = ('message id', 'occurrences')
for value, msg_id in in_order:
lines += (msg_id, str(value))
sect.append(Table(children=lines, cols=2, rheaders=1))
def report_messages_by_module_stats(sect, stats, _):
"""make errors / warnings by modules report"""
if len(stats['by_module']) == 1:
# don't print this report when we are analysing a single module
raise EmptyReport()
by_mod = {}
for m_type in ('fatal', 'error', 'warning', 'refactor', 'convention'):
total = stats[m_type]
for module in stats['by_module'].keys():
mod_total = stats['by_module'][module][m_type]
if total == 0:
percent = 0
else:
percent = float((mod_total)*100) / total
by_mod.setdefault(module, {})[m_type] = percent
sorted_result = []
for module, mod_info in by_mod.items():
sorted_result.append((mod_info['error'],
mod_info['warning'],
mod_info['refactor'],
mod_info['convention'],
module))
sorted_result.sort()
sorted_result.reverse()
lines = ['module', 'error', 'warning', 'refactor', 'convention']
for line in sorted_result:
if line[0] == 0 and line[1] == 0:
break
lines.append(line[-1])
for val in line[:-1]:
lines.append('%.2f' % val)
if len(lines) == 5:
raise EmptyReport()
sect.append(Table(children=lines, cols=5, rheaders=1))
# utilities ###################################################################
# this may help to import modules using gettext
try:
__builtins__._ = str
except AttributeError:
__builtins__['_'] = str
class ArgumentPreprocessingError(Exception):
"""Raised if an error occurs during argument preprocessing."""
def preprocess_options(args, search_for):
"""look for some options (keys of <search_for>) which have to be processed
before others.
Values of <search_for> are (callback, takearg) tuples; the callback is
called with (option, value) when the option is found.
"""
i = 0
while i < len(args):
arg = args[i]
if arg.startswith('--'):
try:
option, val = arg[2:].split('=', 1)
except ValueError:
option, val = arg[2:], None
try:
cb, takearg = search_for[option]
del args[i]
if takearg and val is None:
if i >= len(args) or args[i].startswith('-'):
raise ArgumentPreprocessingError(arg)
val = args[i]
del args[i]
cb(option, val)
except KeyError:
i += 1
else:
i += 1
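def _preprocess_options_example():
    """
    Minimal demo (illustrative only, mirroring the call in Run.__init__
    below): the option is consumed before regular optik parsing and its
    value is handed to the callback.
    """
    seen = []
    args = ['--rcfile=/tmp/pylintrc', 'mymodule']
    preprocess_options(args, {'rcfile': (lambda opt, val: seen.append(val), True)})
    assert args == ['mymodule'] and seen == ['/tmp/pylintrc']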
class Run:
"""helper class to use as main for pylint :
run(*sys.argv[1:])
"""
LinterClass = PyLinter
option_groups = (
('Commands', 'Options which are actually commands. Options in this \
group are mutually exclusive.'),
)
def __init__(self, args, reporter=None, exit=True):
self._rcfile = None
self._plugins = []
try:
preprocess_options(args, {
# option: (callback, takearg)
'rcfile': (self.cb_set_rcfile, True),
'load-plugins': (self.cb_add_plugins, True),
})
except ArgumentPreprocessingError, e:
print >> sys.stderr, 'Argument %s expects a value.' % (e.args[0],)
sys.exit(32)
self.linter = linter = self.LinterClass((
('rcfile',
{'action' : 'callback', 'callback' : lambda *args: 1,
'type': 'string', 'metavar': '<file>',
'help' : 'Specify a configuration file.'}),
('init-hook',
{'action' : 'callback', 'type' : 'string', 'metavar': '<code>',
'callback' : cb_init_hook, 'level': 1,
'help' : 'Python code to execute, usually for sys.path \
manipulation such as pygtk.require().'}),
('help-msg',
{'action' : 'callback', 'type' : 'string', 'metavar': '<msg-id>',
'callback' : self.cb_help_message,
'group': 'Commands',
'help' : '''Display a help message for the given message id and \
exit. The value may be a comma separated list of message ids.'''}),
('list-msgs',
{'action' : 'callback', 'metavar': '<msg-id>',
'callback' : self.cb_list_messages,
'group': 'Commands', 'level': 1,
'help' : "Generate pylint's messages."}),
('full-documentation',
{'action' : 'callback', 'metavar': '<msg-id>',
'callback' : self.cb_full_documentation,
'group': 'Commands', 'level': 1,
'help' : "Generate pylint's full documentation."}),
('generate-rcfile',
{'action' : 'callback', 'callback' : self.cb_generate_config,
'group': 'Commands',
'help' : '''Generate a sample configuration file according to \
the current configuration. You can put other options before this one to get \
them in the generated configuration.'''}),
('generate-man',
{'action' : 'callback', 'callback' : self.cb_generate_manpage,
'group': 'Commands',
'help' : "Generate pylint's man page.",'hide': True}),
('errors-only',
{'action' : 'callback', 'callback' : self.cb_error_mode,
'short': 'E',
'help' : '''In error mode, checkers without error messages are \
disabled; for the others, only ERROR messages are displayed, and no reports \
are generated by default'''}),
('profile',
{'type' : 'yn', 'metavar' : '<y_or_n>',
'default': False, 'hide': True,
'help' : 'Profiled execution.'}),
), option_groups=self.option_groups,
reporter=reporter, pylintrc=self._rcfile)
# register standard checkers
linter.load_default_plugins()
# load command line plugins
linter.load_plugin_modules(self._plugins)
# add some help section
linter.add_help_section('Environment variables', config.ENV_HELP, level=1)
linter.add_help_section('Output', '''
Using the default text output, the message format is:
MESSAGE_TYPE: LINE_NUM:[OBJECT:] MESSAGE
There are 5 kinds of message types:
* (C) convention, for programming standard violation
* (R) refactor, for bad code smell
* (W) warning, for python specific problems
* (E) error, for probable bugs in the code
* (F) fatal, if an error occurred which prevented pylint from doing further
processing.
''', level=1)
linter.add_help_section('Output status code', '''
Pylint should exit with one of the following status codes:
* 0 if everything went fine
* 1 if a fatal message was issued
* 2 if an error message was issued
* 4 if a warning message was issued
* 8 if a refactor message was issued
* 16 if a convention message was issued
* 32 on usage error
status codes 1 to 16 will be bit-ORed, so you can tell which categories have
been issued by analysing pylint's output status code
''', level=1)
# read configuration
linter.disable('W0704')
linter.read_config_file()
# are there additional plugins declared in the file configuration?
config_parser = linter.cfgfile_parser
if config_parser.has_option('MASTER', 'load-plugins'):
plugins = splitstrip(config_parser.get('MASTER', 'load-plugins'))
linter.load_plugin_modules(plugins)
# now we can load file config and command line, plugins (which can
# provide options) have been registered
linter.load_config_file()
if reporter:
# if a custom reporter is provided as argument, it may be overridden
# by file parameters, so re-set it here, but before command line
# parsing so it's still overrideable by command line option
linter.set_reporter(reporter)
try:
args = linter.load_command_line_configuration(args)
except SystemExit, exc:
if exc.code == 2: # bad options
exc.code = 32
raise
if not args:
print linter.help()
sys.exit(32)
# insert the current working directory into the python path to get a
# correct behaviour
if len(args) == 1:
sys.path.insert(0, _get_python_path(args[0]))
else:
sys.path.insert(0, os.getcwd())
if self.linter.config.profile:
print >> sys.stderr, '** profiled run'
import cProfile, pstats
cProfile.runctx('linter.check(%r)' % args, globals(), locals(), 'stones.prof' )
data = pstats.Stats('stones.prof')
data.strip_dirs()
data.sort_stats('time', 'calls')
data.print_stats(30)
else:
linter.check(args)
sys.path.pop(0)
if exit:
sys.exit(self.linter.msg_status)
def cb_set_rcfile(self, name, value):
"""callback for option preprocessing (i.e. before optik parsing)"""
self._rcfile = value
def cb_add_plugins(self, name, value):
"""callback for option preprocessing (i.e. before optik parsing)"""
self._plugins.extend(splitstrip(value))
def cb_error_mode(self, *args, **kwargs):
"""error mode:
* disable all but error messages
* disable the 'miscellaneous' checker which can be safely deactivated in
debug
* disable reports
* do not save execution information
"""
self.linter.error_mode()
def cb_generate_config(self, *args, **kwargs):
"""optik callback for sample config file generation"""
self.linter.generate_config(skipsections=('COMMANDS',))
sys.exit(0)
def cb_generate_manpage(self, *args, **kwargs):
"""optik callback for sample config file generation"""
from pylint import __pkginfo__
self.linter.generate_manpage(__pkginfo__)
sys.exit(0)
def cb_help_message(self, option, optname, value, parser):
"""optik callback for printing some help about a particular message"""
self.linter.help_message(splitstrip(value))
sys.exit(0)
def cb_full_documentation(self, option, optname, value, parser):
"""optik callback for printing full documentation"""
self.linter.print_full_documentation()
sys.exit(0)
def cb_list_messages(self, option, optname, value, parser): # FIXME
"""optik callback for printing available messages"""
self.linter.list_messages()
sys.exit(0)
def cb_init_hook(option, optname, value, parser):
"""exec arbitrary code to set sys.path for instance"""
exec value
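# Hedged usage note for the init-hook option defined above, typically used
# to adjust sys.path before linting (hypothetical invocation):
#
#   pylint --init-hook='import sys; sys.path.append("lib")' mymodule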
if __name__ == '__main__':
Run(sys.argv[1:])
|
|
"""Test the validation module"""
from __future__ import division
import sys
import warnings
import tempfile
import os
from time import sleep
import numpy as np
from scipy.sparse import coo_matrix, csr_matrix
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import cross_val_predict
from sklearn.model_selection import permutation_test_score
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import LeaveOneOut
from sklearn.model_selection import LeaveOneGroupOut
from sklearn.model_selection import LeavePGroupsOut
from sklearn.model_selection import GroupKFold
from sklearn.model_selection import GroupShuffleSplit
from sklearn.model_selection import learning_curve
from sklearn.model_selection import validation_curve
from sklearn.model_selection._validation import _check_is_permutation
from sklearn.datasets import make_regression
from sklearn.datasets import load_boston
from sklearn.datasets import load_iris
from sklearn.metrics import explained_variance_score
from sklearn.metrics import make_scorer
from sklearn.metrics import precision_score
from sklearn.linear_model import Ridge, LogisticRegression
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer
from sklearn.preprocessing import LabelEncoder
from sklearn.pipeline import Pipeline
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.base import BaseEstimator
from sklearn.multiclass import OneVsRestClassifier
from sklearn.utils import shuffle
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.model_selection.tests.common import OneTimeSplitter
from sklearn.model_selection import GridSearchCV
try:
WindowsError
except NameError:
WindowsError = None
class MockImprovingEstimator(BaseEstimator):
"""Dummy classifier to test the learning curve"""
def __init__(self, n_max_train_sizes):
self.n_max_train_sizes = n_max_train_sizes
self.train_sizes = 0
self.X_subset = None
def fit(self, X_subset, y_subset=None):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, Y=None):
# training score becomes worse (2 -> 1), test score gets better (0 -> 1)
if self._is_training_data(X):
return 2. - float(self.train_sizes) / self.n_max_train_sizes
else:
return float(self.train_sizes) / self.n_max_train_sizes
def _is_training_data(self, X):
return X is self.X_subset
class MockIncrementalImprovingEstimator(MockImprovingEstimator):
"""Dummy classifier that provides partial_fit"""
def __init__(self, n_max_train_sizes):
super(MockIncrementalImprovingEstimator,
self).__init__(n_max_train_sizes)
self.x = None
def _is_training_data(self, X):
return self.x in X
def partial_fit(self, X, y=None, **params):
self.train_sizes += X.shape[0]
self.x = X[0]
class MockEstimatorWithParameter(BaseEstimator):
"""Dummy classifier to test the validation curve"""
def __init__(self, param=0.5):
self.X_subset = None
self.param = param
def fit(self, X_subset, y_subset):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, y=None):
return self.param if self._is_training_data(X) else 1 - self.param
def _is_training_data(self, X):
return X is self.X_subset
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, a=0, allow_nd=False):
self.a = a
self.allow_nd = allow_nd
def fit(self, X, Y=None, sample_weight=None, class_prior=None,
sparse_sample_weight=None, sparse_param=None, dummy_int=None,
dummy_str=None, dummy_obj=None, callback=None):
"""The dummy arguments are to test that this fit function can
accept non-array arguments through cross-validation, such as:
- int
- str (this is actually array-like)
- object
- function
"""
self.dummy_int = dummy_int
self.dummy_str = dummy_str
self.dummy_obj = dummy_obj
if callback is not None:
callback(self)
if self.allow_nd:
X = X.reshape(len(X), -1)
if X.ndim >= 3 and not self.allow_nd:
raise ValueError('X cannot be more than 2-dimensional unless allow_nd is True')
if sample_weight is not None:
assert_true(sample_weight.shape[0] == X.shape[0],
'MockClassifier extra fit_param sample_weight.shape[0]'
' is {0}, should be {1}'.format(sample_weight.shape[0],
X.shape[0]))
if class_prior is not None:
assert_true(class_prior.shape[0] == len(np.unique(y)),
'MockClassifier extra fit_param class_prior.shape[0]'
' is {0}, should be {1}'.format(class_prior.shape[0],
len(np.unique(y))))
if sparse_sample_weight is not None:
fmt = ('MockClassifier extra fit_param sparse_sample_weight'
'.shape[0] is {0}, should be {1}')
assert_true(sparse_sample_weight.shape[0] == X.shape[0],
fmt.format(sparse_sample_weight.shape[0], X.shape[0]))
if sparse_param is not None:
fmt = ('MockClassifier extra fit_param sparse_param.shape '
'is ({0}, {1}), should be ({2}, {3})')
assert_true(sparse_param.shape == P_sparse.shape,
fmt.format(sparse_param.shape[0],
sparse_param.shape[1],
P_sparse.shape[0], P_sparse.shape[1]))
return self
def predict(self, T):
if self.allow_nd:
T = T.reshape(len(T), -1)
return T[:, 0]
def score(self, X=None, Y=None):
return 1. / (1 + np.abs(self.a))
def get_params(self, deep=False):
return {'a': self.a, 'allow_nd': self.allow_nd}
# XXX: use 2D array, since 1D X is being detected as a single sample in
# check_consistent_length
X = np.ones((10, 2))
X_sparse = coo_matrix(X)
y = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4])
# The number of samples per class needs to be > n_splits,
# for StratifiedKFold(n_splits=3)
y2 = np.array([1, 1, 1, 2, 2, 2, 3, 3, 3, 3])
P_sparse = coo_matrix(np.eye(5))
def test_cross_val_score():
clf = MockClassifier()
for a in range(-10, 10):
clf.a = a
# Smoke test
scores = cross_val_score(clf, X, y2)
assert_array_equal(scores, clf.score(X, y2))
# test with multioutput y
multioutput_y = np.column_stack([y2, y2[::-1]])
scores = cross_val_score(clf, X_sparse, multioutput_y)
assert_array_equal(scores, clf.score(X_sparse, multioutput_y))
scores = cross_val_score(clf, X_sparse, y2)
assert_array_equal(scores, clf.score(X_sparse, y2))
# test with multioutput y
scores = cross_val_score(clf, X_sparse, multioutput_y)
assert_array_equal(scores, clf.score(X_sparse, multioutput_y))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
scores = cross_val_score(clf, X.tolist(), y2.tolist())
clf = CheckingClassifier(check_y=list_check)
scores = cross_val_score(clf, X, y2.tolist())
assert_raises(ValueError, cross_val_score, clf, X, y2, scoring="sklearn")
    # test with 3d X and the allow_nd flag
X_3d = X[:, :, np.newaxis]
clf = MockClassifier(allow_nd=True)
scores = cross_val_score(clf, X_3d, y2)
clf = MockClassifier(allow_nd=False)
assert_raises(ValueError, cross_val_score, clf, X_3d, y2)
def test_cross_val_score_predict_groups():
# Check if ValueError (when groups is None) propagates to cross_val_score
# and cross_val_predict
# And also check if groups is correctly passed to the cv object
X, y = make_classification(n_samples=20, n_classes=2, random_state=0)
clf = SVC(kernel="linear")
group_cvs = [LeaveOneGroupOut(), LeavePGroupsOut(2), GroupKFold(),
GroupShuffleSplit()]
for cv in group_cvs:
assert_raise_message(ValueError,
"The groups parameter should not be None",
cross_val_score, estimator=clf, X=X, y=y, cv=cv)
assert_raise_message(ValueError,
"The groups parameter should not be None",
cross_val_predict, estimator=clf, X=X, y=y, cv=cv)
def test_cross_val_score_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
        # 3 fold cross val is used so we need at least 3 samples per class
X_df, y_ser = InputFeatureType(X), TargetType(y2)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cross_val_score(clf, X_df, y_ser)
def test_cross_val_score_mask():
# test that cross_val_score works with boolean masks
svm = SVC(kernel="linear")
iris = load_iris()
X, y = iris.data, iris.target
kfold = KFold(5)
scores_indices = cross_val_score(svm, X, y, cv=kfold)
kfold = KFold(5)
cv_masks = []
for train, test in kfold.split(X, y):
mask_train = np.zeros(len(y), dtype=np.bool)
mask_test = np.zeros(len(y), dtype=np.bool)
mask_train[train] = 1
mask_test[test] = 1
        cv_masks.append((mask_train, mask_test))
scores_masks = cross_val_score(svm, X, y, cv=cv_masks)
assert_array_equal(scores_indices, scores_masks)
def test_cross_val_score_precomputed():
# test for svm with precomputed kernel
svm = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
linear_kernel = np.dot(X, X.T)
score_precomputed = cross_val_score(svm, linear_kernel, y)
svm = SVC(kernel="linear")
score_linear = cross_val_score(svm, X, y)
assert_array_almost_equal(score_precomputed, score_linear)
# test with callable
svm = SVC(kernel=lambda x, y: np.dot(x, y.T))
score_callable = cross_val_score(svm, X, y)
assert_array_almost_equal(score_precomputed, score_callable)
# Error raised for non-square X
svm = SVC(kernel="precomputed")
assert_raises(ValueError, cross_val_score, svm, X, y)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cross_val_score, svm,
linear_kernel.tolist(), y)
def test_cross_val_score_fit_params():
clf = MockClassifier()
n_samples = X.shape[0]
n_classes = len(np.unique(y))
W_sparse = coo_matrix((np.array([1]), (np.array([1]), np.array([0]))),
shape=(10, 1))
P_sparse = coo_matrix(np.eye(5))
DUMMY_INT = 42
DUMMY_STR = '42'
DUMMY_OBJ = object()
def assert_fit_params(clf):
# Function to test that the values are passed correctly to the
# classifier arguments for non-array type
assert_equal(clf.dummy_int, DUMMY_INT)
assert_equal(clf.dummy_str, DUMMY_STR)
assert_equal(clf.dummy_obj, DUMMY_OBJ)
fit_params = {'sample_weight': np.ones(n_samples),
'class_prior': np.ones(n_classes) / n_classes,
'sparse_sample_weight': W_sparse,
'sparse_param': P_sparse,
'dummy_int': DUMMY_INT,
'dummy_str': DUMMY_STR,
'dummy_obj': DUMMY_OBJ,
'callback': assert_fit_params}
cross_val_score(clf, X, y, fit_params=fit_params)
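# The test above relies on cross_val_score forwarding every `fit_params`
# entry verbatim to the estimator's fit() on each split; the `callback`
# entry lets MockClassifier check the non-array values from inside fit.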
def test_cross_val_score_score_func():
clf = MockClassifier()
_score_func_args = []
def score_func(y_test, y_predict):
_score_func_args.append((y_test, y_predict))
return 1.0
with warnings.catch_warnings(record=True):
scoring = make_scorer(score_func)
score = cross_val_score(clf, X, y, scoring=scoring)
assert_array_equal(score, [1.0, 1.0, 1.0])
assert len(_score_func_args) == 3
def test_cross_val_score_errors():
class BrokenEstimator:
pass
assert_raises(TypeError, cross_val_score, BrokenEstimator(), X)
def test_cross_val_score_with_score_func_classification():
iris = load_iris()
clf = SVC(kernel='linear')
# Default score (should be the accuracy score)
scores = cross_val_score(clf, iris.data, iris.target, cv=5)
assert_array_almost_equal(scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# Correct classification score (aka. zero / one score) - should be the
# same as the default estimator score
zo_scores = cross_val_score(clf, iris.data, iris.target,
scoring="accuracy", cv=5)
assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
    # F1 score (classes are balanced so f1_score should be equal to the
    # zero/one score)
f1_scores = cross_val_score(clf, iris.data, iris.target,
scoring="f1_weighted", cv=5)
assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
def test_cross_val_score_with_score_func_regression():
X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
random_state=0)
reg = Ridge()
# Default score of the Ridge regression estimator
scores = cross_val_score(reg, X, y, cv=5)
assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# R2 score (aka. determination coefficient) - should be the
# same as the default estimator score
r2_scores = cross_val_score(reg, X, y, scoring="r2", cv=5)
assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# Mean squared error; this is a loss function, so "scores" are negative
neg_mse_scores = cross_val_score(reg, X, y, cv=5,
scoring="neg_mean_squared_error")
expected_neg_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
assert_array_almost_equal(neg_mse_scores, expected_neg_mse, 2)
# Explained variance
scoring = make_scorer(explained_variance_score)
ev_scores = cross_val_score(reg, X, y, cv=5, scoring=scoring)
assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
def test_permutation_score():
iris = load_iris()
X = iris.data
X_sparse = coo_matrix(X)
y = iris.target
svm = SVC(kernel='linear')
cv = StratifiedKFold(2)
score, scores, pvalue = permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_greater(score, 0.9)
assert_almost_equal(pvalue, 0.0, 1)
score_group, _, pvalue_group = permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy",
groups=np.ones(y.size), random_state=0)
assert_true(score_group == score)
assert_true(pvalue_group == pvalue)
# check that we obtain the same results with a sparse representation
svm_sparse = SVC(kernel='linear')
cv_sparse = StratifiedKFold(2)
score_group, _, pvalue_group = permutation_test_score(
svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse,
scoring="accuracy", groups=np.ones(y.size), random_state=0)
assert_true(score_group == score)
assert_true(pvalue_group == pvalue)
# test with custom scoring object
def custom_score(y_true, y_pred):
return (((y_true == y_pred).sum() - (y_true != y_pred).sum()) /
y_true.shape[0])
scorer = make_scorer(custom_score)
score, _, pvalue = permutation_test_score(
svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0)
assert_almost_equal(score, .93, 2)
assert_almost_equal(pvalue, 0.01, 3)
# set random y
y = np.mod(np.arange(len(y)), 3)
score, scores, pvalue = permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_less(score, 0.5)
assert_greater(pvalue, 0.2)
def test_permutation_test_score_allow_nans():
# Check that permutation_test_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
permutation_test_score(p, X, y, cv=5)
def test_cross_val_score_allow_nans():
# Check that cross_val_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cross_val_score(p, X, y, cv=5)
def test_cross_val_score_multilabel():
X = np.array([[-3, 4], [2, 4], [3, 3], [0, 2], [-3, 1],
[-2, 1], [0, 0], [-2, -1], [-1, -2], [1, -2]])
y = np.array([[1, 1], [0, 1], [0, 1], [0, 1], [1, 1],
[0, 1], [1, 0], [1, 1], [1, 0], [0, 0]])
clf = KNeighborsClassifier(n_neighbors=1)
scoring_micro = make_scorer(precision_score, average='micro')
scoring_macro = make_scorer(precision_score, average='macro')
scoring_samples = make_scorer(precision_score, average='samples')
score_micro = cross_val_score(clf, X, y, scoring=scoring_micro, cv=5)
score_macro = cross_val_score(clf, X, y, scoring=scoring_macro, cv=5)
score_samples = cross_val_score(clf, X, y, scoring=scoring_samples, cv=5)
assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3])
assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
def test_cross_val_predict():
boston = load_boston()
X, y = boston.data, boston.target
cv = KFold()
est = Ridge()
# Naive loop (should be same as cross_val_predict):
preds2 = np.zeros_like(y)
for train, test in cv.split(X, y):
est.fit(X[train], y[train])
preds2[test] = est.predict(X[test])
preds = cross_val_predict(est, X, y, cv=cv)
assert_array_almost_equal(preds, preds2)
preds = cross_val_predict(est, X, y)
assert_equal(len(preds), len(y))
cv = LeaveOneOut()
preds = cross_val_predict(est, X, y, cv=cv)
assert_equal(len(preds), len(y))
Xsp = X.copy()
Xsp *= (Xsp > np.median(Xsp))
Xsp = coo_matrix(Xsp)
preds = cross_val_predict(est, Xsp, y)
assert_array_almost_equal(len(preds), len(y))
preds = cross_val_predict(KMeans(), X)
assert_equal(len(preds), len(y))
class BadCV():
def split(self, X, y=None, groups=None):
for i in range(4):
yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8])
assert_raises(ValueError, cross_val_predict, est, X, y, cv=BadCV())
def test_cross_val_predict_input_types():
iris = load_iris()
X, y = iris.data, iris.target
X_sparse = coo_matrix(X)
multioutput_y = np.column_stack([y, y[::-1]])
clf = Ridge(fit_intercept=False, random_state=0)
    # 3 fold cv is used --> at least 3 samples per class
# Smoke test
predictions = cross_val_predict(clf, X, y)
assert_equal(predictions.shape, (150,))
# test with multioutput y
predictions = cross_val_predict(clf, X_sparse, multioutput_y)
assert_equal(predictions.shape, (150, 2))
predictions = cross_val_predict(clf, X_sparse, y)
assert_array_equal(predictions.shape, (150,))
# test with multioutput y
predictions = cross_val_predict(clf, X_sparse, multioutput_y)
assert_array_equal(predictions.shape, (150, 2))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
predictions = cross_val_predict(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
predictions = cross_val_predict(clf, X, y.tolist())
# test with 3d X and
X_3d = X[:, :, np.newaxis]
check_3d = lambda x: x.ndim == 3
clf = CheckingClassifier(check_X=check_3d)
predictions = cross_val_predict(clf, X_3d, y)
assert_array_equal(predictions.shape, (150,))
def test_cross_val_predict_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y2)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cross_val_predict(clf, X_df, y_ser)
def test_cross_val_score_sparse_fit_params():
iris = load_iris()
X, y = iris.data, iris.target
clf = MockClassifier()
fit_params = {'sparse_sample_weight': coo_matrix(np.eye(X.shape[0]))}
a = cross_val_score(clf, X, y, fit_params=fit_params)
assert_array_equal(a, np.ones(3))
def test_learning_curve():
n_samples = 30
n_splits = 3
X, y = make_classification(n_samples=n_samples, n_features=1,
n_informative=1, n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(n_samples * ((n_splits - 1) / n_splits))
for shuffle_train in [False, True]:
with warnings.catch_warnings(record=True) as w:
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=KFold(n_splits=n_splits),
train_sizes=np.linspace(0.1, 1.0, 10),
shuffle=shuffle_train)
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_equal(train_scores.shape, (10, 3))
assert_equal(test_scores.shape, (10, 3))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
# Test a custom cv splitter that can iterate only once
with warnings.catch_warnings(record=True) as w:
train_sizes2, train_scores2, test_scores2 = learning_curve(
estimator, X, y,
cv=OneTimeSplitter(n_splits=n_splits, n_samples=n_samples),
train_sizes=np.linspace(0.1, 1.0, 10),
shuffle=shuffle_train)
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_array_almost_equal(train_scores2, train_scores)
assert_array_almost_equal(test_scores2, test_scores)
def test_learning_curve_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_verbose():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
train_sizes, train_scores, test_scores = \
learning_curve(estimator, X, y, cv=3, verbose=1)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[learning_curve]" in out)
def test_learning_curve_incremental_learning_not_possible():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
# The mockup does not have partial_fit()
estimator = MockImprovingEstimator(1)
assert_raises(ValueError, learning_curve, estimator, X, y,
exploit_incremental_learning=True)
def test_learning_curve_incremental_learning():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
for shuffle_train in [False, True]:
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10), shuffle=shuffle_train)
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_incremental_learning_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_batch_and_incremental_learning_are_equal():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
train_sizes = np.linspace(0.2, 1.0, 5)
estimator = PassiveAggressiveClassifier(n_iter=1, shuffle=False)
train_sizes_inc, train_scores_inc, test_scores_inc = \
learning_curve(
estimator, X, y, train_sizes=train_sizes,
cv=3, exploit_incremental_learning=True)
train_sizes_batch, train_scores_batch, test_scores_batch = \
learning_curve(
estimator, X, y, cv=3, train_sizes=train_sizes,
exploit_incremental_learning=False)
assert_array_equal(train_sizes_inc, train_sizes_batch)
assert_array_almost_equal(train_scores_inc.mean(axis=1),
train_scores_batch.mean(axis=1))
assert_array_almost_equal(test_scores_inc.mean(axis=1),
test_scores_batch.mean(axis=1))
def test_learning_curve_n_sample_range_out_of_bounds():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.0, 1.0])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.1, 1.1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 20])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[1, 21])
def test_learning_curve_remove_duplicate_sample_sizes():
X, y = make_classification(n_samples=3, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(2)
train_sizes, _, _ = assert_warns(
RuntimeWarning, learning_curve, estimator, X, y, cv=3,
train_sizes=np.linspace(0.33, 1.0, 3))
assert_array_equal(train_sizes, [1, 2])
def test_learning_curve_with_boolean_indices():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
cv = KFold(n_splits=3)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_with_shuffle():
    # The following test case was designed to verify the code changes made
    # in pull request #7506.
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8], [11, 12], [13, 14], [15, 16],
[17, 18], [19, 20], [7, 8], [9, 10], [11, 12], [13, 14],
[15, 16], [17, 18]])
y = np.array([1, 1, 1, 2, 3, 4, 1, 1, 2, 3, 4, 1, 2, 3, 4])
groups = np.array([1, 1, 1, 1, 1, 1, 3, 3, 3, 3, 3, 4, 4, 4, 4])
# Splits on these groups fail without shuffle as the first iteration
# of the learning curve doesn't contain label 4 in the training set.
estimator = PassiveAggressiveClassifier(shuffle=False)
cv = GroupKFold(n_splits=2)
train_sizes_batch, train_scores_batch, test_scores_batch = learning_curve(
estimator, X, y, cv=cv, n_jobs=1, train_sizes=np.linspace(0.3, 1.0, 3),
groups=groups, shuffle=True, random_state=2)
assert_array_almost_equal(train_scores_batch.mean(axis=1),
np.array([0.75, 0.3, 0.36111111]))
assert_array_almost_equal(test_scores_batch.mean(axis=1),
np.array([0.36111111, 0.25, 0.25]))
assert_raises(ValueError, learning_curve, estimator, X, y, cv=cv, n_jobs=1,
train_sizes=np.linspace(0.3, 1.0, 3), groups=groups)
train_sizes_inc, train_scores_inc, test_scores_inc = learning_curve(
estimator, X, y, cv=cv, n_jobs=1, train_sizes=np.linspace(0.3, 1.0, 3),
groups=groups, shuffle=True, random_state=2,
exploit_incremental_learning=True)
assert_array_almost_equal(train_scores_inc.mean(axis=1),
train_scores_batch.mean(axis=1))
assert_array_almost_equal(test_scores_inc.mean(axis=1),
test_scores_batch.mean(axis=1))
def test_validation_curve():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
param_range = np.linspace(0, 1, 10)
with warnings.catch_warnings(record=True) as w:
train_scores, test_scores = validation_curve(
MockEstimatorWithParameter(), X, y, param_name="param",
param_range=param_range, cv=2
)
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_array_almost_equal(train_scores.mean(axis=1), param_range)
assert_array_almost_equal(test_scores.mean(axis=1), 1 - param_range)
def test_validation_curve_cv_splits_consistency():
n_samples = 100
n_splits = 5
X, y = make_classification(n_samples=100, random_state=0)
scores1 = validation_curve(SVC(kernel='linear', random_state=0), X, y,
'C', [0.1, 0.1, 0.2, 0.2],
cv=OneTimeSplitter(n_splits=n_splits,
n_samples=n_samples))
    # The OneTimeSplitter is a non-re-entrant cv splitter. Unless `split`
    # is called for each parameter setting, the following should produce
    # identical results for param setting 1 and param setting 2, as both
    # have the same C value.
assert_array_almost_equal(*np.vsplit(np.hstack(scores1)[(0, 2, 1, 3), :],
2))
scores2 = validation_curve(SVC(kernel='linear', random_state=0), X, y,
'C', [0.1, 0.1, 0.2, 0.2],
cv=KFold(n_splits=n_splits, shuffle=True))
# For scores2, compare the 1st and 2nd parameter's scores
# (Since the C value for 1st two param setting is 0.1, they must be
# consistent unless the train test folds differ between the param settings)
assert_array_almost_equal(*np.vsplit(np.hstack(scores2)[(0, 2, 1, 3), :],
2))
scores3 = validation_curve(SVC(kernel='linear', random_state=0), X, y,
'C', [0.1, 0.1, 0.2, 0.2],
cv=KFold(n_splits=n_splits))
# OneTimeSplitter is basically unshuffled KFold(n_splits=5). Sanity check.
assert_array_almost_equal(np.array(scores3), np.array(scores1))
def test_check_is_permutation():
rng = np.random.RandomState(0)
p = np.arange(100)
rng.shuffle(p)
assert_true(_check_is_permutation(p, 100))
assert_false(_check_is_permutation(np.delete(p, 23), 100))
p[0] = 23
assert_false(_check_is_permutation(p, 100))
# Check if the additional duplicate indices are caught
assert_false(_check_is_permutation(np.hstack((p, 0)), 100))
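# A minimal sketch (not part of the original suite) of the property that
# _check_is_permutation verifies: cross_val_predict is only well defined
# when the concatenated test indices visit each of the n_samples rows
# exactly once.
def _is_permutation_sketch(indices, n_samples):
    """Return True if `indices` is a permutation of range(n_samples)."""
    indices = np.asarray(indices)
    if indices.shape[0] != n_samples:
        return False
    if indices.size and (indices.min() < 0 or indices.max() >= n_samples):
        return False
    hit = np.zeros(n_samples, dtype=bool)
    hit[indices] = True
    return bool(hit.all())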
def test_cross_val_predict_sparse_prediction():
# check that cross_val_predict gives same result for sparse and dense input
X, y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
return_indicator=True,
random_state=1)
X_sparse = csr_matrix(X)
y_sparse = csr_matrix(y)
classif = OneVsRestClassifier(SVC(kernel='linear'))
preds = cross_val_predict(classif, X, y, cv=10)
preds_sparse = cross_val_predict(classif, X_sparse, y_sparse, cv=10)
preds_sparse = preds_sparse.toarray()
assert_array_almost_equal(preds_sparse, preds)
def check_cross_val_predict_with_method(est):
iris = load_iris()
X, y = iris.data, iris.target
X, y = shuffle(X, y, random_state=0)
classes = len(set(y))
kfold = KFold(len(iris.target))
methods = ['decision_function', 'predict_proba', 'predict_log_proba']
for method in methods:
predictions = cross_val_predict(est, X, y, method=method)
assert_equal(len(predictions), len(y))
expected_predictions = np.zeros([len(y), classes])
func = getattr(est, method)
# Naive loop (should be same as cross_val_predict):
for train, test in kfold.split(X, y):
est.fit(X[train], y[train])
expected_predictions[test] = func(X[test])
predictions = cross_val_predict(est, X, y, method=method,
cv=kfold)
assert_array_almost_equal(expected_predictions, predictions)
# Test alternative representations of y
predictions_y1 = cross_val_predict(est, X, y + 1, method=method,
cv=kfold)
assert_array_equal(predictions, predictions_y1)
predictions_y2 = cross_val_predict(est, X, y - 2, method=method,
cv=kfold)
assert_array_equal(predictions, predictions_y2)
predictions_ystr = cross_val_predict(est, X, y.astype('str'),
method=method, cv=kfold)
assert_array_equal(predictions, predictions_ystr)
def test_cross_val_predict_with_method():
check_cross_val_predict_with_method(LogisticRegression())
def test_gridsearchcv_cross_val_predict_with_method():
est = GridSearchCV(LogisticRegression(random_state=42),
{'C': [0.1, 1]},
cv=2)
check_cross_val_predict_with_method(est)
def get_expected_predictions(X, y, cv, classes, est, method):
expected_predictions = np.zeros([len(y), classes])
func = getattr(est, method)
for train, test in cv.split(X, y):
est.fit(X[train], y[train])
expected_predictions_ = func(X[test])
# To avoid 2 dimensional indexing
exp_pred_test = np.zeros((len(test), classes))
        if method == 'decision_function' and len(est.classes_) == 2:
exp_pred_test[:, est.classes_[-1]] = expected_predictions_
else:
exp_pred_test[:, est.classes_] = expected_predictions_
expected_predictions[test] = exp_pred_test
return expected_predictions
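# Note: for binary problems decision_function returns shape (n_samples,)
# rather than (n_samples, n_classes), which is why the helper above writes
# those scores into the positive-class column only.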
def test_cross_val_predict_class_subset():
X = np.arange(8).reshape(4, 2)
y = np.array([0, 0, 1, 2])
classes = 3
kfold3 = KFold(n_splits=3)
kfold4 = KFold(n_splits=4)
le = LabelEncoder()
methods = ['decision_function', 'predict_proba', 'predict_log_proba']
for method in methods:
est = LogisticRegression()
# Test with n_splits=3
predictions = cross_val_predict(est, X, y, method=method,
cv=kfold3)
# Runs a naive loop (should be same as cross_val_predict):
expected_predictions = get_expected_predictions(X, y, kfold3, classes,
est, method)
assert_array_almost_equal(expected_predictions, predictions)
# Test with n_splits=4
predictions = cross_val_predict(est, X, y, method=method,
cv=kfold4)
expected_predictions = get_expected_predictions(X, y, kfold4, classes,
est, method)
assert_array_almost_equal(expected_predictions, predictions)
# Testing unordered labels
y = [1, 1, -4, 6]
predictions = cross_val_predict(est, X, y, method=method,
cv=kfold3)
y = le.fit_transform(y)
expected_predictions = get_expected_predictions(X, y, kfold3, classes,
est, method)
assert_array_almost_equal(expected_predictions, predictions)
def test_score_memmap():
# Ensure a scalar score of memmap type is accepted
iris = load_iris()
X, y = iris.data, iris.target
clf = MockClassifier()
tf = tempfile.NamedTemporaryFile(mode='wb', delete=False)
tf.write(b'Hello world!!!!!')
tf.close()
scores = np.memmap(tf.name, dtype=np.float64)
score = np.memmap(tf.name, shape=(), mode='r', dtype=np.float64)
try:
cross_val_score(clf, X, y, scoring=lambda est, X, y: score)
# non-scalar should still fail
assert_raises(ValueError, cross_val_score, clf, X, y,
scoring=lambda est, X, y: scores)
finally:
# Best effort to release the mmap file handles before deleting the
# backing file under Windows
scores, score = None, None
for _ in range(3):
try:
os.unlink(tf.name)
break
except WindowsError:
sleep(1.)
def test_permutation_test_score_pandas():
# check permutation_test_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
iris = load_iris()
X, y = iris.data, iris.target
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
permutation_test_score(clf, X_df, y_ser)
"""Helpers that help with state related things."""
import asyncio
import datetime as dt
import json
import logging
from collections import defaultdict
from types import TracebackType
from typing import ( # noqa: F401 pylint: disable=unused-import
Awaitable, Dict, Iterable, List, Optional, Tuple, Type, Union)
from homeassistant.loader import bind_hass
import homeassistant.util.dt as dt_util
from homeassistant.components.notify import (
ATTR_MESSAGE, SERVICE_NOTIFY)
from homeassistant.components.sun import (
STATE_ABOVE_HORIZON, STATE_BELOW_HORIZON)
from homeassistant.components.mysensors.switch import (
ATTR_IR_CODE, SERVICE_SEND_IR_CODE)
from homeassistant.components.cover import (
ATTR_POSITION, ATTR_TILT_POSITION)
from homeassistant.const import (
ATTR_ENTITY_ID, ATTR_OPTION, SERVICE_ALARM_ARM_AWAY,
SERVICE_ALARM_ARM_HOME, SERVICE_ALARM_DISARM, SERVICE_ALARM_TRIGGER,
SERVICE_LOCK, SERVICE_TURN_OFF, SERVICE_TURN_ON, SERVICE_UNLOCK,
SERVICE_OPEN_COVER,
SERVICE_CLOSE_COVER, SERVICE_SET_COVER_POSITION,
SERVICE_SET_COVER_TILT_POSITION, STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_HOME, STATE_ALARM_DISARMED, STATE_ALARM_TRIGGERED,
STATE_CLOSED, STATE_HOME, STATE_LOCKED, STATE_NOT_HOME, STATE_OFF,
STATE_ON, STATE_OPEN, STATE_UNKNOWN,
STATE_UNLOCKED, SERVICE_SELECT_OPTION)
from homeassistant.core import (
Context, State, DOMAIN as HASS_DOMAIN)
from homeassistant.util.async_ import run_coroutine_threadsafe
from .typing import HomeAssistantType
_LOGGER = logging.getLogger(__name__)
GROUP_DOMAIN = 'group'
# Update this dict of lists when new services are added to HA.
# Each item is a service with a list of required attributes.
SERVICE_ATTRIBUTES = {
SERVICE_NOTIFY: [ATTR_MESSAGE],
SERVICE_SEND_IR_CODE: [ATTR_IR_CODE],
SERVICE_SELECT_OPTION: [ATTR_OPTION],
SERVICE_SET_COVER_POSITION: [ATTR_POSITION],
SERVICE_SET_COVER_TILT_POSITION: [ATTR_TILT_POSITION]
}
# Update this dict when new services are added to HA.
# Each item is a service with a corresponding state.
SERVICE_TO_STATE = {
SERVICE_TURN_ON: STATE_ON,
SERVICE_TURN_OFF: STATE_OFF,
SERVICE_ALARM_ARM_AWAY: STATE_ALARM_ARMED_AWAY,
SERVICE_ALARM_ARM_HOME: STATE_ALARM_ARMED_HOME,
SERVICE_ALARM_DISARM: STATE_ALARM_DISARMED,
SERVICE_ALARM_TRIGGER: STATE_ALARM_TRIGGERED,
SERVICE_LOCK: STATE_LOCKED,
SERVICE_UNLOCK: STATE_UNLOCKED,
SERVICE_OPEN_COVER: STATE_OPEN,
SERVICE_CLOSE_COVER: STATE_CLOSED
}
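# Example of how the two tables above drive async_reproduce_state_legacy:
# a target state of STATE_ON matches SERVICE_TURN_ON through
# SERVICE_TO_STATE, while a state whose attributes include ATTR_POSITION
# matches SERVICE_SET_COVER_POSITION through SERVICE_ATTRIBUTES.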
class AsyncTrackStates:
"""
Record the time when the with-block is entered.
Add all states that have changed since the start time to the return list
when with-block is exited.
Must be run within the event loop.
"""
def __init__(self, hass: HomeAssistantType) -> None:
"""Initialize a TrackStates block."""
self.hass = hass
self.states = [] # type: List[State]
# pylint: disable=attribute-defined-outside-init
def __enter__(self) -> List[State]:
"""Record time from which to track changes."""
self.now = dt_util.utcnow()
return self.states
def __exit__(self, exc_type: Optional[Type[BaseException]],
exc_value: Optional[BaseException],
traceback: Optional[TracebackType]) -> None:
"""Add changes states to changes list."""
self.states.extend(get_changed_since(self.hass.states.async_all(),
self.now))
def get_changed_since(states: Iterable[State],
utc_point_in_time: dt.datetime) -> List[State]:
"""Return list of states that have been changed since utc_point_in_time."""
return [state for state in states
if state.last_updated >= utc_point_in_time]
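# Usage sketch (assumes a running hass instance): the states changed during
# the last five minutes would be
#     get_changed_since(hass.states.async_all(),
#                       dt_util.utcnow() - dt.timedelta(minutes=5))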
@bind_hass
def reproduce_state(hass: HomeAssistantType,
states: Union[State, Iterable[State]],
blocking: bool = False) -> None:
"""Reproduce given state."""
return run_coroutine_threadsafe( # type: ignore
async_reproduce_state(hass, states, blocking), hass.loop).result()
@bind_hass
async def async_reproduce_state(
hass: HomeAssistantType,
states: Union[State, Iterable[State]],
blocking: bool = False,
context: Optional[Context] = None) -> None:
"""Reproduce a list of states on multiple domains."""
if isinstance(states, State):
states = [states]
to_call = defaultdict(list) # type: Dict[str, List[State]]
for state in states:
to_call[state.domain].append(state)
async def worker(domain: str, data: List[State]) -> None:
component = getattr(hass.components, domain)
if hasattr(component, 'async_reproduce_states'):
await component.async_reproduce_states(
data,
context=context)
else:
await async_reproduce_state_legacy(
hass,
domain,
data,
blocking=blocking,
context=context)
if to_call:
# run all domains in parallel
await asyncio.gather(*[
worker(domain, data)
for domain, data in to_call.items()
])
@bind_hass
async def async_reproduce_state_legacy(
hass: HomeAssistantType,
domain: str,
states: Iterable[State],
blocking: bool = False,
context: Optional[Context] = None) -> None:
"""Reproduce given state."""
to_call = defaultdict(list) # type: Dict[Tuple[str, str], List[str]]
if domain == GROUP_DOMAIN:
service_domain = HASS_DOMAIN
else:
service_domain = domain
for state in states:
if hass.states.get(state.entity_id) is None:
_LOGGER.warning("reproduce_state: Unable to find entity %s",
state.entity_id)
continue
domain_services = hass.services.async_services().get(service_domain)
if not domain_services:
_LOGGER.warning(
"reproduce_state: Unable to reproduce state %s (1)", state)
continue
service = None
for _service in domain_services.keys():
if (_service in SERVICE_ATTRIBUTES and
all(attr in state.attributes
for attr in SERVICE_ATTRIBUTES[_service]) or
_service in SERVICE_TO_STATE and
SERVICE_TO_STATE[_service] == state.state):
service = _service
if (_service in SERVICE_TO_STATE and
SERVICE_TO_STATE[_service] == state.state):
break
if not service:
_LOGGER.warning(
"reproduce_state: Unable to reproduce state %s (2)", state)
continue
        # We group service calls for entities by service call.
        # JSON is used to create a hashable version of a dict that may
        # contain lists.
key = (service,
json.dumps(dict(state.attributes), sort_keys=True))
to_call[key].append(state.entity_id)
domain_tasks = [] # type: List[Awaitable[Optional[bool]]]
for (service, service_data), entity_ids in to_call.items():
data = json.loads(service_data)
data[ATTR_ENTITY_ID] = entity_ids
domain_tasks.append(
hass.services.async_call(service_domain, service, data, blocking,
context)
)
if domain_tasks:
await asyncio.wait(domain_tasks, loop=hass.loop)
def state_as_number(state: State) -> float:
"""
Try to coerce our state to a number.
Raises ValueError if this is not possible.
"""
from homeassistant.components.climate import (
STATE_HEAT, STATE_COOL, STATE_IDLE)
if state.state in (STATE_ON, STATE_LOCKED, STATE_ABOVE_HORIZON,
STATE_OPEN, STATE_HOME, STATE_HEAT, STATE_COOL):
return 1
if state.state in (STATE_OFF, STATE_UNLOCKED, STATE_UNKNOWN,
STATE_BELOW_HORIZON, STATE_CLOSED, STATE_NOT_HOME,
STATE_IDLE):
return 0
return float(state.state)
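# Examples: STATE_ON or STATE_LOCKED coerce to 1, STATE_OFF or STATE_CLOSED
# coerce to 0, a numeric state such as '21.5' is returned as a float, and
# any other state (e.g. 'playing') raises ValueError.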
import time
import cv2
from ev3control.rpc import Robot
from rick.controllers import *
from rick.A_star_planning import *
from rick.core import State
from rick.core import main_loop
from rick.async import AsyncCamera
from rick.utils import TrackerWrapper,plot_bbox
from nn_object_detection.object_detectors import NNObjectDetector
from rick.live_plotting import MapRenderer
from detection.marker_localization import get_marker_pose, load_camera_params
import cv2.aruco as aruco
from detection.marker_localization import get_specific_marker_pose, load_camera_params
import numpy as np
from rick.mc_please_github_donot_fuck_with_this_ones import A_star_path_planning_control,compute_A_star_path, A_star_control
from math import pi
from detection.opencv import get_lego_boxes,BB_ligth_green
from rick.motion_control import euclidian_kalman , kalman_filter , kalman_filter2 , robot_control, odom_estimation
import sys
sys.path.append("../slam/")
import mapping
import matplotlib.pyplot as plt
from detection.opencv import detect_purple
PATH_TO_CKPT = "/home/dlrc/projects/DLRCev3/object_detection/nn_object_detection/tf_train_dir/models/faster_rcnn_resnet_lego_v1/train/frozen_inference_graph.pb"
PATH_TO_LABELS = "/home/dlrc/projects/DLRCev3/object_detection/nn_object_detection/tf_train_dir/data/label_map.pbtxt"
print("Creating robot...")
data = np.load('Homographygood.npz')
H=data["arr_0"]
map_renderer = MapRenderer()
def plot_mapa(mapa,robot_traj):
mapa1 = np.array(mapa)
rob = np.array(robot_traj)
print("Before stop")
if mapa1.size:
print("In")
plt.scatter(mapa1[:,0],mapa1[:,1])
print("Out")
if rob.size > 100:
plt.plot(rob[:,0],rob[:,1])
plt.axis([-100, 150, -100, 150])
plt.legend(["Lego", "path"])
plt.show(block=False)
print("After stop")
def search_control(state_search, mapa, pos_rob, t_old):
    t1 = 0
    if state_search == 1:
        target = [0.1, 0.1]  # the point represents the middle of the workspace
        vel_wheels = robot_control(pos_rob, target, K_x=1, K_y=1, K_an=1)
        distance = np.sqrt(np.power(pos_rob[0] - target[0], 2) +
                           np.power(pos_rob[1] - target[1], 2))
        if distance < 10:
            state_search = 2
            t1 = time.time()
    elif state_search == 2:
        vel_wheels = [-100, 100]
    else:
        # Unknown search state: stand still rather than raise a NameError.
        vel_wheels = [0, 0]
    return vel_wheels, state_search, t1
def index23(BB_legos,BB_target):
index=1000
i=0
for box in BB_legos:
if box[0]==BB_target[0][0] and box[1] == BB_target[0][1]:
index = i
i+=1
return index
# LOOKING FOR THE TARGET
def search_target_with_Kalman_and_mapping(robot, frame, ltrack_pos=0,
        rtrack_pos=0, P=np.identity(3), marker_list=[], delete_countdown=0,
        mapa=[], robot_trajectory=[], R=[], state_search=2, t1=0):
new_ltrack_pos = robot.left_track.position
new_rtrack_pos = robot.right_track.position
odom_l, odom_r = new_ltrack_pos - ltrack_pos, new_rtrack_pos - rtrack_pos
    ###################### Markers information coming from the computer vision stuff
###################### Information related with lego blocks mapping
BB_target2=BB_ligth_green(frame)
if BB_target2:
BB_target=BB_target2[0]
plot_bbox(frame, BB_target)
else:
BB_target=[]
delete_countdown=0
#GET LIST OF LEGO LANDMARKS
#lego_landmarks = mapping.cam2rob(BB_legos,H)
print("####################################################################################")
#################### WHAT SLAM IS!
######## 1. ESTIMATE POSITION BY ODOMETRY
estim_rob_pos_odom = odom_estimation(odom_r,odom_l,robot.position)
####### 2. UPDATE THE MAP WITH ODOMETRY INFO
#mapa, delete_countdown,robot_trajectory = mapping.update_mapa(mapa,lego_landmarks,estim_rob_pos_odom,P,delete_countdown, robot_trajectory, index)
mapa=[]
marker_map = np.array([[200,100,0],[50, 0 , 0],[100,0,0],[0,100,0],[100,100,0],[200,0,0]])
####### 3. KALMAN FILTER
Ts = 0.3
estim_rob_pos, P = kalman_filter(odom_r,odom_l,robot.position,marker_list, marker_map,Ts,P)
robot.position = estim_rob_pos
#print("rob_pos odom:", estim_rob_pos_odom, " rob_pos -Kalman", estim_rob_pos)
####### 4. UPDATE MAP POINTS RELATED TO KALMAN
mapa = mapping.after_kalman_improvement(mapa, robot.position, estim_rob_pos_odom)
#### GET GRIPPER POS
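    # The gripper sits 28 map units (presumably cm) ahead of the tracked
    # robot frame, so its pose is the robot pose translated 28 along the
    # heading (degrees converted to radians); the same transform recurs in
    # the other states below.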
d = np.ones(3)
d[0] = estim_rob_pos[0] + 28 *np.cos(estim_rob_pos[2] * pi/180)
d[1] = estim_rob_pos[1] + 28* np.sin(estim_rob_pos[2]*pi/180)
d[2] = estim_rob_pos[2]
R.append(d)
#SHOW THE MAP
map_renderer.plot_bricks_and_trajectory(mapa, R)
    # DEFINE MOTION CONTROL FOR SEARCHING
    # The control is: 1. go to the center of the workspace, 2. rotate for
    # 2 s, 3. select a point close to the center as the new target.
vel_wheels,state_search,t1 = search_control(state_search, mapa, robot.position, t1)
if len(BB_target) > 0:
robot.tracker.init(frame, BB_target)
return ("GO_TO_TARGET", frame, {"tracker" : robot.tracker, "ltrack_pos" : robot.left_track.position ,"rtrack_pos" : robot.right_track.position, "robot_trajectory": robot_trajectory,"pos_rob" : robot.position,"R" : R, "mapa" : mapa})
else:
robot.move(vel_left=vel_wheels[1], vel_right=vel_wheels[0])
return "SEARCH_TARGET", frame, {"ltrack_pos": new_ltrack_pos, "rtrack_pos": new_rtrack_pos, "P": P , "marker_list": [],
"delete_countdown" : delete_countdown , "mapa": mapa, "robot_trajectory": robot_trajectory, "R" : R,
"state_search" : 2, "t1" : t1 }
#GO TO TARGET
def move_to_brick_v3(robot, frame, img_res=np.asarray((640, 480)), atol=10,
                     vel_forward=200, vel_rot=50, atol_move_blind=30,
                     fail_counter=0, center_position_error=25, tracker=None,
                     ltrack_pos=0, rtrack_pos=0, pos_rob=[], marker_list=[],
                     P=np.identity(3), R=[], mapa=[], robot_trajectory=[]):
#LOCALIZATION AND MAPPING
new_ltrack_pos = robot.left_track.position
new_rtrack_pos = robot.right_track.position
odom_l, odom_r = new_ltrack_pos - ltrack_pos, new_rtrack_pos - rtrack_pos
    ###################### Markers information coming from the computer vision stuff
#################3 RELATED WITH LEGOS
BB_target2=BB_ligth_green(frame)
if BB_target2:
BB_target=BB_target2[0]
plot_bbox(frame, BB_target)
else:
BB_target=[]
print("####################################################################################")
#################### WHAT SLAM IS!
######## 1. ESTIMATE POSITION BY ODOMETRY
delete_countdown=0
estim_rob_pos_odom = odom_estimation(odom_r,odom_l,robot.position)
####### 2. UPDATE THE MAP WITH ODOMETRY INFO
#mapa, delete_countdown,robot_trajectory = mapping.update_mapa(mapa,lego_landmarks,estim_rob_pos_odom,P,delete_countdown, robot_trajectory, index)
####### 3. KALMAN FILTER
marker_map = np.array([[200,100,0],[50, 0 , 0],[100,0,0],[0,100,0],[100,100,0],[200,0,0]])
Ts = 0.3
estim_rob_pos, P = kalman_filter(odom_r,odom_l,robot.position,marker_list, marker_map,Ts,P)
robot.position = estim_rob_pos
print("rob_pos odom:", estim_rob_pos_odom, " rob_pos -Kalman", estim_rob_pos)
####### 4. UPDATE MAP POINTS RELATED TO KALMAN
#mapa = mapping.after_kalman_improvement(mapa, robot.position, estim_rob_pos_odom)
mapa=[]
#### GET GRIPPER POS
d = np.ones(3)
d[0] = estim_rob_pos[0] + 28 *np.cos(estim_rob_pos[2] * pi/180)
d[1] = estim_rob_pos[1] + 28* np.sin(estim_rob_pos[2]*pi/180)
d[2] = estim_rob_pos[2]
R.append(d)
map_renderer.plot_bricks_and_trajectory(mapa, R)
###################### Information related with lego blocks mapping
ok, bbox = tracker.update(frame)
if not ok:
BB_legos=get_lego_boxes(frame)
# res = robot.object_detector.detect_with_threshold(frame,threshold=0.9, return_closest=False)
# BB_legos = map(lambda x: x[0], res)
BB_target = detect_purple(frame,BB_legos)
if len(BB_target) == 0:
return "SEARCH_TARGET", frame, {"ltrack_pos": new_ltrack_pos, "rtrack_pos": new_rtrack_pos, "P": P , "marker_list": [],
"delete_countdown" : delete_countdown , "mapa": mapa, "robot_trajectory": robot_trajectory, "R" : R,
"state_search" : 2}
tracker.init(frame, BB_target[0])
bbox = BB_target[0]
#MOVE TO THE TARGET
coords = bbox_center(*bbox)
img_center = img_res / 2 - center_position_error
error = img_center - coords
atol = 5 + coords[1]/480 * 25
cv2.line(frame, (5+int(img_center[0]),0), (25+int(img_center[0]),480), (255,0,0))
cv2.line(frame, (-5+int(img_center[0]),0), (-25+int(img_center[0]),480), (255,0,0))
cv2.line(frame, (int(img_center[0]),0), (int(img_center[0]),480), (0,255,0))
cv2.line(frame,(0,480-atol_move_blind),(640,480-atol_move_blind), (0,255,0))
frame = plot_bbox(frame,bbox, 0, (255,0,0))
#img_center = img_res/2.
if np.isclose(coords[0], img_center[0], atol=atol) and np.isclose(coords[1], img_res[1], atol=atol_move_blind):
robot.move_straight(vel_forward, 500)
return "MOVE_TO_BRICK_BLIND_AND_GRIP", frame, {"R":R,"ltrack_pos" : new_ltrack_pos ,"rtrack_pos" : new_rtrack_pos,"mapa":mapa}
if np.isclose(coords[0], img_center[0], atol=atol):
print("Move straight")
robot.move_straight(vel_forward)
return "GO_TO_TARGET", frame, {"tracker" : tracker, "ltrack_pos" : new_ltrack_pos ,"rtrack_pos" : new_rtrack_pos, "pos_rob" : robot.position,"R" : R, "mapa" : mapa}
elif error[0] < 0:
robot.rotate_left(vel=vel_rot)
return "GO_TO_TARGET", frame, {"tracker" : tracker, "ltrack_pos" : new_ltrack_pos ,"rtrack_pos" : new_rtrack_pos, "pos_rob" : robot.position,"R" : R, "mapa" : mapa}
else:
# Positive velocity for turning left
robot.rotate_right(vel=vel_rot)
return "GO_TO_TARGET", frame, {"tracker" : tracker, "ltrack_pos" : new_ltrack_pos ,"rtrack_pos" : new_rtrack_pos, "pos_rob" : robot.position,"R" : R, "mapa" : mapa}
#MOVE TO BRICK BLIND
def move_to_brick_blind_and_grip(robot, frame, R=[],ltrack_pos=0 ,rtrack_pos=0,marker_list=[],mapa=[], vel=500, t=1500):
# Make sure the grip is open
robot.grip.open()
print("Velocity: ", vel)
# Make sure the elevator is down
print(robot.elevator.is_raised)
print(robot.elevator.position)
robot.elevator.down()
robot.elevator.wait_until_not_moving()
robot.move_straight(vel=vel, time=t)
robot.wait_until_not_moving()
robot.pick_up()
#odometry update
marker_map = np.array([[200,100,0],[50, 0 , 0],[100,0,0],[0,100,0],[100,100,0],[200,0,0]])
P = np.identity(3)
new_ltrack_pos = robot.left_track.position
new_rtrack_pos = robot.right_track.position
odom_l, odom_r = new_ltrack_pos - ltrack_pos, new_rtrack_pos - rtrack_pos
Ts = 0.3
estim_rob_pos, P = kalman_filter(odom_r,odom_l,robot.position,marker_list, marker_map,Ts,P)
robot.position = estim_rob_pos
return "SEARCH_BOX", frame, {"ltrack_pos": new_ltrack_pos, "rtrack_pos": new_rtrack_pos
, "mapa": mapa, "R" : R}
#SEARCH BOX
def search_box(robot, frame, ltrack_pos=0, rtrack_pos=0, P=np.identity(3),
        marker_list=[], delete_countdown=0, mapa=[], robot_trajectory=[],
        R=[], state_search=2, t1=0):
new_ltrack_pos = robot.left_track.position
new_rtrack_pos = robot.right_track.position
odom_l, odom_r = new_ltrack_pos - ltrack_pos, new_rtrack_pos - rtrack_pos
estim_rob_pos_odom = odom_estimation(odom_r,odom_l,robot.position)
marker_map = np.array([[150,0,0]])
marker_map_obj = np.array([[110,0,0]])
Ts = 0.3
estim_rob_pos, P = kalman_filter(odom_r,odom_l,robot.position,marker_list, marker_map_obj,Ts,P)
robot.position = estim_rob_pos
d = np.ones(3)
d[0] = estim_rob_pos[0] + 28 *np.cos(estim_rob_pos[2] * pi/180)
d[1] = estim_rob_pos[1] + 28* np.sin(estim_rob_pos[2]*pi/180)
d[2] = estim_rob_pos[2]
R.append(d)
map_renderer.plot_bricks_and_trajectory(mapa, R)
mtx,dist=load_camera_params()
frame,box_coords = get_specific_marker_pose(frame=frame,mtx=mtx,dist=dist,marker_id=0,markerLength=8.6)
vel_wheels,state_search,t1 = search_control(state_search, mapa, robot.position, t1)
if box_coords:
return ("COMPUTE_PATH", frame, {"box_coords": box_coords, "ltrack_pos": new_ltrack_pos, "rtrack_pos": new_rtrack_pos, "mapa": mapa, "R" : R })
else:
robot.move(vel_left=vel_wheels[1], vel_right=vel_wheels[0])
return "SEARCH_BOX", frame, {"ltrack_pos": new_ltrack_pos, "rtrack_pos": new_rtrack_pos, "P": P , "marker_list": [],
"delete_countdown" : delete_countdown , "mapa": mapa, "robot_trajectory": robot_trajectory, "R" : R,
"state_search" : 2, "t1" : t1 }
def compute_path(robot,frame,box_coords, ltrack_pos = 0, rtrack_pos = 0, mapa = [], R = []):
x=box_coords[0]
y=box_coords[1]
yaw=box_coords[2]
if (y>0 and yaw>-80) or (y<0 and yaw< -100):
print("NICE PATH")
thm=40
thobj=40
x2=x+thm*np.sin(yaw*np.pi/180.)
y2=y-thm*np.cos(yaw*np.pi/180.)
yaw2=0
xobj=x+thobj*np.sin(yaw*np.pi/180.)
yobj=y-thobj*np.cos(yaw*np.pi/180.)
obj=[x,y]
obslist=[]
Map=create_map(obslist)
path=A_star([0,0],obj, Map)
robot.grip.close()
return ("MOVE_TO_BOX",frame, {"Map": Map, "obj":obj, "ltrack_pos": ltrack_pos,
"rtrack_pos": rtrack_pos,
"TIME": time.time(), "R":R})
def A_star_move_to_box_blind(robot, frame, Map,obj=[100,0,0], replan=1,
path=[], iteration=0, ltrack_pos=0, rtrack_pos=0, TIME=0, P = np.identity(3),R=[]):
mtx,dist=load_camera_params()
frame,box_coords = get_specific_marker_pose(frame=frame,mtx=mtx,dist=dist,marker_id=0,markerLength=8.6)
old_path=path
#REPLANNING
new_ltrack_pos = robot.left_track.position
new_rtrack_pos = robot.right_track.position
odom_l, odom_r = new_ltrack_pos - ltrack_pos, new_rtrack_pos - rtrack_pos
marker_list = []
if box_coords:
print("REPLANNNIG")
x=box_coords[0]
y=box_coords[1]
yaw=box_coords[2]
thobj=40
xobj=x+thobj*np.sin(yaw*np.pi/180.)
yobj=y-thobj*np.cos(yaw*np.pi/180.)
obj=[xobj+robot.position[0],yobj+robot.position[1]]
angle = np.arctan2(yobj,xobj)
distance = np.sqrt(np.power(xobj,2) + np.power(yobj,2))
marker_list.append([angle,distance,(yaw+90)*pi/180])
print("MARKER POSITION X AND Y: ", x , y)
marker_map = np.array([[100,0,0]])
marker_map_obj = np.array([[60,0,0]])
Ts = 0.3
estim_rob_pos, P = kalman_filter2(odom_r,odom_l,robot.position,marker_list, marker_map_obj,Ts,P)
d = np.ones(3)
d[0] = estim_rob_pos[0] + 28 *np.cos(estim_rob_pos[2] * pi/180)
d[1] = estim_rob_pos[1] + 28* np.sin(estim_rob_pos[2]*pi/180)
d[2] = estim_rob_pos[2]
R.append(d)
mapa=[]
map_renderer.plot_bricks_and_trajectory(mapa, R)
robot.position= estim_rob_pos
print("robot_estim_pos_Astar: ", robot.position)
#update map
#plt.close()
#fig = plt.figure()
#ax = fig.gca()
#ax.set_xticks(np.arange(-10, 150, 1))
#ax.set_yticks(np.arange(-40, 40, 1))
#path=A_star(robot.position[0:2], marker_map_obj[0,0:2], Map)
#plt.plot(path[:,0],path[:,1])
#plt.show(block=False)
#print("PATH FROM THE REPLANNING",path.shape)
replan=1
goal_pos=marker_map_obj[0,:]
t0 = time.time()
vel_wheels, new_path = A_star_control(robot.position,goal_pos,
Map, robot.sampling_rate,
odom_r= odom_r,odom_l=odom_l,
iteration=iteration, path=path)
#print("DIFFERENTCE WITH THE GOAL:",abs(estim_rob_pos[0]-goal_pos[0]),abs(estim_rob_pos[1]-goal_pos[1]))
#CONDITION FOR EXITTING
distance_to_target = np.sqrt(np.power(estim_rob_pos[0]-marker_map_obj[0,0],2)+ np.power(estim_rob_pos[1]-marker_map_obj[0,1],2))
print("###########################################################################################################")
print("disatnce to target: ", distance_to_target)
print("estimated vs goal", estim_rob_pos[0:2],goal_pos)
print("###########################################################################################################")
if abs(estim_rob_pos[0]-marker_map_obj[0,0]) < 20 and abs(estim_rob_pos[1]-marker_map_obj[0,1]) < 10:
return ("MOVE_TO_BOX_BY_VISION", frame, {"replan":replan,"iteration" : iteration, "path" : new_path, "ltrack_pos": new_ltrack_pos, "rtrack_pos": new_rtrack_pos, "TIME": t0})
robot.move(vel_left=vel_wheels[1], vel_right=vel_wheels[0])
iteration += 1
return ("MOVE_TO_BOX", frame, {"replan":replan,"Map":Map,"obj":goal_pos,"iteration" : iteration, "path" : new_path,
"ltrack_pos": new_ltrack_pos, "rtrack_pos": new_rtrack_pos, "TIME": t0,"R":R})
def PID_control(robot, marker_map, box_coords,hist):
vel_st=100
vel_rot=40
lat_tol=4
vel_st2=100
yshift=2
er_x = marker_map[0,0] - robot[0]
er_y = marker_map[0,1] - robot[1]
er_angle = np.arctan2(er_y, er_x) - robot[2]*pi/180
print("ANGLES WITH MARKER,WITH WORLD AND ERROR",np.arctan2(er_y, er_x)*180/pi,robot[2],er_angle*180/pi)
if er_angle > pi:
er_angle = er_angle - 2*pi
if er_angle < -pi:
er_angle = er_angle + 2*pi
distance = np.sqrt(np.power(er_x,2)+np.power(er_y,2))
if box_coords:
print("Y_DISTANCE_TO_MARKER",box_coords[1])
if abs(box_coords[1]+yshift)>lat_tol and box_coords[0]>35:
vel_wheels=np.asarray([-vel_rot,vel_rot])*np.sign(-box_coords[1]-yshift)+np.asarray([vel_st2,vel_st2])
print("Case 1")
if abs(box_coords[1]+yshift)>lat_tol and box_coords[0]<35:
vel_wheels=np.asarray([-vel_rot,vel_rot])*np.sign(-box_coords[1]-yshift)
print("Case 2")
elif box_coords[0]>35:
vel_wheels=np.asarray([vel_st,vel_st])
print("Case3")
else:
vel_wheels=np.asarray([0,0])
hist = 0
print("STOP")
else:
if hist == 0:
vel_wheels=np.asarray([0,0])
elif hist==2:
vel_wheels=np.asarray([vel_rot,-vel_rot])*np.sign(er_angle)
hist=np.sign(er_angle)
print("first hist",hist)
elif er_angle > 0.7:
vel_wheels=np.asarray([vel_rot,-vel_rot])
hist = 1
elif er_angle <-0.7:
vel_wheels=np.asarray([-vel_rot,vel_rot])
hist = -1
elif hist ==1 :
vel_wheels=np.asarray([vel_rot,-vel_rot])
elif hist==-1 :
vel_wheels=np.asarray([-vel_rot,vel_rot])
print("CORRECTING ANGLE and HISTERESIS",er_angle,hist)
return vel_wheels, hist
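# The `hist` flag implements a simple hysteresis: hist == 2 means "pick a
# rotation direction from the sign of the angular error", hist == 1/-1 keep
# that direction until the error leaves the +/-0.7 rad band (preventing
# oscillation), and hist == 0 is the "arrived, stop" signal consumed by
# move_to_box_by_vision.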
def move_to_box_by_vision(robot, frame, replan=1,
path=[], iteration=0, ltrack_pos=0, rtrack_pos=0, TIME=0, P = np.identity(3),
histeresis = 2):
mtx,dist=load_camera_params()
frame,box_coords = get_specific_marker_pose(frame=frame,mtx=mtx,dist=dist,marker_id=0,markerLength=8.6)
new_ltrack_pos = robot.left_track.position
new_rtrack_pos = robot.right_track.position
odom_l, odom_r = new_ltrack_pos - ltrack_pos, new_rtrack_pos - rtrack_pos
marker_list = []
if box_coords:
x=box_coords[0]
y=box_coords[1]
yaw=box_coords[2]
thobj=40
xobj=x+thobj*np.sin(yaw*np.pi/180.)
yobj=y-thobj*np.cos(yaw*np.pi/180.)
obj=[xobj+robot.position[0],yobj+robot.position[1]]
angle = np.arctan2(yobj,xobj)
distance = np.sqrt(np.power(xobj,2) + np.power(yobj,2))
marker_list.append([angle,distance,(yaw+90)*pi/180])
print("MARKER POSITION X AND Y: ", x , y)
marker_map = np.array([[100,0,0]])
marker_map_obj = np.array([[60,0,0]])
Ts = 0.3
estim_rob_pos, P = kalman_filter2(odom_r,odom_l,robot.position,marker_list, marker_map_obj,Ts,P)
robot.position= estim_rob_pos
print("robot_estim_pos: ", robot.position)
vel_wheels, hist = PID_control(estim_rob_pos, marker_map,box_coords, histeresis)
if hist==0:
return "PLACE_OBJECT_IN_THE_BOX",frame,{}
robot.move(vel_wheels[0],vel_wheels[1])
return ("MOVE_TO_BOX_BY_VISION", frame, {"ltrack_pos": new_ltrack_pos, "rtrack_pos" : new_rtrack_pos, "histeresis" : hist})
def place_object_in_the_box(robot,frame):
robot.move(vel_left=100,vel_right=100,time=2000)
print("MOVING")
robot.left_track.wait_until_not_moving(timeout=3000)
robot.reset()
robot.grip.wait_until_not_moving(timeout=3000)
print("finish")
return "FINAL_STATE"
with Robot(AsyncCamera(1), tracker=TrackerWrapper(cv2.TrackerKCF_create), object_detector=None ) as robot:
robot.map = [(200, 0)]
robot.sampling_rate = 0.1
print("These are the robot motor positions before planning:", robot.left_track.position, robot.right_track.position)
# Define the state graph, we can do this better, currently each method
# returns the next state name
states = [
State(
name="SEARCH_TARGET",
act=search_target_with_Kalman_and_mapping,
default_args={
"ltrack_pos": robot.left_track.position,
"rtrack_pos": robot.right_track.position,
"P" : np.identity(3),
"delete_countdown" : 0,
"mapa": [],
"robot_trajectory": []
}
),
State(
name="GO_TO_TARGET",
act=move_to_brick_v3,
default_args={
"vel_forward" : 200,
"vel_rot" : 60,
"atol_move_blind" : 100
}
),
State(
name="MOVE_TO_BRICK_BLIND_AND_GRIP",
act=move_to_brick_blind_and_grip,
default_args={"vel": 250,
"t" : 1200,
"ltrack_pos": robot.left_track.position,
"rtrack_pos": robot.right_track.position,
}
),
State(
name="SEARCH_BOX",
act=search_box,
default_args={
"ltrack_pos": robot.left_track.position,
"rtrack_pos": robot.right_track.position,
}
),
State(
name="COMPUTE_PATH",
act=compute_path,
default_args={"box_coords": [100,0,0],
"ltrack_pos": robot.left_track.position,
"rtrack_pos": robot.right_track.position,
}
),
State(
name="MOVE_TO_BOX",
act=A_star_move_to_box_blind,
default_args={
"ltrack_pos": robot.left_track.position,
"rtrack_pos": robot.right_track.position,
"TIME": time.time()
}
),
State(
name="MOVE_TO_BOX_BY_VISION",
act=move_to_box_by_vision,
default_args={
"ltrack_pos": robot.left_track.position,
"rtrack_pos": robot.right_track.position,
"TIME": time.time()
}
),
State(
name="PLACE_OBJECT_IN_THE_BOX",
act=place_object_in_the_box,
),
State(
name="FINAL_STATE",
act=lambda robot, frame, **args: time.sleep(.5)
)
]
state_dict = {}
for state in states:
state_dict[state.name] = state
start_state = states[0]
main_loop(robot, start_state, state_dict, delay=0)
#!/usr/bin/env python2
from __future__ import print_function
from bs4 import BeautifulSoup as bs
import requests
import sys
import os
def readConfig():
"""
Read conf file and returns (editorName, pathToSave)
"""
confPath = os.path.expanduser("~")
confFile = os.path.join(confPath, ".forcecode")
if not os.path.isfile(confFile):
fileI = open(confFile, "w")
fileI.write("editor:gedit:")
fileI.write("path:~/.forceCode/:")
fileI.close()
return ('gedit','~/.forceCode/')
else:
file = open(confFile, "r")
data = file.read()
data = data.split(':')
return (data[1], data[3])
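# The conf file written above is a single line of colon-separated fields,
# e.g. "editor:gedit:path:~/.forceCode/:", so after split(':') data[1] is
# the editor and data[3] is the save path.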
class forcecode:
"""
Class to extract information from CodeForces Website
"""
def __init__(self):
"""
Initializes the class
"""
self.round = raw_input("Enter the round number : ")
assert self.round.isdigit()
self.url = r"http://codeforces.com/contest/" + str(self.round) + r"/problems"
self.editor, self.tempPath = readConfig()
print("Waiting to Connect to CodeForces...")
self.soup = bs(requests.get(self.url).text)
print("Round " + self.round + " Connected!")
        self.tmpPath = os.path.expanduser(os.path.join(self.tempPath, str(self.round)))
self.data = [{}]
self.input = []
self.output = []
self.doIt()
def doIt(self):
self.getIO()
self.getQuestion()
self.saveData()
        #self.showAll()
def getIO(self):
"""
Parses input/output data from self.soup
"""
di = self.soup.findAll('div',{'class' : 'input'})
do = self.soup.findAll('div',{'class' : 'output'})
for x, y in zip(di, do):
t1 = str(x).replace("<div class=\"input\"><div class=\"title\">Input</div><pre>","")
t1 = t1.replace("<br/>","\n").replace("</pre></div>","")
t2 = str(y).replace("<div class=\"output\"><div class=\"title\">Output</div><pre>","")
t2 = t2.replace("<br/>","\n").replace("</pre></div>","")
self.input.append(t1)
self.output.append(t2)
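    def _extract_pre_text(self, div):
        """
        A sketch of a sturdier alternative to the string replacement in
        getIO (not wired in): let BeautifulSoup extract the <pre> text
        directly, with '\n' as the separator so <br/> tags become newlines.
        """
        pre = div.find('pre')
        return pre.get_text('\n') if pre else ''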
def getQuestion(self):
"""
Parses Question from self.soup
"""
t1 = []
for x in self.soup.findAll('div', {'class' : 'title'}):
t1.append(x.text)
currQues = -1
while len(t1) > 0:
if t1[0] == "Input":
try:
try:
self.data[currQues]["input"].append(self.input[0])
self.data[currQues]["output"].append(self.output[0])
                    except KeyError:
self.data[currQues]["input"] = []
self.data[currQues]["output"] = []
self.data[currQues]["input"].append(self.input[0])
self.data[currQues]["output"].append(self.output[0])
finally:
self.input.pop(0)
self.output.pop(0)
t1.pop(0)
elif t1[0] == "Output":
t1.pop(0)
else:
currQues += 1
self.data[currQues]["question"] = t1[0]
t1.pop(0)
self.data.append({})
self.data.pop()
i = 0
for t1 in self.soup.findAll('div', {'class' : 'problem-statement'}):
full_quest = ""
for quest in t1.findAll('p'):
full_quest = full_quest + '\n' + quest.text
self.data[i]["detail"] = full_quest
i += 1
def showAll(self):
"""
Show All data on screen
"""
for dat in self.data:
print("Question : ")
print(dat["question"])
print(dat["detail"])
print("Input : ")
for x in dat["input"]:
print(x)
print("Output : ")
for x in dat["output"]:
print(x)
raw_input("Press Any Key...")
def saveData(self):
"""
Save data to Disk
"""
fileN = ""
for tgn in xrange(len(self.data)):
tg = chr(ord('A') + tgn)
tagPath = os.path.join(self.tmpPath, tg)
if not os.path.exists(tagPath):
os.makedirs(tagPath)
inputFile = os.path.join(tagPath, "input.txt")
outputFile = os.path.join(tagPath, "output.txt")
cppFile = os.path.join(tagPath, "program" + tg + ".cpp")
fileI = open(inputFile, "w")
for x in self.data[tgn]["input"]:
try:
fileI.write(x)
                except UnicodeEncodeError:
fileI.write(x.encode('utf8'))
fileI.close()
fileO = open(outputFile, "w")
for x in self.data[tgn]["output"]:
try:
fileO.write(x)
                except UnicodeEncodeError:
fileO.write(x.encode('utf8'))
fileO.close()
fileC = open(cppFile, "w")
fileC.write("/*\n")
try:
fileC.write(self.data[tgn]["question"])
            except UnicodeEncodeError:
fileC.write(self.data[tgn]["question"].encode('utf8'))
try:
fileC.write(self.data[tgn]["detail"])
            except UnicodeEncodeError:
fileC.write(self.data[tgn]["detail"].encode('utf8'))
fileC.write("\n*/\n\n")
fileC.close()
fileN = fileN + " " + cppFile
print("All files save to : " + self.tmpPath + ".")
if self.editor != "NONE":
print("Opening All files in " + self.editor + " editor.")
os.system(self.editor + " " + fileN)
from glob import glob as globe
def getCppFile():
"""
Returns the CPP file which is to be investigated
"""
allCppFile = globe("*.cpp")
    if len(allCppFile) == 0:
print("No C++ File found.")
sys.exit(1)
elif len(allCppFile) > 1:
print("More than one C++ files Found\nYou need to enter manually.")
cppFile = raw_input("Enter the Name of C++ file : ")
path = os.path.join(os.getcwd(), cppFile)
if not os.path.isfile(path):
print("No C++ File found with Name : " + cppFile)
sys.exit(1)
else:
return os.path.join(os.getcwd(), cppFile)
else:
print("C++ file found : " + allCppFile[0])
return os.path.join(os.getcwd(), allCppFile[0])
def getInputFile():
"""
Returns the input file
"""
inputFile = globe("input.txt")
if len(inputFile) < 1:
print("No input file found.\nEnter manually.")
inputFile = raw_input("Enter input File : ")
path = os.path.join(os.getcwd(), intputFile)
if not os.path.isfile(path):
print("No File found with Name : " + cppFile)
sys.exit(1)
else:
return os.path.join(os.getcwd(), intputFile)
else:
print("Input File Found : " + inputFile[0])
return os.path.join(os.getcwd(), inputFile[0])
def getOutputFile():
"""
    Returns the output file, if present
"""
outputFile = globe("output.txt")
if len(outputFile) == 1:
return os.path.join(os.getcwd(), outputFile[0])
class forcecoderunner:
"""
Code to run the C++ file
"""
def __init__(self):
"""
Initialises the Class
"""
self.cppFile = getCppFile()
self.inputFile = getInputFile()
self.outputOut = os.path.join(os.getcwd(), os.path.splitext(self.cppFile)[0] + ".out")
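        # The shell command assembled below compiles with aggressive warnings
        # plus libstdc++ debug checks, then runs the binary with input.txt on
        # stdin; `set -e` aborts the pipeline if compilation fails, so a stale
        # binary is never executed.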
self.command = "set -e;g++ -std=c++14 -Wall -Wextra -pedantic -pthread -O2 -Wshadow -Wformat=2 -Wfloat-equal -Wlogical-op -Wcast-qual -Wcast-align -fwhole-program -D_GLIBCXX_DEBUG -D_GLIBCXX_DEBUG_PEDANTIC "
self.command = self.command + self.cppFile + " -o " + self.outputOut + " && " + self.outputOut + " < " + self.inputFile
poutput = os.popen(self.command).read().strip()
print("Output of your code : ")
print(poutput)
self.outputFile = getOutputFile()
        if self.outputFile is not None:
file = open(self.outputFile, 'r')
eoutput = file.read().strip()
print("Currect Output : ")
print(eoutput)
if poutput == eoutput:
try:
roundno = raw_input('Enter round number : ')
if roundno.isdigit():
os.system("xdg-open http://codeforces.com/contest/" + roundno + "/submit")
                except Exception:
pass
def getHelp():
    """
    Print basic usage information
    """
    print("Usage: forcecode [-r | -h]")
    print("  (no option)  fetch the problems of a CodeForces round")
    print("  -r           compile and run the local C++ solution")
    print("  -h           show this help")
if __name__ == '__main__':
    if len(sys.argv) > 1 and sys.argv[1] == '-r':
        f1 = forcecoderunner()
    elif len(sys.argv) > 1 and sys.argv[1] == '-h':
        getHelp()
    else:
        bl = forcecode()
|
|
"""
Tests for the pandas custom headers in http(s) requests
"""
import gzip
import http.server
from io import BytesIO
import multiprocessing
import socket
import time
import urllib.error
import pytest
from pandas.compat import is_ci_environment
import pandas.util._test_decorators as td
import pandas as pd
import pandas._testing as tm
pytestmark = pytest.mark.skipif(
is_ci_environment(),
reason="This test can hang in our CI min_versions build "
"and leads to '##[error]The runner has "
"received a shutdown signal...' in GHA. GH 45651",
)
class BaseUserAgentResponder(http.server.BaseHTTPRequestHandler):
"""
Base class for setting up a server that can be set up to respond
with a particular file format with accompanying content-type headers.
The interfaces on the different io methods are different enough
that this seemed logical to do.
"""
def start_processing_headers(self):
"""
shared logic at the start of a GET request
"""
self.send_response(200)
self.requested_from_user_agent = self.headers["User-Agent"]
response_df = pd.DataFrame(
{
"header": [self.requested_from_user_agent],
}
)
return response_df
def gzip_bytes(self, response_bytes):
"""
some web servers will send back gzipped files to save bandwidth
"""
with BytesIO() as bio:
with gzip.GzipFile(fileobj=bio, mode="w") as zipper:
zipper.write(response_bytes)
response_bytes = bio.getvalue()
return response_bytes
def write_back_bytes(self, response_bytes):
"""
shared logic at the end of a GET request
"""
self.wfile.write(response_bytes)
class CSVUserAgentResponder(BaseUserAgentResponder):
def do_GET(self):
response_df = self.start_processing_headers()
self.send_header("Content-Type", "text/csv")
self.end_headers()
response_bytes = response_df.to_csv(index=False).encode("utf-8")
self.write_back_bytes(response_bytes)
class GzippedCSVUserAgentResponder(BaseUserAgentResponder):
def do_GET(self):
response_df = self.start_processing_headers()
self.send_header("Content-Type", "text/csv")
self.send_header("Content-Encoding", "gzip")
self.end_headers()
response_bytes = response_df.to_csv(index=False).encode("utf-8")
response_bytes = self.gzip_bytes(response_bytes)
self.write_back_bytes(response_bytes)
class JSONUserAgentResponder(BaseUserAgentResponder):
def do_GET(self):
response_df = self.start_processing_headers()
self.send_header("Content-Type", "application/json")
self.end_headers()
response_bytes = response_df.to_json().encode("utf-8")
self.write_back_bytes(response_bytes)
class GzippedJSONUserAgentResponder(BaseUserAgentResponder):
def do_GET(self):
response_df = self.start_processing_headers()
self.send_header("Content-Type", "application/json")
self.send_header("Content-Encoding", "gzip")
self.end_headers()
response_bytes = response_df.to_json().encode("utf-8")
response_bytes = self.gzip_bytes(response_bytes)
self.write_back_bytes(response_bytes)
class ParquetPyArrowUserAgentResponder(BaseUserAgentResponder):
def do_GET(self):
response_df = self.start_processing_headers()
self.send_header("Content-Type", "application/octet-stream")
self.end_headers()
response_bytes = response_df.to_parquet(index=False, engine="pyarrow")
self.write_back_bytes(response_bytes)
class ParquetFastParquetUserAgentResponder(BaseUserAgentResponder):
def do_GET(self):
response_df = self.start_processing_headers()
self.send_header("Content-Type", "application/octet-stream")
self.end_headers()
# the fastparquet engine doesn't like to write to a buffer
# it can do it via the open_with function being set appropriately
# however it automatically calls the close method and wipes the buffer
# so just overwrite that attribute on this instance to not do that
# protected by an importorskip in the respective test
import fsspec
response_df.to_parquet(
"memory://fastparquet_user_agent.parquet",
index=False,
engine="fastparquet",
compression=None,
)
with fsspec.open("memory://fastparquet_user_agent.parquet", "rb") as f:
response_bytes = f.read()
self.write_back_bytes(response_bytes)
class PickleUserAgentResponder(BaseUserAgentResponder):
def do_GET(self):
response_df = self.start_processing_headers()
self.send_header("Content-Type", "application/octet-stream")
self.end_headers()
bio = BytesIO()
response_df.to_pickle(bio)
response_bytes = bio.getvalue()
self.write_back_bytes(response_bytes)
class StataUserAgentResponder(BaseUserAgentResponder):
def do_GET(self):
response_df = self.start_processing_headers()
self.send_header("Content-Type", "application/octet-stream")
self.end_headers()
bio = BytesIO()
response_df.to_stata(bio, write_index=False)
response_bytes = bio.getvalue()
self.write_back_bytes(response_bytes)
class AllHeaderCSVResponder(http.server.BaseHTTPRequestHandler):
"""
Send all request headers back for checking round trip
"""
def do_GET(self):
response_df = pd.DataFrame(self.headers.items())
self.send_response(200)
self.send_header("Content-Type", "text/csv")
self.end_headers()
response_bytes = response_df.to_csv(index=False).encode("utf-8")
self.wfile.write(response_bytes)
def wait_until_ready(func, *args, **kwargs):
def inner(*args, **kwargs):
while True:
try:
return func(*args, **kwargs)
except urllib.error.URLError:
# Connection refused as http server is starting
time.sleep(0.1)
return inner
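# wait_until_ready wraps a reader such as pd.read_csv so the first request
# simply retries until the server process spawned by the responder fixture
# has bound its port and stopped refusing connections.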
def process_server(responder, port):
with http.server.HTTPServer(("localhost", port), responder) as server:
server.handle_request()
server.server_close()
@pytest.fixture
def responder(request):
"""
Fixture that starts a local http server in a separate process on localhost
and returns the port.
Running in a separate process instead of a thread to allow termination/killing
of http server upon cleanup.
"""
# Find an available port
with socket.socket() as sock:
sock.bind(("localhost", 0))
port = sock.getsockname()[1]
server_process = multiprocessing.Process(
target=process_server, args=(request.param, port)
)
server_process.start()
yield port
server_process.join(10)
server_process.terminate()
kill_time = 5
wait_time = 0
while server_process.is_alive():
if wait_time > kill_time:
server_process.kill()
break
else:
wait_time += 0.1
time.sleep(0.1)
server_process.close()
@pytest.mark.parametrize(
"responder, read_method, parquet_engine",
[
(CSVUserAgentResponder, pd.read_csv, None),
(JSONUserAgentResponder, pd.read_json, None),
(ParquetPyArrowUserAgentResponder, pd.read_parquet, "pyarrow"),
pytest.param(
ParquetFastParquetUserAgentResponder,
pd.read_parquet,
"fastparquet",
# TODO(ArrayManager) fastparquet
marks=[
td.skip_array_manager_not_yet_implemented,
],
),
(PickleUserAgentResponder, pd.read_pickle, None),
(StataUserAgentResponder, pd.read_stata, None),
(GzippedCSVUserAgentResponder, pd.read_csv, None),
(GzippedJSONUserAgentResponder, pd.read_json, None),
],
indirect=["responder"],
)
def test_server_and_default_headers(responder, read_method, parquet_engine):
if parquet_engine is not None:
pytest.importorskip(parquet_engine)
if parquet_engine == "fastparquet":
pytest.importorskip("fsspec")
read_method = wait_until_ready(read_method)
if parquet_engine is None:
df_http = read_method(f"http://localhost:{responder}")
else:
df_http = read_method(f"http://localhost:{responder}", engine=parquet_engine)
assert not df_http.empty
@pytest.mark.parametrize(
"responder, read_method, parquet_engine",
[
(CSVUserAgentResponder, pd.read_csv, None),
(JSONUserAgentResponder, pd.read_json, None),
(ParquetPyArrowUserAgentResponder, pd.read_parquet, "pyarrow"),
pytest.param(
ParquetFastParquetUserAgentResponder,
pd.read_parquet,
"fastparquet",
# TODO(ArrayManager) fastparquet
marks=[
td.skip_array_manager_not_yet_implemented,
],
),
(PickleUserAgentResponder, pd.read_pickle, None),
(StataUserAgentResponder, pd.read_stata, None),
(GzippedCSVUserAgentResponder, pd.read_csv, None),
(GzippedJSONUserAgentResponder, pd.read_json, None),
],
indirect=["responder"],
)
def test_server_and_custom_headers(responder, read_method, parquet_engine):
if parquet_engine is not None:
pytest.importorskip(parquet_engine)
if parquet_engine == "fastparquet":
pytest.importorskip("fsspec")
custom_user_agent = "Super Cool One"
df_true = pd.DataFrame({"header": [custom_user_agent]})
read_method = wait_until_ready(read_method)
if parquet_engine is None:
df_http = read_method(
f"http://localhost:{responder}",
storage_options={"User-Agent": custom_user_agent},
)
else:
df_http = read_method(
f"http://localhost:{responder}",
storage_options={"User-Agent": custom_user_agent},
engine=parquet_engine,
)
tm.assert_frame_equal(df_true, df_http)
@pytest.mark.parametrize(
"responder, read_method",
[
(AllHeaderCSVResponder, pd.read_csv),
],
indirect=["responder"],
)
def test_server_and_all_custom_headers(responder, read_method):
custom_user_agent = "Super Cool One"
custom_auth_token = "Super Secret One"
storage_options = {
"User-Agent": custom_user_agent,
"Auth": custom_auth_token,
}
read_method = wait_until_ready(read_method)
df_http = read_method(
f"http://localhost:{responder}",
storage_options=storage_options,
)
df_http = df_http[df_http["0"].isin(storage_options.keys())]
df_http = df_http.sort_values(["0"]).reset_index()
df_http = df_http[["0", "1"]]
keys = list(storage_options.keys())
df_true = pd.DataFrame({"0": keys, "1": [storage_options[k] for k in keys]})
df_true = df_true.sort_values(["0"])
df_true = df_true.reset_index().drop(["index"], axis=1)
tm.assert_frame_equal(df_true, df_http)
@pytest.mark.parametrize(
"engine",
[
"pyarrow",
"fastparquet",
],
)
def test_to_parquet_to_disk_with_storage_options(engine):
headers = {
"User-Agent": "custom",
"Auth": "other_custom",
}
pytest.importorskip(engine)
true_df = pd.DataFrame({"column_name": ["column_value"]})
msg = (
"storage_options passed with file object or non-fsspec file path|"
"storage_options passed with buffer, or non-supported URL"
)
with pytest.raises(ValueError, match=msg):
true_df.to_parquet("/tmp/junk.parquet", storage_options=headers, engine=engine)
|
|
from __future__ import unicode_literals
import argparse
import json
import re
import sys
from threading import Lock
import six
from flask import Flask
from flask.testing import FlaskClient
from six.moves.urllib.parse import urlencode
from werkzeug.routing import BaseConverter
from werkzeug.serving import run_simple
from moto.backends import BACKENDS
from moto.core.utils import convert_flask_to_httpretty_response
HTTP_METHODS = ["GET", "POST", "PUT", "DELETE", "HEAD", "PATCH"]
class DomainDispatcherApplication(object):
"""
Dispatch requests to different applications based on the "Host:" header
value. We'll match the host header value with the url_bases of each backend.
"""
def __init__(self, create_app, service=None):
self.create_app = create_app
self.lock = Lock()
self.app_instances = {}
self.service = service
def get_backend_for_host(self, host):
if self.service:
return self.service
if host in BACKENDS:
return host
for backend_name, backend in BACKENDS.items():
for url_base in list(backend.values())[0].url_bases:
if re.match(url_base, 'http://%s' % host):
return backend_name
raise RuntimeError('Invalid host: "%s"' % host)
def get_application(self, environ):
path_info = environ.get('PATH_INFO', '')
# The URL path might contain non-ASCII text, for instance unicode S3 bucket names
if six.PY2 and isinstance(path_info, str):
path_info = six.u(path_info)
if six.PY3 and isinstance(path_info, six.binary_type):
path_info = path_info.decode('utf-8')
if path_info.startswith("/moto-api") or path_info == "/favicon.ico":
host = "moto_api"
elif path_info.startswith("/latest/meta-data/"):
host = "instance_metadata"
else:
host = environ['HTTP_HOST'].split(':')[0]
if host in {'localhost', 'motoserver'} or host.startswith("192.168."):
# Fall back to parsing auth header to find service
# ['Credential=sdffdsa', '20170220', 'us-east-1', 'sns', 'aws4_request']
try:
                    auth_credential = environ['HTTP_AUTHORIZATION'].split(",")[0].split()[1]
                    _, _, region, service, _ = auth_credential.split("/")
except (KeyError, ValueError):
# Some cognito-idp endpoints (e.g. change password) do not receive an auth header.
if environ.get('HTTP_X_AMZ_TARGET', '').startswith('AWSCognitoIdentityProviderService'):
service = 'cognito-idp'
else:
service = 's3'
region = 'us-east-1'
if service == 'dynamodb':
dynamo_api_version = environ['HTTP_X_AMZ_TARGET'].split("_")[1].split(".")[0]
                # If the API version is newer, use dynamodb2. Comparing the
                # version strings lexicographically is safe here because they
                # are fixed-width YYYYMMDD dates.
if dynamo_api_version > "20111205":
host = "dynamodb2"
else:
host = "{service}.{region}.amazonaws.com".format(
service=service, region=region)
with self.lock:
backend = self.get_backend_for_host(host)
app = self.app_instances.get(backend, None)
if app is None:
app = self.create_app(backend)
self.app_instances[backend] = app
return app
def __call__(self, environ, start_response):
backend_app = self.get_application(environ)
return backend_app(environ, start_response)
class RegexConverter(BaseConverter):
# http://werkzeug.pocoo.org/docs/routing/#custom-converters
def __init__(self, url_map, *items):
super(RegexConverter, self).__init__(url_map)
self.regex = items[0]
class AWSTestHelper(FlaskClient):
def action_data(self, action_name, **kwargs):
"""
Method calls resource with action_name and returns data of response.
"""
opts = {"Action": action_name}
opts.update(kwargs)
res = self.get("/?{0}".format(urlencode(opts)),
headers={"Host": "{0}.us-east-1.amazonaws.com".format(self.application.service)})
return res.data.decode("utf-8")
def action_json(self, action_name, **kwargs):
"""
Method calls resource with action_name and returns object obtained via
deserialization of output.
"""
return json.loads(self.action_data(action_name, **kwargs))
def create_backend_app(service):
from werkzeug.routing import Map
# Create the backend_app
backend_app = Flask(__name__)
backend_app.debug = True
backend_app.service = service
# Reset view functions to reset the app
backend_app.view_functions = {}
backend_app.url_map = Map()
backend_app.url_map.converters['regex'] = RegexConverter
backend = list(BACKENDS[service].values())[0]
for url_path, handler in backend.flask_paths.items():
if handler.__name__ == 'dispatch':
endpoint = '{0}.dispatch'.format(handler.__self__.__name__)
else:
endpoint = None
original_endpoint = endpoint
index = 2
while endpoint in backend_app.view_functions:
            # HACK: Sometimes we map the same view to multiple url_paths. Flask
            # requires us to have different names.
endpoint = original_endpoint + str(index)
index += 1
backend_app.add_url_rule(
url_path,
endpoint=endpoint,
methods=HTTP_METHODS,
view_func=convert_flask_to_httpretty_response(handler),
strict_slashes=False,
)
backend_app.test_client_class = AWSTestHelper
return backend_app
def main(argv=sys.argv[1:]):
parser = argparse.ArgumentParser()
# Keep this for backwards compat
parser.add_argument(
"service",
type=str,
nargs='?', # http://stackoverflow.com/a/4480202/731592
default=None)
parser.add_argument(
'-H', '--host', type=str,
help='Which host to bind',
default='127.0.0.1')
parser.add_argument(
'-p', '--port', type=int,
help='Port number to use for connection',
default=5000)
parser.add_argument(
'-r', '--reload',
action='store_true',
help='Reload server on a file change',
default=False
)
parser.add_argument(
'-s', '--ssl',
action='store_true',
help='Enable SSL encrypted connection with auto-generated certificate (use https://... URL)',
default=False
)
parser.add_argument(
'-c', '--ssl-cert', type=str,
help='Path to SSL certificate',
default=None)
parser.add_argument(
'-k', '--ssl-key', type=str,
help='Path to SSL private key',
default=None)
args = parser.parse_args(argv)
# Wrap the main application
main_app = DomainDispatcherApplication(
create_backend_app, service=args.service)
main_app.debug = True
ssl_context = None
if args.ssl_key and args.ssl_cert:
ssl_context = (args.ssl_cert, args.ssl_key)
elif args.ssl:
ssl_context = 'adhoc'
run_simple(args.host, args.port, main_app,
threaded=True, use_reloader=args.reload,
ssl_context=ssl_context)
if __name__ == '__main__':
main()
|
|
import collections
import operator
import os
import json
import logging
import mimetypes
import md5
from django.core.urlresolvers import reverse
from django.core.cache import cache
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.models import User
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.shortcuts import get_object_or_404, render_to_response
from django.template import RequestContext
from django.views.generic import ListView, DetailView
from django.utils.datastructures import SortedDict
from django.views.decorators.cache import cache_page
from taggit.models import Tag
import requests
from .base import ProjectOnboardMixin
from readthedocs.builds.constants import LATEST
from readthedocs.builds.filters import VersionSlugFilter
from readthedocs.builds.models import Version
from readthedocs.projects.models import Project, ImportedFile
from readthedocs.search.indexes import PageIndex
from readthedocs.search.views import LOG_TEMPLATE
log = logging.getLogger(__name__)
search_log = logging.getLogger(__name__ + '.search')
mimetypes.add_type("application/epub+zip", ".epub")
class ProjectIndex(ListView):
model = Project
def get_queryset(self):
queryset = Project.objects.public(self.request.user)
if self.kwargs.get('tag'):
self.tag = get_object_or_404(Tag, slug=self.kwargs.get('tag'))
queryset = queryset.filter(tags__name__in=[self.tag.slug])
else:
self.tag = None
if self.kwargs.get('username'):
self.user = get_object_or_404(User, username=self.kwargs.get('username'))
queryset = queryset.filter(user=self.user)
else:
self.user = None
return queryset
def get_context_data(self, **kwargs):
context = super(ProjectIndex, self).get_context_data(**kwargs)
context['person'] = self.user
context['tag'] = self.tag
return context
project_index = ProjectIndex.as_view()
class ProjectDetailView(ProjectOnboardMixin, DetailView):
'''Display project onboard steps'''
model = Project
slug_url_kwarg = 'project_slug'
def get_queryset(self):
return Project.objects.protected(self.request.user)
def get_context_data(self, **kwargs):
context = super(ProjectDetailView, self).get_context_data(**kwargs)
project = self.get_object()
context['versions'] = Version.objects.public(
user=self.request.user, project=project)
context['filter'] = VersionSlugFilter(self.request.GET,
queryset=context['versions'])
protocol = 'http'
if self.request.is_secure():
protocol = 'https'
context['badge_url'] = "%s://%s%s?version=%s" % (
protocol,
settings.PRODUCTION_DOMAIN,
reverse('project_badge', args=[project.slug]),
project.get_default_version(),
)
context['site_url'] = "%s://%s%s?badge=%s" % (
protocol,
settings.PRODUCTION_DOMAIN,
reverse('projects_detail', args=[project.slug]),
project.get_default_version(),
)
return context
def _badge_return(redirect, url):
if redirect:
return HttpResponseRedirect(url)
else:
response = requests.get(url)
http_response = HttpResponse(response.content,
content_type="image/svg+xml")
http_response['Cache-Control'] = 'no-cache'
        http_response['Etag'] = md5.new(url).hexdigest()
return http_response
# TODO remove this, it's a temporary fix to heavy database usage
@cache_page(60 * 30)
def project_badge(request, project_slug, redirect=True):
"""
Return a sweet badge for the project
"""
version_slug = request.GET.get('version', LATEST)
style = request.GET.get('style', 'flat')
try:
version = Version.objects.public(request.user).get(
project__slug=project_slug, slug=version_slug)
except Version.DoesNotExist:
url = (
'https://img.shields.io/badge/docs-unknown%20version-yellow.svg?style={style}'
.format(style=style))
return _badge_return(redirect, url)
version_builds = version.builds.filter(type='html', state='finished').order_by('-date')
if not version_builds.exists():
url = (
'https://img.shields.io/badge/docs-no%20builds-yellow.svg?style={style}'
.format(style=style))
return _badge_return(redirect, url)
last_build = version_builds[0]
if last_build.success:
color = 'brightgreen'
else:
color = 'red'
url = 'https://img.shields.io/badge/docs-%s-%s.svg?style=%s' % (
version.slug.replace('-', '--'), color, style)
return _badge_return(redirect, url)
def project_downloads(request, project_slug):
"""
A detail view for a project with various dataz
"""
project = get_object_or_404(Project.objects.protected(request.user), slug=project_slug)
versions = Version.objects.public(user=request.user, project=project)
version_data = SortedDict()
for version in versions:
data = version.get_downloads()
# Don't show ones that have no downloads.
if data:
version_data[version.slug] = data
    # in case the MEDIA_URL is a protocol relative URL we just assume
    # we want http as the protocol, so that Dash is able to handle the URL
if settings.MEDIA_URL.startswith('//'):
media_url_prefix = u'http:'
# but in case we're in debug mode and the MEDIA_URL is just a path
# we prefix it with a hardcoded host name and protocol
elif settings.MEDIA_URL.startswith('/') and settings.DEBUG:
media_url_prefix = u'http://%s' % request.get_host()
else:
media_url_prefix = ''
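    # e.g. MEDIA_URL = '//media.example.org/' (a hypothetical host) yields a
    # prefix of 'http:', so templates build 'http://media.example.org/...'
    # URLs that Dash accepts.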
return render_to_response(
'projects/project_downloads.html',
{
'project': project,
'version_data': version_data,
'versions': versions,
'media_url_prefix': media_url_prefix,
},
context_instance=RequestContext(request),
)
def project_download_media(request, project_slug, type, version_slug):
"""
Download a specific piece of media.
Perform an auth check if serving in private mode.
"""
# Do private project auth checks
queryset = Project.objects.protected(request.user).filter(slug=project_slug)
if not queryset.exists():
raise Http404
privacy_level = getattr(settings, 'DEFAULT_PRIVACY_LEVEL', 'public')
if privacy_level == 'public' or settings.DEBUG:
path = os.path.join(settings.MEDIA_URL, type, project_slug, version_slug,
'%s.%s' % (project_slug, type.replace('htmlzip', 'zip')))
return HttpResponseRedirect(path)
else:
# Get relative media path
path = queryset[0].get_production_media_path(type=type, version_slug=version_slug).replace(
settings.PRODUCTION_ROOT, '/prod_artifacts'
)
content_type, encoding = mimetypes.guess_type(path)
content_type = content_type or 'application/octet-stream'
response = HttpResponse(content_type=content_type)
if encoding:
response["Content-Encoding"] = encoding
response['X-Accel-Redirect'] = path
# Include version in filename; this fixes a long-standing bug
filename = "%s-%s.%s" % (project_slug, version_slug, path.split('.')[-1])
response['Content-Disposition'] = 'filename=%s' % filename
return response
def search_autocomplete(request):
"""
return a json list of project names
"""
if 'term' in request.GET:
term = request.GET['term']
else:
raise Http404
queryset = (Project.objects.public(request.user).filter(name__icontains=term)[:20])
ret_list = []
for project in queryset:
ret_list.append({
'label': project.name,
'value': project.slug,
})
json_response = json.dumps(ret_list)
return HttpResponse(json_response, content_type='text/javascript')
def version_autocomplete(request, project_slug):
"""
return a json list of version names
"""
queryset = Project.objects.public(request.user)
get_object_or_404(queryset, slug=project_slug)
versions = Version.objects.public(request.user)
if 'term' in request.GET:
term = request.GET['term']
else:
raise Http404
version_queryset = versions.filter(slug__icontains=term)[:20]
names = version_queryset.values_list('slug', flat=True)
json_response = json.dumps(list(names))
return HttpResponse(json_response, content_type='text/javascript')
def version_filter_autocomplete(request, project_slug):
queryset = Project.objects.public(request.user)
project = get_object_or_404(queryset, slug=project_slug)
versions = Version.objects.public(request.user)
filter = VersionSlugFilter(request.GET, queryset=versions)
format = request.GET.get('format', 'json')
if format == 'json':
names = filter.qs.values_list('slug', flat=True)
json_response = json.dumps(list(names))
return HttpResponse(json_response, content_type='text/javascript')
elif format == 'html':
return render_to_response(
'core/version_list.html',
{
'project': project,
'versions': versions,
'filter': filter,
},
context_instance=RequestContext(request),
)
else:
return HttpResponse(status=400)
def file_autocomplete(request, project_slug):
"""
    return a json list of file paths
"""
if 'term' in request.GET:
term = request.GET['term']
else:
raise Http404
queryset = ImportedFile.objects.filter(project__slug=project_slug, path__icontains=term)[:20]
ret_list = []
for file in queryset:
ret_list.append({
'label': file.path,
'value': file.path,
})
json_response = json.dumps(ret_list)
return HttpResponse(json_response, content_type='text/javascript')
def elastic_project_search(request, project_slug):
"""
Use elastic search to search in a project.
"""
queryset = Project.objects.protected(request.user)
project = get_object_or_404(queryset, slug=project_slug)
version_slug = request.GET.get('version', LATEST)
query = request.GET.get('q', None)
if query:
user = ''
if request.user.is_authenticated():
user = request.user
log.info(LOG_TEMPLATE.format(
user=user,
project=project or '',
type='inproject',
version=version_slug or '',
language='',
msg=query or '',
))
if query:
kwargs = {}
body = {
"query": {
"bool": {
"should": [
{"match": {"title": {"query": query, "boost": 10}}},
{"match": {"headers": {"query": query, "boost": 5}}},
{"match": {"content": {"query": query}}},
]
}
},
"highlight": {
"fields": {
"title": {},
"headers": {},
"content": {},
}
},
"fields": ["title", "project", "version", "path"],
"filter": {
"and": [
{"term": {"project": project_slug}},
{"term": {"version": version_slug}},
]
},
"size": 50 # TODO: Support pagination.
}
# Add routing to optimize search by hitting the right shard.
kwargs['routing'] = project_slug
results = PageIndex().search(body, **kwargs)
else:
results = {}
if results:
# pre and post 1.0 compat
for num, hit in enumerate(results['hits']['hits']):
for key, val in hit['fields'].items():
if isinstance(val, list):
results['hits']['hits'][num]['fields'][key] = val[0]
return render_to_response(
'search/elastic_project_search.html',
{
'project': project,
'query': query,
'results': results,
},
context_instance=RequestContext(request),
)
def project_versions(request, project_slug):
"""
    Shows the available versions and lets the user choose which ones should
    be built.
"""
project = get_object_or_404(Project.objects.protected(request.user),
slug=project_slug)
versions = Version.objects.public(user=request.user, project=project, only_active=False)
active_versions = versions.filter(active=True)
inactive_versions = versions.filter(active=False)
inactive_filter = VersionSlugFilter(request.GET, queryset=inactive_versions)
active_filter = VersionSlugFilter(request.GET, queryset=active_versions)
# If there's a wiped query string, check the string against the versions
# list and display a success message. Deleting directories doesn't know how
# to fail. :)
wiped = request.GET.get('wipe', '')
wiped_version = versions.filter(slug=wiped)
if wiped and wiped_version.count():
messages.success(request, 'Version wiped: ' + wiped)
return render_to_response(
'projects/project_version_list.html',
{
'inactive_filter': inactive_filter,
'active_filter': active_filter,
'project': project,
},
context_instance=RequestContext(request)
)
def project_analytics(request, project_slug):
"""
    Have an analytics API placeholder
"""
project = get_object_or_404(Project.objects.protected(request.user),
slug=project_slug)
analytics_cache = cache.get('analytics:%s' % project_slug)
if analytics_cache:
analytics = json.loads(analytics_cache)
else:
try:
resp = requests.get(
'{host}/api/v1/index/1/heatmap/'.format(host=settings.GROK_API_HOST),
params={'project': project.slug, 'days': 7, 'compare': True}
)
analytics = resp.json()
cache.set('analytics:%s' % project_slug, resp.content, 1800)
        except Exception:
analytics = None
if analytics:
page_list = list(reversed(sorted(analytics['page'].items(),
key=operator.itemgetter(1))))
version_list = list(reversed(sorted(analytics['version'].items(),
key=operator.itemgetter(1))))
else:
page_list = []
version_list = []
full = request.GET.get('full')
if not full:
page_list = page_list[:20]
version_list = version_list[:20]
return render_to_response(
'projects/project_analytics.html',
{
'project': project,
'analytics': analytics,
'page_list': page_list,
'version_list': version_list,
'full': full,
},
context_instance=RequestContext(request)
)
def project_embed(request, project_slug):
"""
Have a content API placeholder
"""
project = get_object_or_404(Project.objects.protected(request.user),
slug=project_slug)
version = project.versions.get(slug=LATEST)
files = version.imported_files.order_by('path')
return render_to_response(
'projects/project_embed.html',
{
'project': project,
'files': files,
'settings': {
'GROK_API_HOST': settings.GROK_API_HOST,
'URI': request.build_absolute_uri(location='/').rstrip('/')
}
},
context_instance=RequestContext(request)
)
|
|
import random
import unittest
from hearthbreaker.agents.basic_agents import DoNothingAgent, PredictableAgent
from tests.agents.testing_agents import OneCardPlayingAgent, PlayAndAttackAgent, CardTestingAgent,\
SelfSpellTestingAgent, EnemyMinionSpellTestingAgent
from tests.testing_utils import generate_game_for
from hearthbreaker.cards import *
class TestWarrior(unittest.TestCase):
def setUp(self):
random.seed(1857)
def test_ArathiWeaponsmith(self):
game = generate_game_for(ArathiWeaponsmith, StonetuskBoar, OneCardPlayingAgent, DoNothingAgent)
# Arathi Weaponsmith should be played
for turn in range(0, 7):
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual(3, game.players[0].minions[0].calculate_attack())
self.assertEqual(3, game.players[0].minions[0].health)
self.assertEqual("Arathi Weaponsmith", game.players[0].minions[0].card.name)
self.assertEqual(2, game.players[0].hero.weapon.base_attack)
self.assertEqual(2, game.players[0].hero.weapon.durability)
def test_Armorsmith(self):
game = generate_game_for(Armorsmith, StonetuskBoar, OneCardPlayingAgent, PlayAndAttackAgent)
# Armorsmith should be played
for turn in range(0, 3):
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual(1, game.players[0].minions[0].calculate_attack())
self.assertEqual(4, game.players[0].minions[0].health)
self.assertEqual("Armorsmith", game.players[0].minions[0].card.name)
self.assertEqual(0, game.players[0].hero.armor)
# Three Stonetusks should attack, generating one armor each
game.play_single_turn()
self.assertEqual(1, game.players[0].minions[0].health)
self.assertEqual(3, game.players[0].hero.armor)
def test_CruelTaskmaster(self):
game = generate_game_for(CruelTaskmaster, Shieldbearer, OneCardPlayingAgent, OneCardPlayingAgent)
for turn in range(0, 2):
game.play_single_turn()
self.assertEqual(1, len(game.players[1].minions))
self.assertEqual(0, game.players[1].minions[0].calculate_attack())
self.assertEqual(4, game.players[1].minions[0].health)
# Cruel Taskmaster should be played, targeting the Shieldbearer
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual(2, game.players[0].minions[0].calculate_attack())
self.assertEqual(2, game.players[0].minions[0].health)
self.assertEqual("Cruel Taskmaster", game.players[0].minions[0].card.name)
self.assertEqual(1, len(game.players[1].minions))
self.assertEqual(2, game.players[1].minions[0].calculate_attack())
self.assertEqual(3, game.players[1].minions[0].health)
game.play_single_turn()
game.play_single_turn()
self.assertEqual(2, len(game.players[0].minions))
self.assertEqual(2, len(game.players[1].minions))
self.assertEqual(2, game.players[0].minions[0].calculate_attack())
self.assertEqual(2, game.players[0].minions[0].health)
self.assertEqual(2, game.players[0].minions[1].calculate_attack())
self.assertEqual(2, game.players[0].minions[1].health)
self.assertEqual(2, game.players[1].minions[0].calculate_attack())
self.assertEqual(3, game.players[1].minions[0].health)
self.assertEqual(2, game.players[1].minions[1].calculate_attack())
self.assertEqual(3, game.players[1].minions[1].health)
def test_FrothingBerserker(self):
game = generate_game_for(FrothingBerserker, AngryChicken, OneCardPlayingAgent, PlayAndAttackAgent)
for turn in range(0, 4):
game.play_single_turn()
self.assertEqual(3, len(game.players[1].minions))
# Frothing Berserker should be played
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual(2, game.players[0].minions[0].calculate_attack())
self.assertEqual(4, game.players[0].minions[0].health)
self.assertEqual("Frothing Berserker", game.players[0].minions[0].card.name)
# Three chickens should attack, generating a total of +6 attack for the Frothing Berserker
game.play_single_turn()
self.assertEqual(8, game.players[0].minions[0].calculate_attack())
self.assertEqual(1, game.players[0].minions[0].health)
def test_GrommashHellscream(self):
game = generate_game_for(GrommashHellscream, ExplosiveTrap, PlayAndAttackAgent, CardTestingAgent)
for turn in range(0, 14):
game.play_single_turn()
        # Hellscream should be played and attack immediately (charge); the
        # Explosive Trap deals 2 damage to him, triggering his enrage, so the
        # attack lands for 10 damage.
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual(10, game.players[0].minions[0].calculate_attack())
self.assertEqual(7, game.players[0].minions[0].health)
self.assertEqual(20, game.players[1].hero.health)
game.players[0].minions[0].heal(2, None)
self.assertEqual(4, game.players[0].minions[0].calculate_attack())
game.players[0].minions[0].damage(2, None)
self.assertEqual(10, game.players[0].minions[0].calculate_attack())
game.players[0].minions[0].silence()
self.assertEqual(4, game.players[0].minions[0].calculate_attack())
game.players[0].minions[0].heal(2, None)
self.assertEqual(4, game.players[0].minions[0].calculate_attack())
game.players[0].minions[0].damage(2, None)
self.assertEqual(4, game.players[0].minions[0].calculate_attack())
def test_KorkronElite(self):
game = generate_game_for(KorkronElite, StonetuskBoar, PlayAndAttackAgent, DoNothingAgent)
for turn in range(0, 6):
game.play_single_turn()
# Kor'kron Elite should be played and attack (charge)
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual(4, game.players[0].minions[0].calculate_attack())
self.assertEqual(3, game.players[0].minions[0].health)
self.assertEqual(26, game.players[1].hero.health)
def test_WarsongCommander(self):
game = generate_game_for(WarsongCommander, StonetuskBoar, PlayAndAttackAgent, DoNothingAgent)
# Super special test cases - http://www.hearthhead.com/card=1009/warsong-commander#comments:id=1935295
game.players[0].mana = 100
# Play the Warsong Commander
commander = WarsongCommander()
commander.player = game.players[0]
commander.use(game.players[0], game)
self.assertFalse(game.players[0].minions[0].charge()) # Should not give charge to itself
# Test so that enrage doesn't remove the charge
worgen = RagingWorgen()
worgen.player = game.players[0]
worgen.use(game.players[0], game)
game.players[0].minions[0].damage(1, None) # Trigger enrage, charge should still be active
self.assertEqual(4, game.players[0].minions[0].calculate_attack())
self.assertTrue(game.players[0].minions[0].charge())
# Test so that charge gets applied before a battlecry
weapon = FieryWarAxe().create_weapon(game.players[0])
weapon.equip(game.players[0])
self.assertEqual(3, game.players[0].hero.weapon.base_attack)
self.assertEqual(2, game.players[0].hero.weapon.durability)
bloodsail = BloodsailRaider()
bloodsail.player = game.players[0]
bloodsail.use(game.players[0], game) # Should gain charge first, then 4 attack from weapon
self.assertEqual(5, game.players[0].minions[0].calculate_attack())
self.assertTrue(game.players[0].minions[0].charge())
# TODO: Test with Faceless Manipulator here
# Remove the Warsong Commander
game.players[0].minions[-1].die(None)
game.check_delayed()
# The previous charged minions should still have charge
self.assertTrue(game.players[0].minions[0].charge())
self.assertTrue(game.players[0].minions[-1].charge())
# Test so that a minion played before Warsong doesn't get charge
shield = Shieldbearer()
shield.summon(game.players[0], game, 0)
self.assertFalse(game.players[0].minions[0].charge())
commander.use(game.players[0], game)
self.assertFalse(game.players[0].minions[1].charge())
# Remove the Warsong again
game.players[0].minions[0].die(None)
game.players[0].minions[0].activate_delayed()
# Buff a minion to above 3
game.players[0].minions[0].change_attack(5)
# Play Warsong, the buffed minion should not get charge
commander.use(game.players[0], game)
self.assertFalse(game.players[0].minions[1].charge())
# Auras!
stormwind = StormwindChampion()
stormwind.player = game.players[0]
stormwind.use(game.players[0], game)
self.assertEqual(3, game.players[0].minions[1].calculate_attack())
self.assertEqual(4, game.players[0].minions[1].health)
# Kill the worgen
game.players[0].minions[-1].die(None)
game.players[0].minions[-1].activate_delayed()
# And play it again. It should get the aura FIRST, making it a 4/4 minion, and thus DOES NOT gain charge!
worgen.use(game.players[0], game)
self.assertFalse(game.players[0].minions[0].charge())
# Minions summoned by other minions
dragonling = DragonlingMechanic()
dragonling.player = game.players[0]
dragonling.use(game.players[0], game)
self.assertTrue(game.players[0].minions[0].charge())
self.assertEqual("Mechanical Dragonling", game.players[0].minions[1].card.name)
self.assertTrue(game.players[0].minions[1].charge())
# Kill them to make room
game.players[0].minions[0].die(None)
game.players[0].minions[0].activate_delayed()
game.players[0].minions[0].die(None)
game.players[0].minions[0].activate_delayed()
creeper = HauntedCreeper()
creeper.player = game.players[0]
creeper.use(game.players[0], game)
self.assertTrue(game.players[0].minions[0].charge())
game.players[0].minions[0].die(None)
game.players[0].minions[0].activate_delayed()
game.check_delayed()
self.assertEqual("Spectral Spider", game.players[0].minions[0].card.name)
self.assertTrue(game.players[0].minions[0].charge())
def test_BattleRage(self):
game = generate_game_for(BattleRage, StonetuskBoar, CardTestingAgent, DoNothingAgent)
game.players[0].mana = 100
shield = Shieldbearer()
shield.player = game.players[0]
shield.use(game.players[0], game)
shield.use(game.players[0], game)
shield.use(game.players[0], game)
game.players[0].minions[0].damage(1, None)
game.players[0].minions[1].damage(1, None)
game.players[0].hero.damage(1, None)
for turn in range(0, 2):
game.play_single_turn()
self.assertEqual(4, len(game.players[0].hand))
# Battle Rage should be played, 3 damaged characters = 3 cards drawn
game.play_single_turn()
self.assertEqual(7, len(game.players[0].hand))
def test_Brawl(self):
game = generate_game_for(Brawl, StonetuskBoar, CardTestingAgent, DoNothingAgent)
game.players[0].mana = 100
shield = Shieldbearer()
shield.player = game.players[0]
shield.use(game.players[0], game)
shield.use(game.players[0], game)
golem = HarvestGolem()
golem.player = game.players[0]
golem.use(game.players[0], game)
shield.use(game.players[1], game)
shield.use(game.players[1], game)
shield.use(game.players[1], game)
for turn in range(0, 8):
game.play_single_turn()
self.assertEqual(3, len(game.players[0].minions))
self.assertEqual(3, len(game.players[1].minions))
# Brawl should be played, leaving one minion behind and Damaged Golem should have spawned for first player
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual("Damaged Golem", game.players[0].minions[0].card.name)
self.assertEqual(1, len(game.players[1].minions))
def test_Charge(self):
game = generate_game_for([Shieldbearer, Charge], StonetuskBoar, CardTestingAgent, DoNothingAgent)
game.players[0].agent.play_on = 4
for turn in range(0, 6):
game.play_single_turn()
# Shieldbearer and Charge should be played
game.play_single_turn()
self.assertEqual(2, game.players[0].minions[0].calculate_attack())
self.assertTrue(game.players[0].minions[0].charge())
def test_Cleave(self):
game = generate_game_for(Cleave, SenjinShieldmasta, OneCardPlayingAgent, OneCardPlayingAgent)
for turn in range(0, 10):
game.play_single_turn()
self.assertEqual(8, len(game.players[0].hand))
self.assertEqual(2, len(game.players[1].minions))
self.assertEqual(5, game.players[1].minions[0].health)
self.assertEqual(5, game.players[1].minions[1].health)
# 2 enemy minions are now in play, so Cleave should be played
game.play_single_turn()
self.assertEqual(8, len(game.players[0].hand))
self.assertEqual(2, len(game.players[1].minions))
self.assertEqual(3, game.players[1].minions[0].health)
self.assertEqual(3, game.players[1].minions[1].health)
def test_WhirlwindExecute(self):
game = generate_game_for(Execute, [GoldshireFootman, Whirlwind], CardTestingAgent, OneCardPlayingAgent)
for turn in range(0, 4):
game.play_single_turn()
self.assertEqual(1, len(game.players[1].minions))
self.assertEqual(1, game.players[1].minions[0].health)
game.play_single_turn()
self.assertEqual(0, len(game.players[1].minions))
def test_HeroicStrike(self):
game = generate_game_for(HeroicStrike, StonetuskBoar, PlayAndAttackAgent, DoNothingAgent)
for turn in range(0, 3):
game.play_single_turn()
self.assertEqual(26, game.players[1].hero.health)
def test_InnerRageRampage(self):
game = generate_game_for([InnerRage, Rampage], GoldshireFootman, OneCardPlayingAgent, OneCardPlayingAgent)
for turn in range(0, 4):
game.play_single_turn()
self.assertEqual(2, len(game.players[1].minions))
self.assertEqual(1, game.players[1].minions[0].calculate_attack())
self.assertEqual(2, game.players[1].minions[0].health)
self.assertEqual(3, game.players[1].minions[1].calculate_attack())
self.assertEqual(1, game.players[1].minions[1].health)
game.play_single_turn()
self.assertEqual(2, len(game.players[1].minions))
self.assertEqual(1, game.players[1].minions[0].calculate_attack())
self.assertEqual(2, game.players[1].minions[0].health)
self.assertEqual(6, game.players[1].minions[1].calculate_attack())
self.assertEqual(4, game.players[1].minions[1].health)
def test_ShieldBlockShieldSlam(self):
game = generate_game_for([ShieldBlock, ShieldSlam], Doomsayer, OneCardPlayingAgent, OneCardPlayingAgent)
for turn in range(0, 6):
game.play_single_turn()
self.assertEqual(6, len(game.players[0].hand))
self.assertEqual(5, game.players[0].hero.armor)
self.assertEqual(1, len(game.players[1].minions))
self.assertEqual(7, game.players[1].minions[0].health)
game.play_single_turn()
self.assertEqual(5, game.players[0].hero.armor)
self.assertEqual(1, len(game.players[1].minions))
self.assertEqual(2, game.players[1].minions[0].health)
def test_Slam(self):
game = generate_game_for(Slam, [GoldshireFootman, Doomsayer], OneCardPlayingAgent, OneCardPlayingAgent)
for turn in range(0, 4):
game.play_single_turn() # Slam to kill Footman, no draw
self.assertEqual(4, len(game.players[0].hand))
self.assertEqual(1, len(game.players[1].minions))
game.play_single_turn() # Slam and Doomsayer survives
self.assertEqual(5, len(game.players[0].hand))
self.assertEqual(1, len(game.players[1].minions))
def test_Upgrade(self):
game = generate_game_for(Upgrade, StonetuskBoar, OneCardPlayingAgent, DoNothingAgent)
for turn in range(0, 2):
game.play_single_turn()
self.assertEqual(1, game.players[0].hero.weapon.base_attack)
self.assertEqual(3, game.players[0].hero.weapon.durability)
game.play_single_turn()
self.assertEqual(2, game.players[0].hero.weapon.base_attack)
self.assertEqual(4, game.players[0].hero.weapon.durability)
def test_MortalStrike(self):
game = generate_game_for(MortalStrike, StonetuskBoar, SelfSpellTestingAgent, DoNothingAgent)
game.players[0].hero.health = 14
for turn in range(0, 8):
game.play_single_turn()
self.assertEqual(10, game.players[0].hero.health)
game.play_single_turn()
self.assertEqual(4, game.players[0].hero.health)
def test_CommandingShout(self):
game = generate_game_for([StonetuskBoar, StonetuskBoar, StonetuskBoar, StonetuskBoar,
CommandingShout], UnstableGhoul,
PlayAndAttackAgent, OneCardPlayingAgent)
for turn in range(0, 4):
game.play_single_turn()
self.assertEqual(1, len(game.current_player.minions))
self.assertEqual("Unstable Ghoul", game.current_player.minions[0].card.name)
game.play_single_turn()
self.assertEqual(4, len(game.current_player.minions))
self.assertEqual(0, len(game.other_player.minions))
game.play_single_turn()
game.play_single_turn()
self.assertEqual(0, len(game.other_player.minions))
self.assertEqual(0, len(game.current_player.minions))
def test_Gorehowl(self):
game = generate_game_for([Gorehowl, Deathwing], [BoulderfistOgre, Deathwing],
PlayAndAttackAgent, CardTestingAgent)
for turn in range(0, 12):
game.play_single_turn()
self.assertEqual(1, len(game.players[1].minions))
self.assertEqual(7, game.players[1].minions[0].health)
game.play_single_turn()
self.assertEqual(0, len(game.players[1].minions))
self.assertEqual(1, game.players[0].hero.weapon.durability) # Gorehowl does not break from killing Boulderfist
self.assertEqual(6, game.players[0].hero.weapon.base_attack) # But it does lose 1 attack
self.assertEqual(24, game.players[0].hero.health)
self.assertEqual(30, game.players[1].hero.health)
game.play_single_turn()
game.play_single_turn()
self.assertIsNone(game.players[0].hero.weapon) # Attacks face and weapon breaks
self.assertEqual(24, game.players[0].hero.health)
self.assertEqual(24, game.players[1].hero.health)
def test_FieryWarAxe(self):
game = generate_game_for(FieryWarAxe, BoulderfistOgre,
PlayAndAttackAgent, DoNothingAgent)
for turn in range(0, 3):
game.play_single_turn()
self.assertEqual(1, game.current_player.hero.weapon.durability)
self.assertEqual(3, game.current_player.hero.weapon.base_attack)
self.assertEqual(27, game.other_player.hero.health)
def test_DeathsBite(self):
game = generate_game_for([IronfurGrizzly, DeathsBite], Deathlord,
PlayAndAttackAgent, OneCardPlayingAgent)
for turn in range(0, 7):
game.play_single_turn()
self.assertEqual(1, len(game.current_player.minions))
self.assertIsNotNone(game.current_player.hero.weapon)
self.assertEqual(1, game.other_player.minions[0].health)
game.play_single_turn()
game.play_single_turn()
# The Death's Bite attacks the new Deathlord, triggering the weapon's deathrattle
# This finishes off the other deathlord and the first friendly Grizzly
self.assertEqual(1, len(game.other_player.minions))
self.assertEqual(3, game.other_player.minions[0].health)
self.assertEqual(2, len(game.current_player.minions))
self.assertEqual(2, game.current_player.minions[0].health)
self.assertEqual(3, game.current_player.minions[1].health)
def test_Warbot(self):
game = generate_game_for(Warbot, StonetuskBoar, CardTestingAgent, PlayAndAttackAgent)
game.play_single_turn()
self.assertEqual(1, len(game.current_player.minions))
self.assertEqual(1, game.current_player.minions[0].calculate_attack())
game.play_single_turn()
self.assertEqual(1, len(game.other_player.minions))
self.assertEqual(2, game.other_player.minions[0].calculate_attack())
def test_BouncingBlades(self):
game = generate_game_for([GoldshireFootman, EchoingOoze, BouncingBlade], [GoldshireFootman, EchoingOoze],
CardTestingAgent, CardTestingAgent)
for turn in range(4):
game.play_single_turn()
self.assertEqual(3, len(game.players[0].minions))
self.assertEqual(3, len(game.players[1].minions))
self.assertEqual(2, game.players[0].minions[0].health)
self.assertEqual(2, game.players[0].minions[1].health)
self.assertEqual(2, game.players[0].minions[2].health)
self.assertEqual(2, game.players[1].minions[0].health)
self.assertEqual(2, game.players[1].minions[1].health)
self.assertEqual(2, game.players[1].minions[2].health)
game.play_single_turn()
self.assertEqual(2, len(game.players[0].minions))
self.assertEqual(3, len(game.players[1].minions))
self.assertEqual(2, game.players[0].minions[0].health)
self.assertEqual(1, game.players[0].minions[1].health)
self.assertEqual(2, game.players[1].minions[0].health)
self.assertEqual(2, game.players[1].minions[1].health)
self.assertEqual(1, game.players[1].minions[2].health)
def test_OgreWarmaul(self):
game = generate_game_for(OgreWarmaul,
[StonetuskBoar, GoldshireFootman, SilverbackPatriarch],
PlayAndAttackAgent, OneCardPlayingAgent)
for turn in range(5):
game.play_single_turn()
self.assertIsNotNone(game.current_player.hero.weapon)
self.assertEqual(1, len(game.other_player.minions))
self.assertEqual("Stonetusk Boar", game.other_player.minions[0].card.name)
self.assertEqual(30, game.other_player.hero.health)
game.play_single_turn()
game.play_single_turn()
self.assertEqual(1, len(game.other_player.minions))
self.assertEqual("Silverback Patriarch", game.other_player.minions[0].card.name)
self.assertEqual(30, game.other_player.hero.health)
def test_SiegeEngine(self):
game = generate_game_for(SiegeEngine, StonetuskBoar, PredictableAgent, DoNothingAgent)
        # Siege Engine should be played
for turn in range(0, 13):
game.play_single_turn()
self.assertEqual(12, game.players[0].hero.armor)
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual(5, game.players[0].minions[-1].calculate_attack())
self.assertEqual(5, game.players[0].minions[-1].health)
self.assertEqual("Siege Engine", game.players[0].minions[0].card.name)
# Hero Power will be used, triggering the Siege Engine
for turn in range(0, 2):
game.play_single_turn()
self.assertEqual(14, game.players[0].hero.armor)
self.assertEqual(2, len(game.players[0].minions))
self.assertEqual(6, game.players[0].minions[-1].calculate_attack())
self.assertEqual(5, game.players[0].minions[-1].health)
def test_Crush(self):
game = generate_game_for([Crush, ChillwindYeti], DreadInfernal, EnemyMinionSpellTestingAgent,
CardTestingAgent)
# Player 2 plays a Dread Infernal
for turn in range(0, 12):
game.play_single_turn()
self.assertEqual(1, len(game.players[1].minions))
# Player 1 pays 7 mana to use Crush
game.play_single_turn()
self.assertEqual(0, len(game.players[0].minions))
self.assertEqual(0, len(game.players[1].minions))
self.assertEqual(0, game.players[0].mana)
# Player 2 plays another Dread Infernal
game.play_single_turn()
self.assertEqual(0, len(game.players[0].minions))
self.assertEqual(1, len(game.players[1].minions))
# Player 1 plays Yeti, can't afford Crush
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual(1, len(game.players[1].minions))
# Player 2 plays another Dread Infernal, damaging the Yeti
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual(2, len(game.players[1].minions))
self.assertEqual(4, game.players[0].minions[-1].health)
        # Player 1 pays 3 mana to use Crush and 4 to play a 2nd Yeti
game.play_single_turn()
self.assertEqual(2, len(game.players[0].minions))
self.assertEqual(1, len(game.players[1].minions))
self.assertEqual(2, game.players[0].mana)
def test_BurrowingMine(self):
game = generate_game_for(BurrowingMine, StonetuskBoar, DoNothingAgent, DoNothingAgent)
game.play_single_turn()
self.assertEqual(0, game.current_player.hero.health)
self.assertEqual(3, len(game.current_player.hand))
self.assertEqual(0, game.current_player.deck.left)
def test_IronJuggernaut(self):
game = generate_game_for(IronJuggernaut, CircleOfHealing, OneCardPlayingAgent, PredictableAgent)
for turn in range(11):
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual("Iron Juggernaut", game.players[0].minions[0].card.name)
found_mine = False
for card in game.players[1].deck.cards:
if card.name == "Burrowing Mine":
found_mine = True
self.assertTrue(found_mine, "Did not find the burrowing mine in the opponent's deck")
# Will draw multiple mines in a row
self.assertEqual(30, game.players[1].hero.health)
for turn in range(43):
game.play_single_turn()
self.assertEqual(0, game.players[1].hero.health)
def test_ScrewjankClunker(self):
game = generate_game_for([Wisp, ScrewjankClunker, ScrewjankClunker], [Wisp, MoltenGiant],
OneCardPlayingAgent, OneCardPlayingAgent)
for turn in range(8):
game.play_single_turn()
# Clunker cannot buff anything
self.assertEqual(2, len(game.players[0].minions))
self.assertEqual(1, len(game.players[1].minions))
self.assertEqual(2, game.players[0].minions[0].calculate_attack())
self.assertEqual(5, game.players[0].minions[0].health)
self.assertEqual(1, game.players[0].minions[1].calculate_attack())
self.assertEqual(1, game.players[0].minions[1].health)
self.assertEqual(1, game.players[1].minions[0].calculate_attack())
self.assertEqual(1, game.players[1].minions[0].health)
game.play_single_turn()
# Clunker buffs previous Clunker
self.assertEqual(3, len(game.players[0].minions))
self.assertEqual(1, len(game.players[1].minions))
self.assertEqual(2, game.players[0].minions[0].calculate_attack())
self.assertEqual(5, game.players[0].minions[0].health)
self.assertEqual(4, game.players[0].minions[1].calculate_attack())
self.assertEqual(7, game.players[0].minions[1].health)
self.assertEqual(1, game.players[0].minions[2].calculate_attack())
self.assertEqual(1, game.players[0].minions[2].health)
self.assertEqual(1, game.players[1].minions[0].calculate_attack())
self.assertEqual(1, game.players[1].minions[0].health)
def test_AxeFlinger(self):
game = generate_game_for(AxeFlinger, [MortalCoil, ShadowWordPain], OneCardPlayingAgent, OneCardPlayingAgent)
for turn in range(7):
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual(30, game.players[1].hero.health)
game.play_single_turn() # Mortal Coils the Axe Flinger
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual(28, game.players[1].hero.health)
game.play_single_turn() # Plays 2nd Axe Flinger
game.play_single_turn() # Pains 1 Axe Flinger, no damage
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual(28, game.players[1].hero.health)
def test_Revenge(self):
game = generate_game_for(SinisterStrike, [ChillwindYeti, Revenge, Revenge],
OneCardPlayingAgent, OneCardPlayingAgent)
for turn in range(11):
game.play_single_turn()
self.assertEqual(1, len(game.players[1].minions))
self.assertEqual(4, game.players[1].minions[0].health) # 1st Revenge cast at 15 hp, so 1 damage
self.assertEqual(12, game.players[1].hero.health)
game.play_single_turn()
self.assertEqual(1, len(game.players[1].minions))
self.assertEqual(1, game.players[1].minions[0].health) # 2nd Revenge cast at 12 hp, so 3 damage
self.assertEqual(12, game.players[1].hero.health)
|
|
import numpy as np
from numpy.testing import assert_equal, assert_array_equal
from scipy.stats import rankdata, tiecorrect
import pytest
class TestTieCorrect(object):
def test_empty(self):
"""An empty array requires no correction, should return 1.0."""
ranks = np.array([], dtype=np.float64)
c = tiecorrect(ranks)
assert_equal(c, 1.0)
def test_one(self):
"""A single element requires no correction, should return 1.0."""
ranks = np.array([1.0], dtype=np.float64)
c = tiecorrect(ranks)
assert_equal(c, 1.0)
def test_no_correction(self):
"""Arrays with no ties require no correction."""
ranks = np.arange(2.0)
c = tiecorrect(ranks)
assert_equal(c, 1.0)
ranks = np.arange(3.0)
c = tiecorrect(ranks)
assert_equal(c, 1.0)
def test_basic(self):
"""Check a few basic examples of the tie correction factor."""
# One tie of two elements
ranks = np.array([1.0, 2.5, 2.5])
c = tiecorrect(ranks)
T = 2.0
N = ranks.size
expected = 1.0 - (T**3 - T) / (N**3 - N)
assert_equal(c, expected)
# One tie of two elements (same as above, but tie is not at the end)
ranks = np.array([1.5, 1.5, 3.0])
c = tiecorrect(ranks)
T = 2.0
N = ranks.size
expected = 1.0 - (T**3 - T) / (N**3 - N)
assert_equal(c, expected)
# One tie of three elements
ranks = np.array([1.0, 3.0, 3.0, 3.0])
c = tiecorrect(ranks)
T = 3.0
N = ranks.size
expected = 1.0 - (T**3 - T) / (N**3 - N)
assert_equal(c, expected)
# Two ties, lengths 2 and 3.
ranks = np.array([1.5, 1.5, 4.0, 4.0, 4.0])
c = tiecorrect(ranks)
T1 = 2.0
T2 = 3.0
N = ranks.size
expected = 1.0 - ((T1**3 - T1) + (T2**3 - T2)) / (N**3 - N)
assert_equal(c, expected)
def test_overflow(self):
ntie, k = 2000, 5
a = np.repeat(np.arange(k), ntie)
n = a.size # ntie * k
out = tiecorrect(rankdata(a))
assert_equal(out, 1.0 - k * (ntie**3 - ntie) / float(n**3 - n))
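# Illustrative reference, not part of the original suite: every expected
# value above instantiates 1 - sum(t**3 - t) / (n**3 - n), where t runs over
# the sizes of the groups of tied ranks. Computed directly from tie counts:
def _tie_correction_reference(ranks):
    """Tie correction factor computed from an array of ranks."""
    ranks = np.asarray(ranks, dtype=np.float64)
    n = ranks.size
    if n < 2:
        return 1.0
    _, counts = np.unique(ranks, return_counts=True)
    return 1.0 - (counts ** 3 - counts).sum() / float(n ** 3 - n)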
class TestRankData(object):
def test_empty(self):
"""stats.rankdata([]) should return an empty array."""
a = np.array([], dtype=int)
r = rankdata(a)
assert_array_equal(r, np.array([], dtype=np.float64))
r = rankdata([])
assert_array_equal(r, np.array([], dtype=np.float64))
def test_one(self):
"""Check stats.rankdata with an array of length 1."""
data = [100]
a = np.array(data, dtype=int)
r = rankdata(a)
assert_array_equal(r, np.array([1.0], dtype=np.float64))
r = rankdata(data)
assert_array_equal(r, np.array([1.0], dtype=np.float64))
def test_basic(self):
"""Basic tests of stats.rankdata."""
data = [100, 10, 50]
expected = np.array([3.0, 1.0, 2.0], dtype=np.float64)
a = np.array(data, dtype=int)
r = rankdata(a)
assert_array_equal(r, expected)
r = rankdata(data)
assert_array_equal(r, expected)
data = [40, 10, 30, 10, 50]
expected = np.array([4.0, 1.5, 3.0, 1.5, 5.0], dtype=np.float64)
a = np.array(data, dtype=int)
r = rankdata(a)
assert_array_equal(r, expected)
r = rankdata(data)
assert_array_equal(r, expected)
data = [20, 20, 20, 10, 10, 10]
expected = np.array([5.0, 5.0, 5.0, 2.0, 2.0, 2.0], dtype=np.float64)
a = np.array(data, dtype=int)
r = rankdata(a)
assert_array_equal(r, expected)
r = rankdata(data)
assert_array_equal(r, expected)
# The docstring states explicitly that the argument is flattened.
a2d = a.reshape(2, 3)
r = rankdata(a2d)
assert_array_equal(r, expected)
def test_rankdata_object_string(self):
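        # Pure-Python reference rankings: 'ordinal' breaks ties by original
        # position by ranking (value, index) pairs, and 'dense' ranks each
        # value against the sorted unique values.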
min_rank = lambda a: [1 + sum(i < j for i in a) for j in a]
max_rank = lambda a: [sum(i <= j for i in a) for j in a]
ordinal_rank = lambda a: min_rank([(x, i) for i, x in enumerate(a)])
def average_rank(a):
return [(i + j) / 2.0 for i, j in zip(min_rank(a), max_rank(a))]
def dense_rank(a):
b = np.unique(a)
return [1 + sum(i < j for i in b) for j in a]
rankf = dict(min=min_rank, max=max_rank, ordinal=ordinal_rank,
average=average_rank, dense=dense_rank)
def check_ranks(a):
for method in 'min', 'max', 'dense', 'ordinal', 'average':
out = rankdata(a, method=method)
assert_array_equal(out, rankf[method](a))
val = ['foo', 'bar', 'qux', 'xyz', 'abc', 'efg', 'ace', 'qwe', 'qaz']
check_ranks(np.random.choice(val, 200))
check_ranks(np.random.choice(val, 200).astype('object'))
val = np.array([0, 1, 2, 2.718, 3, 3.141], dtype='object')
check_ranks(np.random.choice(val, 200).astype('object'))
def test_large_int(self):
data = np.array([2**60, 2**60+1], dtype=np.uint64)
r = rankdata(data)
assert_array_equal(r, [1.0, 2.0])
data = np.array([2**60, 2**60+1], dtype=np.int64)
r = rankdata(data)
assert_array_equal(r, [1.0, 2.0])
data = np.array([2**60, -2**60+1], dtype=np.int64)
r = rankdata(data)
assert_array_equal(r, [2.0, 1.0])
def test_big_tie(self):
for n in [10000, 100000, 1000000]:
data = np.ones(n, dtype=int)
r = rankdata(data)
expected_rank = 0.5 * (n + 1)
assert_array_equal(r, expected_rank * data,
"test failed with n=%d" % n)
def test_axis(self):
data = [[0, 2, 1],
[4, 2, 2]]
expected0 = [[1., 1.5, 1.],
[2., 1.5, 2.]]
r0 = rankdata(data, axis=0)
assert_array_equal(r0, expected0)
expected1 = [[1., 3., 2.],
[3., 1.5, 1.5]]
r1 = rankdata(data, axis=1)
assert_array_equal(r1, expected1)
methods = ["average", "min", "max", "dense", "ordinal"]
dtypes = [np.float64] + [np.int_]*4
@pytest.mark.parametrize("axis", [0, 1])
@pytest.mark.parametrize("method, dtype", zip(methods, dtypes))
def test_size_0_axis(self, axis, method, dtype):
shape = (3, 0)
data = np.zeros(shape)
r = rankdata(data, method=method, axis=axis)
assert_equal(r.shape, shape)
assert_equal(r.dtype, dtype)
_cases = (
# values, method, expected
([], 'average', []),
([], 'min', []),
([], 'max', []),
([], 'dense', []),
([], 'ordinal', []),
#
([100], 'average', [1.0]),
([100], 'min', [1.0]),
([100], 'max', [1.0]),
([100], 'dense', [1.0]),
([100], 'ordinal', [1.0]),
#
([100, 100, 100], 'average', [2.0, 2.0, 2.0]),
([100, 100, 100], 'min', [1.0, 1.0, 1.0]),
([100, 100, 100], 'max', [3.0, 3.0, 3.0]),
([100, 100, 100], 'dense', [1.0, 1.0, 1.0]),
([100, 100, 100], 'ordinal', [1.0, 2.0, 3.0]),
#
([100, 300, 200], 'average', [1.0, 3.0, 2.0]),
([100, 300, 200], 'min', [1.0, 3.0, 2.0]),
([100, 300, 200], 'max', [1.0, 3.0, 2.0]),
([100, 300, 200], 'dense', [1.0, 3.0, 2.0]),
([100, 300, 200], 'ordinal', [1.0, 3.0, 2.0]),
#
([100, 200, 300, 200], 'average', [1.0, 2.5, 4.0, 2.5]),
([100, 200, 300, 200], 'min', [1.0, 2.0, 4.0, 2.0]),
([100, 200, 300, 200], 'max', [1.0, 3.0, 4.0, 3.0]),
([100, 200, 300, 200], 'dense', [1.0, 2.0, 3.0, 2.0]),
([100, 200, 300, 200], 'ordinal', [1.0, 2.0, 4.0, 3.0]),
#
([100, 200, 300, 200, 100], 'average', [1.5, 3.5, 5.0, 3.5, 1.5]),
([100, 200, 300, 200, 100], 'min', [1.0, 3.0, 5.0, 3.0, 1.0]),
([100, 200, 300, 200, 100], 'max', [2.0, 4.0, 5.0, 4.0, 2.0]),
([100, 200, 300, 200, 100], 'dense', [1.0, 2.0, 3.0, 2.0, 1.0]),
([100, 200, 300, 200, 100], 'ordinal', [1.0, 3.0, 5.0, 4.0, 2.0]),
#
([10] * 30, 'ordinal', np.arange(1.0, 31.0)),
)
def test_cases():
for values, method, expected in _cases:
r = rankdata(values, method=method)
assert_array_equal(r, expected)
|
|
"""The tests for the device tracker component."""
from datetime import datetime, timedelta
import json
import logging
import os
from unittest.mock import Mock, call, patch
import pytest
from homeassistant.components import zone
import homeassistant.components.device_tracker as device_tracker
from homeassistant.components.device_tracker import const, legacy
from homeassistant.const import (
ATTR_ENTITY_PICTURE,
ATTR_FRIENDLY_NAME,
ATTR_GPS_ACCURACY,
ATTR_ICON,
ATTR_LATITUDE,
ATTR_LONGITUDE,
CONF_PLATFORM,
STATE_HOME,
STATE_NOT_HOME,
)
from homeassistant.core import State, callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import discovery
from homeassistant.helpers.json import JSONEncoder
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.common import (
assert_setup_component,
async_fire_time_changed,
mock_registry,
mock_restore_cache,
patch_yaml_files,
)
from tests.components.device_tracker import common
TEST_PLATFORM = {device_tracker.DOMAIN: {CONF_PLATFORM: "test"}}
_LOGGER = logging.getLogger(__name__)
@pytest.fixture(name="yaml_devices")
def mock_yaml_devices(hass):
"""Get a path for storing yaml devices."""
yaml_devices = hass.config.path(legacy.YAML_DEVICES)
if os.path.isfile(yaml_devices):
os.remove(yaml_devices)
yield yaml_devices
if os.path.isfile(yaml_devices):
os.remove(yaml_devices)
async def test_is_on(hass):
"""Test is_on method."""
entity_id = f"{const.DOMAIN}.test"
hass.states.async_set(entity_id, STATE_HOME)
assert device_tracker.is_on(hass, entity_id)
hass.states.async_set(entity_id, STATE_NOT_HOME)
assert not device_tracker.is_on(hass, entity_id)
async def test_reading_broken_yaml_config(hass):
"""Test when known devices contains invalid data."""
files = {
"empty.yaml": "",
"nodict.yaml": "100",
"badkey.yaml": "@:\n name: Device",
"noname.yaml": "my_device:\n",
"allok.yaml": "My Device:\n name: Device",
"oneok.yaml": ("My Device!:\n name: Device\nbad_device:\n nme: Device"),
}
args = {"hass": hass, "consider_home": timedelta(seconds=60)}
with patch_yaml_files(files):
assert await legacy.async_load_config("empty.yaml", **args) == []
assert await legacy.async_load_config("nodict.yaml", **args) == []
assert await legacy.async_load_config("noname.yaml", **args) == []
assert await legacy.async_load_config("badkey.yaml", **args) == []
res = await legacy.async_load_config("allok.yaml", **args)
assert len(res) == 1
assert res[0].name == "Device"
assert res[0].dev_id == "my_device"
res = await legacy.async_load_config("oneok.yaml", **args)
assert len(res) == 1
assert res[0].name == "Device"
assert res[0].dev_id == "my_device"
async def test_reading_yaml_config(hass, yaml_devices, enable_custom_integrations):
"""Test the rendering of the YAML configuration."""
dev_id = "test"
device = legacy.Device(
hass,
timedelta(seconds=180),
True,
dev_id,
"AB:CD:EF:GH:IJ",
"Test name",
picture="http://test.picture",
icon="mdi:kettle",
)
await hass.async_add_executor_job(
legacy.update_config, yaml_devices, dev_id, device
)
assert await async_setup_component(hass, device_tracker.DOMAIN, TEST_PLATFORM)
config = (await legacy.async_load_config(yaml_devices, hass, device.consider_home))[
0
]
assert device.dev_id == config.dev_id
assert device.track == config.track
assert device.mac == config.mac
assert device.config_picture == config.config_picture
assert device.consider_home == config.consider_home
assert device.icon == config.icon
assert f"{device_tracker.DOMAIN}.test" in hass.config.components
@patch("homeassistant.components.device_tracker.const.LOGGER.warning")
async def test_duplicate_mac_dev_id(mock_warning, hass):
"""Test adding duplicate MACs or device IDs to DeviceTracker."""
devices = [
legacy.Device(
hass, True, True, "my_device", "AB:01", "My device", None, None, False
),
legacy.Device(
hass, True, True, "your_device", "AB:01", "Your device", None, None, False
),
]
legacy.DeviceTracker(hass, False, True, {}, devices)
_LOGGER.debug(mock_warning.call_args_list)
assert (
mock_warning.call_count == 1
), "The only warning call should be duplicates (check DEBUG)"
args, _ = mock_warning.call_args
assert "Duplicate device MAC" in args[0], "Duplicate MAC warning expected"
mock_warning.reset_mock()
devices = [
legacy.Device(
hass, True, True, "my_device", "AB:01", "My device", None, None, False
),
legacy.Device(
hass, True, True, "my_device", None, "Your device", None, None, False
),
]
legacy.DeviceTracker(hass, False, True, {}, devices)
_LOGGER.debug(mock_warning.call_args_list)
assert (
mock_warning.call_count == 1
), "The only warning call should be duplicates (check DEBUG)"
args, _ = mock_warning.call_args
assert "Duplicate device IDs" in args[0], "Duplicate device IDs warning expected"
async def test_setup_without_yaml_file(hass, enable_custom_integrations):
"""Test with no YAML file."""
with assert_setup_component(1, device_tracker.DOMAIN):
assert await async_setup_component(hass, device_tracker.DOMAIN, TEST_PLATFORM)
async def test_gravatar(hass):
"""Test the Gravatar generation."""
dev_id = "test"
device = legacy.Device(
hass,
timedelta(seconds=180),
True,
dev_id,
"AB:CD:EF:GH:IJ",
"Test name",
gravatar="test@example.com",
)
gravatar_url = (
"https://www.gravatar.com/avatar/"
"55502f40dc8b7c769880b10874abc9d0.jpg?s=80&d=wavatar"
)
assert device.config_picture == gravatar_url
async def test_gravatar_and_picture(hass):
"""Test that Gravatar overrides picture."""
dev_id = "test"
device = legacy.Device(
hass,
timedelta(seconds=180),
True,
dev_id,
"AB:CD:EF:GH:IJ",
"Test name",
picture="http://test.picture",
gravatar="test@example.com",
)
gravatar_url = (
"https://www.gravatar.com/avatar/"
"55502f40dc8b7c769880b10874abc9d0.jpg?s=80&d=wavatar"
)
assert device.config_picture == gravatar_url
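# Hypothetical sketch, not the Home Assistant implementation: the Gravatar
# URLs asserted above follow the public Gravatar scheme, i.e. the MD5 hex
# digest of the trimmed, lowercased e-mail plus size/default parameters.
import hashlib
def _example_gravatar_url(email, size=80, default="wavatar"):
    """Build a Gravatar URL the way the asserted test values are formed."""
    digest = hashlib.md5(email.strip().lower().encode("utf-8")).hexdigest()
    return f"https://www.gravatar.com/avatar/{digest}.jpg?s={size}&d={default}"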
@patch("homeassistant.components.device_tracker.legacy.DeviceTracker.see")
@patch("homeassistant.components.demo.device_tracker.setup_scanner", autospec=True)
async def test_discover_platform(mock_demo_setup_scanner, mock_see, hass):
"""Test discovery of device_tracker demo platform."""
await discovery.async_load_platform(
hass, device_tracker.DOMAIN, "demo", {"test_key": "test_val"}, {"bla": {}}
)
await hass.async_block_till_done()
assert device_tracker.DOMAIN in hass.config.components
assert mock_demo_setup_scanner.called
assert mock_demo_setup_scanner.call_args[0] == (
hass,
{},
mock_see,
{"test_key": "test_val"},
)
async def test_update_stale(hass, mock_device_tracker_conf, enable_custom_integrations):
"""Test stalled update."""
scanner = getattr(hass.components, "test.device_tracker").SCANNER
scanner.reset()
scanner.come_home("DEV1")
now = dt_util.utcnow()
register_time = datetime(now.year + 1, 9, 15, 23, tzinfo=dt_util.UTC)
scan_time = datetime(now.year + 1, 9, 15, 23, 1, tzinfo=dt_util.UTC)
with patch(
"homeassistant.components.device_tracker.legacy.dt_util.utcnow",
return_value=register_time,
), assert_setup_component(1, device_tracker.DOMAIN):
assert await async_setup_component(
hass,
device_tracker.DOMAIN,
{
device_tracker.DOMAIN: {
CONF_PLATFORM: "test",
device_tracker.CONF_CONSIDER_HOME: 59,
}
},
)
await hass.async_block_till_done()
assert hass.states.get("device_tracker.dev1").state == STATE_HOME
scanner.leave_home("DEV1")
with patch(
"homeassistant.components.device_tracker.legacy.dt_util.utcnow",
return_value=scan_time,
):
async_fire_time_changed(hass, scan_time)
await hass.async_block_till_done()
assert hass.states.get("device_tracker.dev1").state == STATE_NOT_HOME
async def test_entity_attributes(
hass, mock_device_tracker_conf, enable_custom_integrations
):
"""Test the entity attributes."""
devices = mock_device_tracker_conf
dev_id = "test_entity"
entity_id = f"{const.DOMAIN}.{dev_id}"
friendly_name = "Paulus"
picture = "http://placehold.it/200x200"
icon = "mdi:kettle"
device = legacy.Device(
hass,
timedelta(seconds=180),
True,
dev_id,
None,
friendly_name,
picture,
icon=icon,
)
devices.append(device)
with assert_setup_component(1, device_tracker.DOMAIN):
assert await async_setup_component(hass, device_tracker.DOMAIN, TEST_PLATFORM)
attrs = hass.states.get(entity_id).attributes
assert friendly_name == attrs.get(ATTR_FRIENDLY_NAME)
assert icon == attrs.get(ATTR_ICON)
assert picture == attrs.get(ATTR_ENTITY_PICTURE)
@patch("homeassistant.components.device_tracker.legacy.DeviceTracker.async_see")
async def test_see_service(mock_see, hass, enable_custom_integrations):
"""Test the see service with a unicode dev_id and NO MAC."""
with assert_setup_component(1, device_tracker.DOMAIN):
assert await async_setup_component(hass, device_tracker.DOMAIN, TEST_PLATFORM)
params = {
"dev_id": "some_device",
"host_name": "example.com",
"location_name": "Work",
"gps": [0.3, 0.8],
"attributes": {"test": "test"},
}
common.async_see(hass, **params)
await hass.async_block_till_done()
assert mock_see.call_count == 1
assert mock_see.call_args == call(**params)
mock_see.reset_mock()
params["dev_id"] += chr(233) # e' acute accent from icloud
common.async_see(hass, **params)
await hass.async_block_till_done()
assert mock_see.call_count == 1
assert mock_see.call_args == call(**params)
async def test_see_service_guard_config_entry(
hass, mock_device_tracker_conf, enable_custom_integrations
):
"""Test the guard if the device is registered in the entity registry."""
mock_entry = Mock()
dev_id = "test"
entity_id = f"{const.DOMAIN}.{dev_id}"
mock_registry(hass, {entity_id: mock_entry})
devices = mock_device_tracker_conf
assert await async_setup_component(hass, device_tracker.DOMAIN, TEST_PLATFORM)
params = {"dev_id": dev_id, "gps": [0.3, 0.8]}
common.async_see(hass, **params)
await hass.async_block_till_done()
assert not devices
async def test_new_device_event_fired(
hass, mock_device_tracker_conf, enable_custom_integrations
):
"""Test that the device tracker will fire an event."""
with assert_setup_component(1, device_tracker.DOMAIN):
assert await async_setup_component(hass, device_tracker.DOMAIN, TEST_PLATFORM)
test_events = []
@callback
def listener(event):
"""Record that our event got called."""
test_events.append(event)
hass.bus.async_listen("device_tracker_new_device", listener)
common.async_see(hass, "mac_1", host_name="hello")
common.async_see(hass, "mac_1", host_name="hello")
await hass.async_block_till_done()
assert len(test_events) == 1
# Assert we can serialize the event
json.dumps(test_events[0].as_dict(), cls=JSONEncoder)
assert test_events[0].data == {
"entity_id": "device_tracker.hello",
"host_name": "hello",
"mac": "MAC_1",
}
async def test_duplicate_yaml_keys(
hass, mock_device_tracker_conf, enable_custom_integrations
):
"""Test that the device tracker will not generate invalid YAML."""
devices = mock_device_tracker_conf
with assert_setup_component(1, device_tracker.DOMAIN):
assert await async_setup_component(hass, device_tracker.DOMAIN, TEST_PLATFORM)
common.async_see(hass, "mac_1", host_name="hello")
common.async_see(hass, "mac_2", host_name="hello")
await hass.async_block_till_done()
assert len(devices) == 2
assert devices[0].dev_id != devices[1].dev_id
async def test_invalid_dev_id(
hass, mock_device_tracker_conf, enable_custom_integrations
):
"""Test that the device tracker will not allow invalid dev ids."""
devices = mock_device_tracker_conf
with assert_setup_component(1, device_tracker.DOMAIN):
assert await async_setup_component(hass, device_tracker.DOMAIN, TEST_PLATFORM)
common.async_see(hass, dev_id="hello-world")
await hass.async_block_till_done()
assert not devices
async def test_see_state(hass, yaml_devices, enable_custom_integrations):
"""Test device tracker see records state correctly."""
assert await async_setup_component(hass, device_tracker.DOMAIN, TEST_PLATFORM)
params = {
"mac": "AA:BB:CC:DD:EE:FF",
"dev_id": "some_device",
"host_name": "example.com",
"location_name": "Work",
"gps": [0.3, 0.8],
"gps_accuracy": 1,
"battery": 100,
"attributes": {"test": "test", "number": 1},
}
common.async_see(hass, **params)
await hass.async_block_till_done()
config = await legacy.async_load_config(yaml_devices, hass, timedelta(seconds=0))
assert len(config) == 1
state = hass.states.get("device_tracker.example_com")
attrs = state.attributes
assert state.state == "Work"
assert state.object_id == "example_com"
assert state.name == "example.com"
assert attrs["friendly_name"] == "example.com"
assert attrs["battery"] == 100
assert attrs["latitude"] == 0.3
assert attrs["longitude"] == 0.8
assert attrs["test"] == "test"
assert attrs["gps_accuracy"] == 1
assert attrs["source_type"] == "gps"
assert attrs["number"] == 1
async def test_see_passive_zone_state(
hass, mock_device_tracker_conf, enable_custom_integrations
):
"""Test that the device tracker sets gps for passive trackers."""
now = dt_util.utcnow()
register_time = datetime(now.year + 1, 9, 15, 23, tzinfo=dt_util.UTC)
scan_time = datetime(now.year + 1, 9, 15, 23, 1, tzinfo=dt_util.UTC)
with assert_setup_component(1, zone.DOMAIN):
zone_info = {
"name": "Home",
"latitude": 1,
"longitude": 2,
"radius": 250,
"passive": False,
}
await async_setup_component(hass, zone.DOMAIN, {"zone": zone_info})
scanner = getattr(hass.components, "test.device_tracker").SCANNER
scanner.reset()
scanner.come_home("dev1")
with patch(
"homeassistant.components.device_tracker.legacy.dt_util.utcnow",
return_value=register_time,
), assert_setup_component(1, device_tracker.DOMAIN):
assert await async_setup_component(
hass,
device_tracker.DOMAIN,
{
device_tracker.DOMAIN: {
CONF_PLATFORM: "test",
device_tracker.CONF_CONSIDER_HOME: 59,
}
},
)
await hass.async_block_till_done()
state = hass.states.get("device_tracker.dev1")
attrs = state.attributes
assert state.state == STATE_HOME
assert state.object_id == "dev1"
assert state.name == "dev1"
assert attrs.get("friendly_name") == "dev1"
assert attrs.get("latitude") == 1
assert attrs.get("longitude") == 2
assert attrs.get("gps_accuracy") == 0
assert attrs.get("source_type") == device_tracker.SOURCE_TYPE_ROUTER
scanner.leave_home("dev1")
with patch(
"homeassistant.components.device_tracker.legacy.dt_util.utcnow",
return_value=scan_time,
):
async_fire_time_changed(hass, scan_time)
await hass.async_block_till_done()
state = hass.states.get("device_tracker.dev1")
attrs = state.attributes
assert state.state == STATE_NOT_HOME
assert state.object_id == "dev1"
assert state.name == "dev1"
assert attrs.get("friendly_name") == "dev1"
assert attrs.get("latitude") is None
assert attrs.get("longitude") is None
assert attrs.get("gps_accuracy") is None
assert attrs.get("source_type") == device_tracker.SOURCE_TYPE_ROUTER
@patch("homeassistant.components.device_tracker.const.LOGGER.warning")
async def test_see_failures(mock_warning, hass, mock_device_tracker_conf):
"""Test that the device tracker see failures."""
devices = mock_device_tracker_conf
tracker = legacy.DeviceTracker(hass, timedelta(seconds=60), 0, {}, [])
# MAC is not a string (but added)
await tracker.async_see(mac=567, host_name="Number MAC")
# No device id or MAC(not added)
with pytest.raises(HomeAssistantError):
await tracker.async_see()
assert mock_warning.call_count == 0
# Ignore gps on invalid GPS (both added & warnings)
await tracker.async_see(mac="mac_1_bad_gps", gps=1)
await tracker.async_see(mac="mac_2_bad_gps", gps=[1])
await tracker.async_see(mac="mac_3_bad_gps", gps="gps")
await hass.async_block_till_done()
assert mock_warning.call_count == 3
assert len(devices) == 4
async def test_async_added_to_hass(hass):
"""Test restoring state."""
attr = {
ATTR_LONGITUDE: 18,
ATTR_LATITUDE: -33,
const.ATTR_SOURCE_TYPE: "gps",
ATTR_GPS_ACCURACY: 2,
const.ATTR_BATTERY: 100,
}
mock_restore_cache(hass, [State("device_tracker.jk", "home", attr)])
path = hass.config.path(legacy.YAML_DEVICES)
files = {path: "jk:\n name: JK Phone\n track: True"}
with patch_yaml_files(files):
assert await async_setup_component(hass, device_tracker.DOMAIN, {})
state = hass.states.get("device_tracker.jk")
assert state
assert state.state == "home"
for key, val in attr.items():
atr = state.attributes.get(key)
assert atr == val, f"{key}={atr} expected: {val}"
async def test_bad_platform(hass):
"""Test bad platform."""
config = {"device_tracker": [{"platform": "bad_platform"}]}
with assert_setup_component(0, device_tracker.DOMAIN):
assert await async_setup_component(hass, device_tracker.DOMAIN, config)
assert f"{device_tracker.DOMAIN}.bad_platform" not in hass.config.components
async def test_adding_unknown_device_to_config(
mock_device_tracker_conf, hass, enable_custom_integrations
):
"""Test the adding of unknown devices to configuration file."""
scanner = getattr(hass.components, "test.device_tracker").SCANNER
scanner.reset()
scanner.come_home("DEV1")
await async_setup_component(
hass, device_tracker.DOMAIN, {device_tracker.DOMAIN: {CONF_PLATFORM: "test"}}
)
await hass.async_block_till_done()
assert len(mock_device_tracker_conf) == 1
device = mock_device_tracker_conf[0]
assert device.dev_id == "dev1"
assert device.track
async def test_picture_and_icon_on_see_discovery(mock_device_tracker_conf, hass):
"""Test that picture and icon are set in initial see."""
tracker = legacy.DeviceTracker(hass, timedelta(seconds=60), False, {}, [])
await tracker.async_see(dev_id=11, picture="pic_url", icon="mdi:icon")
await hass.async_block_till_done()
assert len(mock_device_tracker_conf) == 1
assert mock_device_tracker_conf[0].icon == "mdi:icon"
assert mock_device_tracker_conf[0].entity_picture == "pic_url"
async def test_backward_compatibility_for_track_new(mock_device_tracker_conf, hass):
"""Test backward compatibility for track new."""
tracker = legacy.DeviceTracker(
hass, timedelta(seconds=60), False, {device_tracker.CONF_TRACK_NEW: True}, []
)
await tracker.async_see(dev_id=13)
await hass.async_block_till_done()
assert len(mock_device_tracker_conf) == 1
assert mock_device_tracker_conf[0].track is False
async def test_old_style_track_new_is_skipped(mock_device_tracker_conf, hass):
"""Test old style config is skipped."""
tracker = legacy.DeviceTracker(
hass, timedelta(seconds=60), None, {device_tracker.CONF_TRACK_NEW: False}, []
)
await tracker.async_see(dev_id=14)
await hass.async_block_till_done()
assert len(mock_device_tracker_conf) == 1
assert mock_device_tracker_conf[0].track is False
def test_see_schema_allowing_ios_calls():
"""Test SEE service schema allows extra keys.
    Temporary workaround because the iOS app sends incorrect data.
"""
device_tracker.SERVICE_SEE_PAYLOAD_SCHEMA(
{
"dev_id": "Test",
"battery": 35,
"battery_status": "Not Charging",
"gps": [10.0, 10.0],
"gps_accuracy": 300,
"hostname": "beer",
}
)
|
|
# -*- coding: utf-8 -*-
from datetime import datetime
import argparse
import json
import requests
from flask import Flask, request, render_template
from requests.packages.urllib3.exceptions import InsecureRequestWarning
import config
import db
import utils
from names import POKEMON_NAMES
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
# Check whether config has all necessary attributes
REQUIRED_SETTINGS = (
'TRASH_IDS',
'AREA_NAME',
'REPORT_SINCE',
'SCAN_RADIUS',
'MAP_PROVIDER_URL',
'MAP_PROVIDER_ATTRIBUTION',
'DISABLE_WORKERS',
)
for setting_name in REQUIRED_SETTINGS:
if not hasattr(config, setting_name):
raise RuntimeError('Please set "{}" in config'.format(setting_name))
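# A minimal config module satisfying the checks above might look like this
# (illustrative placeholder values, not a real deployment):
#
#     # config.py
#     TRASH_IDS = {16, 19, 41}              # pokemon ids to de-emphasize
#     AREA_NAME = 'Example Town'
#     REPORT_SINCE = 0                      # unix timestamp
#     SCAN_RADIUS = 70                      # meters
#     MAP_PROVIDER_URL = '//{s}.tile.example.org/{z}/{x}/{y}.png'
#     MAP_PROVIDER_ATTRIBUTION = 'Map data (c) contributors'
#     DISABLE_WORKERS = []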
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument(
'-H',
'--host',
help='Set web server listening host',
default='127.0.0.1'
)
parser.add_argument(
'-P',
'--port',
type=int,
help='Set web server listening port',
default=5000
)
parser.add_argument(
'-d', '--debug', help='Debug Mode', action='store_true'
)
parser.add_argument(
'-A',
'--after',
type=int,
help='Get all sightings after a certain time',
default=0
)
return parser.parse_args()
app = Flask(__name__, template_folder='templates')
@app.route('/data')
def pokemon_data():
return json.dumps(get_pokemarkers())
@app.route('/discord')
def discord():
"""Gets all the PokeMarkers via REST"""
return json.dumps(get_pokeDiscord())
@app.route('/workers_data')
def workers_data():
return json.dumps({
'points': get_worker_markers(),
'scan_radius': config.SCAN_RADIUS,
})
@app.route('/')
def fullmap():
map_center = utils.get_map_center()
return render_template(
'newmap.html',
area_name=config.AREA_NAME,
map_center=map_center,
map_provider_url=config.MAP_PROVIDER_URL,
map_provider_attribution=config.MAP_PROVIDER_ATTRIBUTION,
)
def get_pokeDiscord():
data = []
# Get the Pokemon out of the Database
session = db.Session()
pokemons = db.get_sightings(session)
session.close()
for pokemon in pokemons:
name = POKEMON_NAMES[pokemon.pokemon_id]
datestr = datetime.fromtimestamp(pokemon.expire_timestamp)
dateoutput = datestr.strftime("%H:%M:%S")
data.append({
'type': 'pokemon',
'name': name,
'key': '{}-{}'.format(pokemon.pokemon_id, pokemon.spawn_id),
'disappear_time': pokemon.expire_timestamp,
'icon': 'static/icons/%d.png' % pokemon.pokemon_id,
'lat': pokemon.lat,
'lng': pokemon.lon,
'pokemon_id': pokemon.pokemon_id,
'ATK_IV': pokemon.ATK_IV,
'DEF_IV': pokemon.DEF_IV,
'STA_IV': pokemon.STA_IV
})
return data
def get_pokemarkers():
markers = []
session = db.Session()
if args.after == 0:
pokemons = db.get_sightings(session)
else:
pokemons = db.get_sightings_after(session, args.after)
forts = db.get_forts(session)
pokestops = db.get_pokestops(session)
session.close()
for pokemon in pokemons:
markers.append({
'id': 'pokemon-{}'.format(pokemon.id),
'type': 'pokemon',
'trash': pokemon.pokemon_id in config.TRASH_IDS,
'name': POKEMON_NAMES[pokemon.pokemon_id],
'pokemon_id': pokemon.pokemon_id,
'lat': pokemon.lat,
'lon': pokemon.lon,
'expires_at': pokemon.expire_timestamp,
})
for fort in forts:
if fort['guard_pokemon_id']:
pokemon_name = POKEMON_NAMES[fort['guard_pokemon_id']]
else:
pokemon_name = 'Empty'
markers.append({
'id': 'fort-{}'.format(fort['fort_id']),
'sighting_id': fort['id'],
'type': 'fort',
'prestige': fort['prestige'],
'pokemon_id': fort['guard_pokemon_id'],
'pokemon_name': pokemon_name,
'team': fort['team'],
'lat': fort['lat'],
'lon': fort['lon'],
})
for pokestop in pokestops:
markers.append({
'id': 'stop-{}'.format(pokestop['id']),
'type': 'pokestop',
'lat': pokestop['lat'],
'lon': pokestop['lon'],
})
return markers
def get_worker_markers():
markers = []
points = utils.get_points_per_worker()
# Worker start points
for worker_no, worker_points in enumerate(points):
coords = utils.get_start_coords(worker_no)
if (worker_no not in config.DISABLE_WORKERS):
markers.append({
'lat': coords[0],
'lon': coords[1],
'type': 'worker',
'worker_no': worker_no,
})
# Circles
for i, point in enumerate(worker_points):
markers.append({
'lat': point[0],
'lon': point[1],
'type': 'worker_point',
'worker_no': worker_no,
'point_no': i,
})
return markers
@app.route('/report')
def report_main():
session = db.Session()
top_pokemon = db.get_top_pokemon(session)
bottom_pokemon = db.get_top_pokemon(session, order='ASC')
bottom_sightings = db.get_all_sightings(
session, [r[0] for r in bottom_pokemon]
)
stage2_pokemon = db.get_stage2_pokemon(session)
if stage2_pokemon:
stage2_sightings = db.get_all_sightings(
session, [r[0] for r in stage2_pokemon]
)
else:
stage2_sightings = []
js_data = {
'charts_data': {
'punchcard': db.get_punch_card(session),
'top30': [(POKEMON_NAMES[r[0]], r[1]) for r in top_pokemon],
'bottom30': [
(POKEMON_NAMES[r[0]], r[1]) for r in bottom_pokemon
],
'stage2': [
(POKEMON_NAMES[r[0]], r[1]) for r in stage2_pokemon
],
},
'maps_data': {
'bottom30': [sighting_to_marker(s) for s in bottom_sightings],
'stage2': [sighting_to_marker(s) for s in stage2_sightings],
},
'map_center': utils.get_map_center(),
'zoom': 13,
}
icons = {
'top30': [(r[0], POKEMON_NAMES[r[0]]) for r in top_pokemon],
'bottom30': [(r[0], POKEMON_NAMES[r[0]]) for r in bottom_pokemon],
'stage2': [(r[0], POKEMON_NAMES[r[0]]) for r in stage2_pokemon],
'nonexistent': [
(r, POKEMON_NAMES[r])
for r in db.get_nonexistent_pokemon(session)
]
}
session_stats = db.get_session_stats(session)
session.close()
area = utils.get_scan_area()
return render_template(
'report.html',
current_date=datetime.now(),
area_name=config.AREA_NAME,
area_size=area,
total_spawn_count=session_stats['count'],
spawns_per_hour=session_stats['per_hour'],
session_start=session_stats['start'],
session_end=session_stats['end'],
session_length_hours=int(session_stats['length_hours']),
js_data=js_data,
icons=icons,
google_maps_key=config.GOOGLE_MAPS_KEY,
)
@app.route('/report/<int:pokemon_id>')
def report_single(pokemon_id):
session = db.Session()
session_stats = db.get_session_stats(session)
js_data = {
'charts_data': {
'hours': db.get_spawns_per_hour(session, pokemon_id),
},
'map_center': utils.get_map_center(),
'zoom': 13,
}
session.close()
return render_template(
'report_single.html',
current_date=datetime.now(),
area_name=config.AREA_NAME,
area_size=utils.get_scan_area(),
pokemon_id=pokemon_id,
pokemon_name=POKEMON_NAMES[pokemon_id],
total_spawn_count=db.get_total_spawns_count(session, pokemon_id),
session_start=session_stats['start'],
session_end=session_stats['end'],
session_length_hours=int(session_stats['length_hours']),
google_maps_key=config.GOOGLE_MAPS_KEY,
js_data=js_data,
)
def sighting_to_marker(sighting):
return {
'icon': '/static/icons/{}.png'.format(sighting.pokemon_id),
'lat': sighting.lat,
'lon': sighting.lon,
}
@app.route('/report/heatmap')
def report_heatmap():
session = db.Session()
pokemon_id = request.args.get('id')
points = db.get_all_spawn_coords(session, pokemon_id=pokemon_id)
session.close()
return json.dumps(points)
@app.route('/report/heatmap/time_based')
def report_time_based_heatmap():
session = db.Session()
pokemon_id = request.args.get('id')
time_data = db.get_spawns_per_minute(session, pokemon_id)
session.close()
return json.dumps(time_data)
if __name__ == '__main__':
args = get_args()
    app.run(debug=args.debug, threaded=True, host=args.host, port=args.port)
|
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PEP8 tests for yapf.reformatter."""
import textwrap
import unittest
from yapf.yapflib import reformatter
from yapf.yapflib import style
from yapftests import yapf_test_helper
class TestsForPEP8Style(yapf_test_helper.YAPFTest):
@classmethod
def setUpClass(cls):
style.SetGlobalStyle(style.CreatePEP8Style())
def testIndent4(self):
unformatted_code = textwrap.dedent("""\
if a+b:
pass
""")
expected_formatted_code = textwrap.dedent("""\
if a + b:
pass
""")
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
def testSingleLineIfStatements(self):
code = textwrap.dedent("""\
if True: a = 42
elif False: b = 42
else: c = 42
""")
uwlines = yapf_test_helper.ParseAndUnwrap(code)
self.assertCodeEqual(code, reformatter.Reformat(uwlines))
def testNoBlankBetweenClassAndDef(self):
unformatted_code = textwrap.dedent("""\
class Foo:
def joe():
pass
""")
expected_formatted_code = textwrap.dedent("""\
class Foo:
def joe():
pass
""")
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
def testSingleWhiteBeforeTrailingComment(self):
unformatted_code = textwrap.dedent("""\
if a+b: # comment
pass
""")
expected_formatted_code = textwrap.dedent("""\
if a + b: # comment
pass
""")
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
def testSpaceBetweenEndingCommandAndClosingBracket(self):
unformatted_code = textwrap.dedent("""\
a = (
1,
)
""")
expected_formatted_code = textwrap.dedent("""\
a = (1, )
""")
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
def testContinuedNonOutdentedLine(self):
code = textwrap.dedent("""\
class eld(d):
if str(geom.geom_type).upper(
) != self.geom_type and not self.geom_type == 'GEOMETRY':
ror(code='om_type')
""")
uwlines = yapf_test_helper.ParseAndUnwrap(code)
self.assertCodeEqual(code, reformatter.Reformat(uwlines))
def testWrappingPercentExpressions(self):
unformatted_code = textwrap.dedent("""\
def f():
if True:
zzzzz = '%s-%s' % (xxxxxxxxxxxxxxxxxxxxxxxxxx + 1, xxxxxxxxxxxxxxxxx.yyy + 1)
zzzzz = '%s-%s'.ww(xxxxxxxxxxxxxxxxxxxxxxxxxx + 1, xxxxxxxxxxxxxxxxx.yyy + 1)
zzzzz = '%s-%s' % (xxxxxxxxxxxxxxxxxxxxxxx + 1, xxxxxxxxxxxxxxxxxxxxx + 1)
zzzzz = '%s-%s'.ww(xxxxxxxxxxxxxxxxxxxxxxx + 1, xxxxxxxxxxxxxxxxxxxxx + 1)
""")
expected_formatted_code = textwrap.dedent("""\
def f():
if True:
zzzzz = '%s-%s' % (xxxxxxxxxxxxxxxxxxxxxxxxxx + 1,
xxxxxxxxxxxxxxxxx.yyy + 1)
zzzzz = '%s-%s'.ww(xxxxxxxxxxxxxxxxxxxxxxxxxx + 1,
xxxxxxxxxxxxxxxxx.yyy + 1)
zzzzz = '%s-%s' % (xxxxxxxxxxxxxxxxxxxxxxx + 1,
xxxxxxxxxxxxxxxxxxxxx + 1)
zzzzz = '%s-%s'.ww(xxxxxxxxxxxxxxxxxxxxxxx + 1,
xxxxxxxxxxxxxxxxxxxxx + 1)
""")
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
def testAlignClosingBracketWithVisualIndentation(self):
unformatted_code = textwrap.dedent("""\
TEST_LIST = ('foo', 'bar', # first comment
'baz' # second comment
)
""")
expected_formatted_code = textwrap.dedent("""\
TEST_LIST = (
'foo',
'bar', # first comment
'baz' # second comment
)
""")
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
unformatted_code = textwrap.dedent("""\
def f():
def g():
while (xxxxxxxxxxxxxxxxxxxx(yyyyyyyyyyyyy[zzzzz]) == 'aaaaaaaaaaa' and
xxxxxxxxxxxxxxxxxxxx(yyyyyyyyyyyyy[zzzzz].aaaaaaaa[0]) == 'bbbbbbb'
):
pass
""")
expected_formatted_code = textwrap.dedent("""\
def f():
def g():
while (xxxxxxxxxxxxxxxxxxxx(yyyyyyyyyyyyy[zzzzz]) == 'aaaaaaaaaaa'
and xxxxxxxxxxxxxxxxxxxx(
yyyyyyyyyyyyy[zzzzz].aaaaaaaa[0]) == 'bbbbbbb'):
pass
""")
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
def testIndentSizeChanging(self):
unformatted_code = textwrap.dedent("""\
if True:
runtime_mins = (program_end_time - program_start_time).total_seconds() / 60.0
""")
expected_formatted_code = textwrap.dedent("""\
if True:
runtime_mins = (
program_end_time - program_start_time).total_seconds() / 60.0
""")
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
def testHangingIndentCollision(self):
unformatted_code = textwrap.dedent("""\
if (aaaaaaaaaaaaaa + bbbbbbbbbbbbbbbb == ccccccccccccccccc and xxxxxxxxxxxxx or yyyyyyyyyyyyyyyyy):
pass
elif (xxxxxxxxxxxxxxx(aaaaaaaaaaa, bbbbbbbbbbbbbb, cccccccccccc, dddddddddd=None)):
pass
def h():
if (xxxxxxxxxxxx.yyyyyyyy(zzzzzzzzzzzzz[0]) == 'aaaaaaaaaaa' and xxxxxxxxxxxx.yyyyyyyy(zzzzzzzzzzzzz[0].mmmmmmmm[0]) == 'bbbbbbb'):
pass
for connection in itertools.chain(branch.contact, branch.address, morestuff.andmore.andmore.andmore.andmore.andmore.andmore.andmore):
dosomething(connection)
""")
expected_formatted_code = textwrap.dedent("""\
if (aaaaaaaaaaaaaa + bbbbbbbbbbbbbbbb == ccccccccccccccccc and xxxxxxxxxxxxx
or yyyyyyyyyyyyyyyyy):
pass
elif (xxxxxxxxxxxxxxx(
aaaaaaaaaaa, bbbbbbbbbbbbbb, cccccccccccc, dddddddddd=None)):
pass
def h():
if (xxxxxxxxxxxx.yyyyyyyy(zzzzzzzzzzzzz[0]) == 'aaaaaaaaaaa' and
xxxxxxxxxxxx.yyyyyyyy(zzzzzzzzzzzzz[0].mmmmmmmm[0]) == 'bbbbbbb'):
pass
for connection in itertools.chain(
branch.contact, branch.address,
morestuff.andmore.andmore.andmore.andmore.andmore.andmore.andmore):
dosomething(connection)
""")
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
def testSplittingBeforeLogicalOperator(self):
try:
style.SetGlobalStyle(
style.CreateStyleFromConfig(
'{based_on_style: pep8, split_before_logical_operator: True}'))
unformatted_code = textwrap.dedent("""\
def foo():
return bool(update.message.new_chat_member or update.message.left_chat_member or
update.message.new_chat_title or update.message.new_chat_photo or
update.message.delete_chat_photo or update.message.group_chat_created or
update.message.supergroup_chat_created or update.message.channel_chat_created
or update.message.migrate_to_chat_id or update.message.migrate_from_chat_id or
update.message.pinned_message)
""")
expected_formatted_code = textwrap.dedent("""\
def foo():
return bool(
update.message.new_chat_member or update.message.left_chat_member
or update.message.new_chat_title or update.message.new_chat_photo
or update.message.delete_chat_photo
or update.message.group_chat_created
or update.message.supergroup_chat_created
or update.message.channel_chat_created
or update.message.migrate_to_chat_id
or update.message.migrate_from_chat_id
or update.message.pinned_message)
""")
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code,
reformatter.Reformat(uwlines))
finally:
style.SetGlobalStyle(style.CreatePEP8Style())
def testContiguousListEndingWithComment(self):
unformatted_code = textwrap.dedent("""\
if True:
if True:
keys.append(aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa) # may be unassigned.
""")
expected_formatted_code = textwrap.dedent("""\
if True:
if True:
keys.append(
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa) # may be unassigned.
""")
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
def testSplittingBeforeFirstArgument(self):
try:
style.SetGlobalStyle(
style.CreateStyleFromConfig(
'{based_on_style: pep8, split_before_first_argument: True}'))
unformatted_code = textwrap.dedent("""\
a_very_long_function_name(long_argument_name_1=1, long_argument_name_2=2,
long_argument_name_3=3, long_argument_name_4=4)
""")
expected_formatted_code = textwrap.dedent("""\
a_very_long_function_name(
long_argument_name_1=1,
long_argument_name_2=2,
long_argument_name_3=3,
long_argument_name_4=4)
""")
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code,
reformatter.Reformat(uwlines))
finally:
style.SetGlobalStyle(style.CreatePEP8Style())
def testSplittingExpressionsInsideSubscripts(self):
unformatted_code = textwrap.dedent("""\
def foo():
df = df[(df['campaign_status'] == 'LIVE') & (df['action_status'] == 'LIVE')]
""")
expected_formatted_code = textwrap.dedent("""\
def foo():
df = df[(df['campaign_status'] == 'LIVE')
& (df['action_status'] == 'LIVE')]
""")
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
def testSplitListsAndDictSetMakersIfCommaTerminated(self):
unformatted_code = textwrap.dedent("""\
DJANGO_TEMPLATES_OPTIONS = {"context_processors": []}
DJANGO_TEMPLATES_OPTIONS = {"context_processors": [],}
x = ["context_processors"]
x = ["context_processors",]
""")
expected_formatted_code = textwrap.dedent("""\
DJANGO_TEMPLATES_OPTIONS = {"context_processors": []}
DJANGO_TEMPLATES_OPTIONS = {
"context_processors": [],
}
x = ["context_processors"]
x = [
"context_processors",
]
""")
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
def testSplitAroundNamedAssigns(self):
unformatted_code = textwrap.dedent("""\
class a():
def a(): return a(
aaaaaaaaaa=aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa)
""")
expected_formatted_code = textwrap.dedent("""\
class a():
def a():
return a(
aaaaaaaaaa=aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
)
""")
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
def testUnaryOperator(self):
unformatted_code = textwrap.dedent("""\
if not -3 < x < 3:
pass
if -3 < x < 3:
pass
""")
expected_formatted_code = textwrap.dedent("""\
if not -3 < x < 3:
pass
if -3 < x < 3:
pass
""")
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
def testNoSplitBeforeDictValue(self):
try:
style.SetGlobalStyle(
style.CreateStyleFromConfig('{based_on_style: pep8, '
'allow_split_before_dict_value: false, '
'coalesce_brackets: true, '
'dedent_closing_brackets: true, '
'each_dict_entry_on_separate_line: true, '
'split_before_logical_operator: true}'))
unformatted_code = textwrap.dedent("""\
some_dict = {
'title': _("I am example data"),
'description': _("Lorem ipsum dolor met sit amet elit, si vis pacem para bellum "
"elites nihi very long string."),
}
""")
expected_formatted_code = textwrap.dedent("""\
some_dict = {
'title': _("I am example data"),
'description': _(
"Lorem ipsum dolor met sit amet elit, si vis pacem para bellum "
"elites nihi very long string."
),
}
""")
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code,
reformatter.Reformat(uwlines))
unformatted_code = textwrap.dedent("""\
X = {'a': 1, 'b': 2, 'key': this_is_a_function_call_that_goes_over_the_column_limit_im_pretty_sure()}
""")
expected_formatted_code = textwrap.dedent("""\
X = {
'a': 1,
'b': 2,
'key': this_is_a_function_call_that_goes_over_the_column_limit_im_pretty_sure()
}
""")
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code,
reformatter.Reformat(uwlines))
unformatted_code = textwrap.dedent("""\
attrs = {
'category': category,
'role': forms.ModelChoiceField(label=_("Role"), required=False, queryset=category_roles, initial=selected_role, empty_label=_("No access"),),
}
""")
expected_formatted_code = textwrap.dedent("""\
attrs = {
'category': category,
'role': forms.ModelChoiceField(
label=_("Role"),
required=False,
queryset=category_roles,
initial=selected_role,
empty_label=_("No access"),
),
}
""")
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code,
reformatter.Reformat(uwlines))
unformatted_code = textwrap.dedent("""\
css_class = forms.CharField(
label=_("CSS class"),
required=False,
help_text=_("Optional CSS class used to customize this category appearance from templates."),
)
""")
expected_formatted_code = textwrap.dedent("""\
css_class = forms.CharField(
label=_("CSS class"),
required=False,
help_text=_(
"Optional CSS class used to customize this category appearance from templates."
),
)
""")
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code,
reformatter.Reformat(uwlines))
finally:
style.SetGlobalStyle(style.CreatePEP8Style())
if __name__ == '__main__':
unittest.main()
|
|
from __future__ import division
import numpy as np
import datetime
import rosbag
import rospy
from copy import copy, deepcopy
from segway_rmp.msg import SegwayStatusStamped
from geometry_msgs.msg import PoseStamped
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from math import cos, sin, tan, exp
from tf import transformations as trafo
ndtFName = '/home/sujiwo/ORB_SLAM/Data/20151106-1/ndt.csv'
orbFName = '/home/sujiwo/ORB_SLAM/Data/20151106-1/orb-slam.csv'
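# ClosestPointInLine projects pointChk onto the line through PointA and
# PointB: cs = dot(AP, AB) / dot(AB, AB) is the normalized parameter along
# AB (0 at A, 1 at B; outside [0, 1] the projection falls past the segment
# ends). The returned Pose keeps pointChk's timestamp and orientation.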
def ClosestPointInLine (PointA, PointB, pointChk, ifPointInLine=None):
ap = pointChk.coord() - PointA.coord()
ab = PointB.coord() - PointA.coord()
cs = (np.dot(ap, ab) / np.dot(ab, ab))
ptx = PointA.coord() + ab * cs
retval = Pose(pointChk.timestamp, \
ptx[0], ptx[1], ptx[2], \
pointChk.qx, pointChk.qy, pointChk.qz, pointChk.qw)
if ifPointInLine is None :
return retval
else :
return retval, cs
class Pose :
def __init__ (self, t=0, _x=0, _y=0, _z=0, _qx=0, _qy=0, _qz=0, _qw=1):
try:
self.timestamp = t[0]
self.x = t[1]
self.y = t[2]
self.z = t[3]
self.qx = t[4]
self.qy = t[5]
self.qz = t[6]
self.qw = t[7]
except (TypeError, IndexError):
self.timestamp = t
self.x = _x
self.y = _y
self.z = _z
self.qx = _qx
self.qy = _qy
self.qz = _qz
self.qw = _qw
def plot (self, size=50, **kwargs):
return plt.scatter(self.x, self.y, s=size, linewidths=0, **kwargs)
@staticmethod
# RPY must be in Radian
def xyzEuler (x, y, z, roll, pitch, yaw, timestamp=0):
pose = Pose (timestamp, x, y, z)
qt = trafo.quaternion_from_euler(roll, pitch, yaw)
pose.qx = qt[0]
pose.qy = qt[1]
pose.qz = qt[2]
pose.qw = qt[3]
return pose
def quaternion (self):
return np.array([self.qx, self.qy, self.qz, self.qw])
def __str__ (self):
return "X={}, Y={}, Z={}, Qx={}, Qy={}, Qz={}, Qw={}".format(self.x, self.y, self.z, self.qx, self.qy, self.qz, self.qw)
def time (self):
return datetime.datetime.fromtimestamp(self.timestamp)
def offsetTime (self, t):
self.timestamp += t
def coord (self):
return np.array([self.x, self.y, self.z])
def rot (self):
return np.array([self.qx, self.qy, self.qz, self.qw])
def __sub__ (self, p1):
return np.array([self.x-p1.x, self.y-p1.y, self.z-p1.z])
# Only calculate movement, but does not overwrite the values
# currentTimestamp must be in second
    def segwayMove (self, currentTimestamp, leftWheelVelocity, rightWheelVelocity, yawRate):
        # this is minimum speed to consider yaw changes (ie. yaw damping)
        minSpeed = 0.025
        v = (leftWheelVelocity + rightWheelVelocity) / 2
        dt = currentTimestamp - self.timestamp
        # XXX: May need to change this line
        if abs(v) > minSpeed:
            w = yawRate #(yawRate+0.011)*0.98
        else:
            w = 0.0
        # Pose stores orientation as a quaternion, so derive the current
        # heading (yaw) instead of reading a nonexistent self.theta
        theta0 = self.euler()[2]
        x = self.x + v*cos(theta0) * dt
        y = self.y + v*sin(theta0) * dt
        theta = theta0 + w * dt
        return x, y, theta
@staticmethod
def interpolate (pose1, pose2, ratio):
        if (pose1.timestamp > pose2.timestamp) :
            raise ValueError ("pose1 timestamp must not be greater than pose2's")
td = (pose2.timestamp - pose1.timestamp)
intpose = Pose(pose1.timestamp + ratio*td,
pose1.x + ratio*(pose2.x-pose1.x),
pose1.y + ratio*(pose2.y-pose1.y),
pose1.z + ratio*(pose2.z-pose1.z))
q1 = pose1.quaternion()
q2 = pose2.quaternion()
qInt = trafo.quaternion_slerp(q1, q2, ratio)
intpose.qx, intpose.qy, intpose.qz, intpose.qw = \
qInt[0], qInt[1], qInt[2], qInt[3]
return intpose
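    # Example (hypothetical values): Pose.interpolate(p0, p1, 0.5) with
    # p0 = Pose(0.0, 0, 0, 0) and p1 = Pose(1.0, 2, 0, 0) yields a pose with
    # timestamp 0.5 and x == 1.0; orientation is slerped between quaternions.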
@staticmethod
def average (*poses):
avgpose = Pose()
xs = [p.x for p in poses]
ys = [p.y for p in poses]
zs = [p.z for p in poses]
avgpose.x = sum(xs) / len(poses)
avgpose.y = sum(ys) / len(poses)
avgpose.z = sum(zs) / len(poses)
avgpose.timestamp = np.average([p.timestamp for p in poses])
return avgpose
def publish (self, tfBroadCaster, frame1, frame2):
tfBroadCaster.sendTransform(
(self.x, self.y, self.z),
(self.qx, self.qy, self.qz, self.qw),
rospy.Time.from_sec(self.timestamp),
frame1, frame2
)
# Output euler angle in order of: Roll, Pitch, Yaw
def euler (self):
return np.array(trafo.euler_from_quaternion([self.qx, self.qy, self.qz, self.qw]))
    def setRPY (self, roll, pitch, yaw):
        # counterpart of xyzEuler: overwrite orientation from Euler angles (radians)
        qt = trafo.quaternion_from_euler(roll, pitch, yaw)
        self.qx, self.qy, self.qz, self.qw = qt[0], qt[1], qt[2], qt[3]
def distance (self, pose):
return np.linalg.norm([self.x-pose.x, self.y-pose.y, self.z-pose.z])
def inverse (self):
(qx, qy, qz, qw) = np.array([-self.qx, -self.qy, -self.qz, self.qw]) / np.linalg.norm([self.qx, self.qy, self.qz, self.qw])
return Pose(self.timestamp, -self.x, -self.y, -self.z, qx, qy, qz, qw)
    def toMat4 (self):
        mat4 = np.eye(4)
        # quaternion_matrix returns a 4x4 homogeneous matrix; copy its 3x3 block
        rotm = trafo.quaternion_matrix([self.qx, self.qy, self.qz, self.qw])
        mat4[0:3, 0:3] = rotm[0:3, 0:3]
        mat4[0:3, 3] = (self.x, self.y, self.z)
        return mat4
def toRotMat (self):
return trafo.quaternion_matrix([self.qx, self.qy, self.qz, self.qw])
def __mul__ (self, posev):
p = posev.apply(self)
p.timestamp = self.timestamp
return p
def __rmul__ (self, posev):
return self.apply(posev)
# ot * self
def apply (self, ot):
rotmat1 = self.toRotMat()
rotmat2 = ot.toRotMat()
q = trafo.quaternion_from_matrix(rotmat1.dot(rotmat2))
return Pose (self.timestamp,
rotmat1[0][0:3].dot([ot.x, ot.y, ot.z]) + self.x,
rotmat1[1][0:3].dot([ot.x, ot.y, ot.z]) + self.y,
rotmat1[2][0:3].dot([ot.x, ot.y, ot.z]) + self.z,
q[0], q[1], q[2], q[3])
def doApplyMe (self, ot):
p = self.apply(ot)
self.x = p.x
self.y = p.y
self.z = p.z
self.qx = p.qx
self.qy = p.qy
self.qz = p.qz
self.qw = p.qw
def measureErrorLatLon (self, groundTruth, timeTolerance=0.1, useZ=False):
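        # In the ideal case (projection inside the segment): lateral error is
        # the distance from this pose to its projection onto the ground-truth
        # segment (pMin, pMax); longitudinal error is the distance from that
        # projection to the segment midpoint, a proxy for the true position.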
def doMeasureDistance (p, q, useZ):
if useZ :
return np.linalg.norm ([p.x-q.x, p.y-q.y, p.z-q.z], 2)
else :
return np.linalg.norm ([p.x-q.x, p.y-q.y], 2)
errorLat = 0.0
errorLon = 0.0
pMin, pMax = groundTruth.findNearPosesByTime (self, timeTolerance)
if pMax is None:
errorLat = doMeasureDistance(self, pMin, useZ)
errorLon = errorLat
return errorLat, errorLon
if pMin is None:
errorLat = doMeasureDistance(self, pMax, useZ)
errorLon = errorLat
return errorLat, errorLon
pointChk, c = ClosestPointInLine(pMin, pMax, self, True)
if c>=0.0 and c<=1.0:
halfPoint = (pMin.coord() + pMax.coord()) / 2.0
errorLat = doMeasureDistance(pointChk, self, useZ)
errorLon = np.linalg.norm([halfPoint[0]-pointChk.x, halfPoint[1]-pointChk.y])
elif c<0.0:
errorLat = doMeasureDistance(pMin, self, useZ)
errorLon = doMeasureDistance(pMin, pointChk, useZ)
else:
errorLat = doMeasureDistance(pMax, self, useZ)
errorLon = doMeasureDistance(pMax, pointChk, useZ)
return errorLat, errorLon
def measureErrorLateral (self, groundTruth, timeTolerance=0.1, useZ=False):
def doMeasureDistance (p, q, useZ):
if useZ :
return np.linalg.norm ([p.x-q.x, p.y-q.y, p.z-q.z], 2)
else :
return np.linalg.norm ([p.x-q.x, p.y-q.y], 2)
pMin, pMax = groundTruth.findNearPosesByTime (self, timeTolerance)
# XXX: I know this is Wrong
# if pMin is None or pMax is None:
# return 1000
if pMax is None:
return doMeasureDistance(self, pMin, useZ)
if pMin is None:
return doMeasureDistance(self, pMax, useZ)
# if (pMin is None) or (pMax is None) :
# return -2.0
pointChk, c = ClosestPointInLine(pMin, pMax, self, True)
# Ideal case
if c>=0.0 and c<=1.0:
return doMeasureDistance(pointChk, self, useZ)
# Bad case
elif c<0.0:
return doMeasureDistance(pMin, self, useZ)
# return -3.0
else:
return doMeasureDistance(pMax, self, useZ)
# return -4.0
def measureErrorLongitudinal (self, groundTruth, timeTolerance=0.1, useZ=False):
def doMeasureDistance (p, q, useZ):
if useZ :
return np.linalg.norm ([p.x-q.x, p.y-q.y, p.z-q.z], 2)
else :
return np.linalg.norm ([p.x-q.x, p.y-q.y], 2)
pMin, pMax = groundTruth.findNearPosesByTime (self, timeTolerance)
if pMax is None:
return doMeasureDistance(self, pMin, useZ)
if pMin is None:
return doMeasureDistance(self, pMax, useZ)
pointChk, c = ClosestPointInLine(pMin, pMax, self, True)
if c>=0.0 and c<=1.0:
# XXX: Take halfpoint for approximation of `true' groundtruth
halfPoint = (pMin.coord() + pMax.coord()) / 2.0
return np.linalg.norm([halfPoint[0]-pointChk.x, halfPoint[1]-pointChk.y])
#return doMeasureDistance(halfPoint, pointChk, useZ)
elif c<0.0:
return doMeasureDistance(pMin, pointChk, useZ)
else:
return doMeasureDistance(pMax, pointChk, useZ)
class PoseTable :
def __init__ (self):
self.table = []
self.idList = {}
self.c = 0
def __setitem__ (self, key, value):
self.table.append (value)
self.idList[key] = self.c
self.c += 1
def __getitem__ (self, key):
p = self.idList[key]
return self.table[p]
def __len__ (self):
return len(self.table)
def __iadd__ (self, offset):
for pose in self.table :
pose.offsetTime (offset)
return self
def __isub__ (self, offset):
return self.__iadd__(-offset)
def append (self, pose):
self.table.append (pose)
ckeys = self.idList.keys()
if (len(ckeys)==0):
ckey = -1
else :
ckey = max (ckeys)
self.idList[ckey+1] = self.c
self.c += 1
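    # Example (hypothetical): the table keeps insertion order plus an id map.
    #     tbl = PoseTable()
    #     tbl[10] = Pose(0.0)    # store under external id 10
    #     tbl.append(Pose(1.0))  # auto-assigns the next integer id, 11
    #     assert len(tbl) == 2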
def apply (self, poseX):
for pose in self.table:
pose.doApplyMe (poseX)
pass
def length (self, tolerance=0):
"""
Compute distance spanned by this pose. If miliSecTolerance is not specified, \
we assume that there is no gap
"""
totaldist = 0
for p in range(1, len(self.table)):
cpose = self.table[p]
ppose = self.table[p-1]
if (tolerance>0) :
if abs(ppose.timestamp - cpose.timestamp) > tolerance:
print('far')
continue
dist = np.linalg.norm([cpose.x-ppose.x, cpose.y-ppose.y, cpose.z-ppose.z])
totaldist += dist
return totaldist
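# Usage sketch (assuming `poses` is a populated PoseTable): total path
# length, skipping segments whose timestamps are more than 0.5 s apart:
#   dist = poses.length(tolerance=0.5)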
def subsetByTime (self, startTimestamp, duration):
if startTimestamp < self.table[0].timestamp or startTimestamp > self.table[-1].timestamp:
raise KeyError("Invalid timestamp: outside table")
if startTimestamp+duration > self.table[-1].timestamp:
raise KeyError("Duration reaches the future")
subset = PoseTable()
for i in range(len(self.table)):
if self.table[i].timestamp >= startTimestamp:
break
while True:
p = self.table[i]
if startTimestamp + duration > p.timestamp:
subset.append(p)
else:
break
i += 1
return subset
def lengths (self):
dists = []
for p in range(1, len(self.table)):
cpose = self.table[p]
ppose = self.table[p-1]
dists.append (np.linalg.norm([cpose.x-ppose.x, cpose.y-ppose.y, cpose.z-ppose.z]))
return dists
def timeLengths (self):
timeDists = []
for p in range(1, len(self.table)):
cpose = self.table[p]
ppose = self.table[p-1]
timeDists.append (abs(cpose.timestamp - ppose.timestamp))
return timeDists
def toArray (self, includeTimestamp=False):
if (includeTimestamp==True) :
mlt = [[p.timestamp, p.x, p.y, p.z, p.qx, p.qy, p.qz, p.qw] for p in self.table]
else :
mlt = [[p.x, p.y, p.z, p.qx, p.qy, p.qz, p.qw] for p in self.table]
return np.array(mlt)
def findNearestByTime (self, pose, tolerance=0):
if (pose.timestamp < self.table[0].timestamp) :
raise KeyError ("Timestamp is before the start of the table")
if (pose.timestamp > self.table[self.c-1].timestamp) :
raise KeyError ("Timestamp is beyond the end of the table: " + str(pose.timestamp))
candidates = set()
i = 0
for p in range(len(self.table)) :
i = p
cpose = self.table[i]
if (cpose.timestamp > pose.timestamp) :
candidates.add(cpose)
i-=1
break
while i!=0 :
cpose = self.table[i]
i -= 1
candidates.add (cpose)
if (cpose.timestamp < pose.timestamp) :
candidates.add (cpose)
break
if (tolerance>0) :
tcandidates=[]
for c in candidates:
c.tdif = abs(c.timestamp-pose.timestamp)
if c.tdif > tolerance:
pass
else:
tcandidates.append(c)
return sorted (tcandidates, key=lambda pose: pose.tdif)
#return sorted (candidates, key=lambda pose: pose.timestamp)
return min(candidates, key=lambda p: abs(p.timestamp-pose.timestamp))
def findNearPosesByTime (self, srcpose, tolerance=0.1):
if (srcpose.timestamp < self.table[0].timestamp) :
raise KeyError ("Timestamp is before the start of the table")
if (srcpose.timestamp > self.table[-1].timestamp) :
raise KeyError ("Timestamp is beyond the end of the table: " + str(srcpose.timestamp))
nearMin = None
nearMax = None
for p in range(len(self)):
i = p
cpose = self.table[i]
if (cpose.timestamp > srcpose.timestamp):
nearMax = copy(cpose)
break
while i != 0 :
cpose = self.table[i]
i -= 1
if (cpose.timestamp < srcpose.timestamp):
nearMin = copy (cpose)
break
return (nearMin, nearMax)
def interpolateByTime (self, srcpose, tolerance=0.1):
pmin, pmax = self.findNearPosesByTime (srcpose, tolerance)
tRatio = (srcpose.timestamp - pmin.timestamp) / (pmax.timestamp - pmin.timestamp)
# Interpolation of Position
pvmin = pmin.coord()
pvmax = pmax.coord()
posInt = pvmin + tRatio * (pvmax - pvmin)
# Interpolation of orientation
qmin = pmin.quaternion()
qmax = pmax.quaternion()
qInt = trafo.quaternion_slerp(qmin, qmax, tRatio)
return Pose(srcpose.timestamp, posInt[0], posInt[1], posInt[2], \
qInt[0], qInt[1], qInt[2], qInt[3])
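# Usage sketch (hypothetical names): interpolate a pose at the timestamp of
# `queryPose`, linear in position and slerp in orientation; assumes both
# temporal neighbours exist in the table:
#   interpolated = groundTruth.interpolateByTime(queryPose)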
def interpolateByProjection (self, srcpose, tolerance=0.1):
pmin, pmax = self.findNearPosesByTime (srcpose, tolerance)
def findNearestInTime (self, timestamp, tolerance=0.1):
candidates = set()
for p in self.table:
tdiff = abs(p.timestamp - timestamp)
if (tdiff < tolerance):
candidates.add(p)
if p.timestamp > timestamp:
break
if (len(candidates)==0):
return None
return min(candidates, key=lambda p: abs(p.timestamp-timestamp))
def findNearestByDistance (self, pose, returnIdx=False, *args):
if (returnIdx==False):
return min(self.table,
key=lambda p:
np.linalg.norm(pose.coord()-p.coord()))
elif (returnIdx==True) :
dist = np.array([np.linalg.norm(pose.coord()-p.coord()) for p in self.table])
return np.argmin(dist)
elif len(args)>0 :
posecoord = np.array([pose, returnIdx, args[0]])
return min(self.table,
key=lambda p:
np.linalg.norm(posecoord-p.coord()))
def last(self):
return self.table[-1]
@staticmethod
def plotMulti (*tables):
pass
def plot (self, col1, col2, **kwargs):
array = self.toArray()
return plt.plot(array[:,col1], array[:,col2], **kwargs)
def plotRange (self, col1, col2, rangeFrom, rangeTo, **kwargs):
array = self.toArray()
return plt.plot(array[rangeFrom:rangeTo, col1], array[rangeFrom:rangeTo, col2], **kwargs)
# Choosing columns: 1->X, 2->Y, 3->Z
def plotTimeToAxis (self, col):
matr = self.toArray(True)
return plt.plot(matr[:,0], matr[:,col])
@staticmethod
def loadCsv (filename):
mat = np.loadtxt(filename)
records = PoseTable ()
for r in mat :
p = Pose (r[0], r[1], r[2], r[3], r[4], r[5], r[6], r[7])
records.append (p)
return records
@staticmethod
def loadFromTfBag (filename, sourceFrameName=None, targetFrameName=None) :
bagsrc = rosbag.Bag(filename, mode='r')
#topicInfo = bagsrc.get_type_and_topic_info('/tf')
i = 0
bagRecord = PoseTable ()
# Message timestamp is what was recorded by `rosbag record'
# Transform timestamp is what was reported by the publisher
for topic, msg, msgTimestamp in bagsrc.read_messages('/tf') :
transform = msg.transforms[0].transform
header = msg.transforms[0].header
tfTimestamp = header.stamp
child_frame_id = msg.transforms[0].child_frame_id
if (sourceFrameName!=None and targetFrameName!=None) :
if header.frame_id!=sourceFrameName or child_frame_id!=targetFrameName :
continue
pose = Pose (tfTimestamp.to_sec(),
transform.translation.x, transform.translation.y, transform.translation.z,
transform.rotation.x, transform.rotation.y, transform.rotation.z, transform.rotation.w)
pose.counter = i
pose.msgTimestamp = msgTimestamp.to_sec()
bagRecord[i] = pose
i += 1
return bagRecord
@staticmethod
def loadFromPoseStampedBag (filename, topicName=None):
bagsrc = rosbag.Bag (filename, mode='r')
poseRecord = PoseTable()
for topic, msg, msgTimestamp in bagsrc.read_messages():
if (topicName is not None and topic != topicName):
continue
if (msg._type != 'geometry_msgs/PoseStamped') :
continue
cpose = Pose(msg.header.stamp.to_sec(), msg.pose.position.x, msg.pose.position.y, msg.pose.position.z,
msg.pose.orientation.x, msg.pose.orientation.y, msg.pose.orientation.z, msg.pose.orientation.w)
cpose.msgTimestamp = msgTimestamp.to_sec()
# Check for invalid values
if (cpose.x >= 1e10 or cpose.y>= 1e10 or cpose.z>=1e10) :
continue
poseRecord.append(cpose)
return poseRecord
@staticmethod
def loadFromArray (msrc):
if (msrc.shape[1] != 8):
raise ValueError ('Source has no timestamp')
# try to sort
msrc = sorted(msrc, key=lambda r: r[0])
table = PoseTable ()
for row in msrc:
pose = Pose(row)
table.append(pose)
return table
@staticmethod
def getFrameList (filename):
bagsrc = rosbag.Bag(filename, mode='r')
frames = {}
for topic, msg, timestamp in bagsrc.read_messages('/tf'):
transform = msg.transforms[0].transform
header = msg.transforms[0].header
child_frame_id = msg.transforms[0].child_frame_id
transformFrame = {'from':header.frame_id, 'to':child_frame_id, 'start':timestamp.to_sec()}
key = header.frame_id+child_frame_id
if (key not in frames):
frames[key] = transformFrame
return frames.values()
# Find all poses in the current table that fall within the time range of targetPoses
def getAllInTimeRanges (self, targetPoses):
matchInTime = PoseTable()
p1 = targetPoses[0]
p2 = targetPoses.last()
for p in self.table:
if p.timestamp >= p1.timestamp and p.timestamp<=p2.timestamp:
matchInTime.append (copy (p))
return matchInTime
@staticmethod
def compareErrorsByTime (poseTbl1, poseTbl2, useZ=True):
"""
poseTbl1 -> for source table
poseTbl2 -> for ground truth
"""
errorVect = []
i=0
for pose in poseTbl1.table:
try:
nearp = poseTbl2.findNearestByTime(pose)
if (useZ):
errv = np.linalg.norm([pose.x-nearp.x, pose.y-nearp.y, pose.z-nearp.z], 2)
else:
errv = np.linalg.norm([pose.x-nearp.x, pose.y-nearp.y], 2)
errorVect.append([pose.timestamp, errv])
i+=1
#if i>=10000:
# break
print ("{} out of {}".format(i, len(poseTbl1)))
except KeyError as e:
print (e)
return errorVect
@staticmethod
def compareErrorsByDistance (poseTblSource, groundTruth, useZ=True):
errorVect = []
i=0
for pose in poseTblSource.table:
try:
nearp = groundTruth.findNearestByDistance(pose)
if (useZ):
errv = np.linalg.norm([pose.x-nearp.x, pose.y-nearp.y, pose.z-nearp.z], 2)
else:
errv = np.linalg.norm([pose.x-nearp.x, pose.y-nearp.y], 2)
errorVect.append([pose.timestamp, errv])
i+=1
#if i>=10000:
# break
print ("{} out of {}".format(i, len(poseTblSource)))
except KeyError as e:
print (e)
return errorVect
@staticmethod
def compareLateralErrors (poseTblSource, groundTruth, tolerance=0.15, useZ=False):
i = 1
for pose in poseTblSource.table:
try:
errMeas = pose.measureErrorLateral (groundTruth, tolerance, useZ)
pose.measuredError = errMeas
# errorVect.append ([pose.timestamp, errMeas])
except KeyError as e:
pose.measuredError = -1.0
i += 1
print ("{} out of {}".format(i, len(poseTblSource)))
@staticmethod
def compareLongitudinalErrors (poseTblSource, groundTruth, tolerance=0.15, useZ=False):
i = 1
for pose in poseTblSource.table:
try:
errMeas = pose.measureErrorLongitudinal (groundTruth, tolerance, useZ)
pose.measuredError = errMeas
# errorVect.append ([pose.timestamp, errMeas])
except KeyError as e:
pose.measuredError = -1.0
i += 1
print ("{} out of {}".format(i, len(poseTblSource)))
@staticmethod
def compareErrors (poseTblSource, groundTruth, tolerance=0.15, useZ=False):
i = 1
for pose in poseTblSource.table:
try:
pose.errorLateral, pose.errorLongitudinal = pose.measureErrorLatLon (groundTruth, tolerance, useZ)
# errorVect.append ([pose.timestamp, errMeas])
except KeyError as e:
pose.errorLateral = -1.0
pose.errorLongitudinal = -1.0
i += 1
print ("{} out of {}".format(i, len(poseTblSource)))
pass
@staticmethod
# XXX: Unfinished
def removeSpuriousPoints (poseTbl1):
newposetbl = PoseTable()
for pose in poseTbl1.table:
pass
return newposetbl
def findBlankTime(self, timeTolerance=0.5):
blanks = []
for p in range(1, len(self.table)):
cpose = self.table[p]
ppose = self.table[p-1]
if abs(cpose.timestamp - ppose.timestamp) > timeTolerance:
blanks.append([ppose, cpose])
return blanks
def lengthFrom2Pose (self, poseIndex1, poseIndex2):
if (type(poseIndex1)==int):
dist = 0.0
for i in range(poseIndex1+1, poseIndex2+1):
cpose = self.table[i]
ppose = self.table[i-1]
cdist = np.linalg.norm([cpose.x-ppose.x, cpose.y-ppose.y, cpose.z-ppose.z], 2)
dist += cdist
return dist
def lengthFrom2Times(self, time1, time2):
pose1 = self.findNearestInTime(time1, 0.25)
idx1 = self.table.index(pose1)
pose2 = self.findNearestInTime(time2, 0.25)
idx2 = self.table.index(pose2)
return self.lengthFrom2Pose (idx1, idx2)
def subset (self, startIdx, stopIdx):
poseTblSubset = PoseTable()
for i in range(startIdx, stopIdx+1):
p = self.table[i]
poseTblSubset.append(p)
return poseTblSubset
def transform (self, dpose):
pass
def findBlankLengthFromGroundTruth (self, groundTruthTbl):
tolerance = 0.25
blankDistFront = 0
# Find blank distance in front
if groundTruthTbl[0].timestamp < self.table[0].timestamp:
pgrnd = groundTruthTbl.findNearestInTime (self.table[0].timestamp, tolerance)
idx = groundTruthTbl.table.index(pgrnd)
blankDistFront = groundTruthTbl.lengthFrom2Pose (0, idx)
else:
blankDistFront = 0
# Find blank distance in rear
blankDistRear = 0
if (groundTruthTbl.last().timestamp > self.table[-1].timestamp):
pgrnd = groundTruthTbl.findNearestInTime (self.table[-1].timestamp, tolerance)
idx = groundTruthTbl.table.index (pgrnd)
blankDistRear = groundTruthTbl.lengthFrom2Pose (idx, len(groundTruthTbl)-1)
else:
blankDistRear = 0
# Find blank distances in middle
blankPoses = self.findBlankTime(tolerance)
blankDistMid = 0
for bPose in blankPoses:
d = groundTruthTbl.lengthFrom2Times (bPose[0].timestamp, bPose[1].timestamp)
blankDistMid += d
return blankDistFront + blankDistMid + blankDistRear
def saveToTfBag (self, bagFileName, parentFrame, childFrame, append=False):
from tf2_msgs.msg import TFMessage
from std_msgs.msg import Header
from geometry_msgs.msg import Transform, TransformStamped, Vector3, Quaternion
import rosbag
def create_tf_message (pose):
header = Header (stamp=rospy.Time(pose.timestamp))
tfmsg = TFMessage(transforms = [TransformStamped()])
tfmsg.transforms[0].header = copy(header)
tfmsg.transforms[0].header.frame_id = parentFrame
tfmsg.transforms[0].child_frame_id = childFrame
tfmsg.transforms[0].transform = Transform()
tfmsg.transforms[0].transform.translation = Vector3(pose.x, pose.y, pose.z)
tfmsg.transforms[0].transform.rotation = Quaternion(x=pose.qx, \
y=pose.qy,
z=pose.qz,
w=pose.qw)
return tfmsg
bagfile = None
if (append==False):
bagfile = rosbag.Bag(bagFileName, mode='w')
else :
bagfile = rosbag.Bag(bagFileName, mode='a')
i = 0
for pose in self.table:
tfmsg = create_tf_message(pose)
bagfile.write('/tf', tfmsg, t=rospy.Time.from_sec(pose.msgTimestamp))
print ("{} / {}".format(i, len(self.table)))
i+=1
bagfile.close()
def saveToPoseStampedBag (self, filename, topic, frame, append=False):
bagfile = None
if (append==False) :
bagfile = rosbag.Bag (filename, mode='w')
else:
bagfile = rosbag.Bag (filename, mode='a')
for pose in self.table:
posemsg = PoseStamped()
posemsg.header.frame_id = frame
posemsg.header.stamp = rospy.Time.from_sec(pose.timestamp)
posemsg.pose.position.x = pose.x
posemsg.pose.position.y = pose.y
posemsg.pose.position.z = pose.z
posemsg.pose.orientation.x = pose.qx
posemsg.pose.orientation.y = pose.qy
posemsg.pose.orientation.z = pose.qz
posemsg.pose.orientation.w = pose.qw
bagfile.write(topic, posemsg, t=rospy.Time.from_sec(pose.timestamp))
bagfile.close()
@staticmethod
def loadSegwayStatusFromBag (bagFilename, limitMsg=0) :
segwayPose = PoseTable()
bagsrc = rosbag.Bag(bagFilename, mode='r')
cPose = Pose()
cPose.theta = 0.0
i = 0
for topic, msg, timestamp in bagsrc.read_messages():
try:
if cPose.timestamp == 0:
cPose.timestamp = timestamp.to_sec()
continue
x, y, theta = cPose.segwayMove(timestamp.to_sec(),
msg.segway.left_wheel_velocity,
msg.segway.right_wheel_velocity,
msg.segway.yaw_rate)
cPose.x = x
cPose.y = y
cPose.theta = theta
cPose.timestamp = timestamp.to_sec()
segwayPose.append (copy(cPose))
i += 1
if (limitMsg!=0 and i>=limitMsg):
break
print (i)
except KeyError:
continue
return segwayPose
def increaseTimeResolution (self, numToAdd=10):
NewPoseTable = PoseTable()
for i in range(len(self)-1) :
p1 = self[i]
p2 = self[i+1]
NewPoseTable.append(p1)
rt = 1.0 / float(numToAdd)
j = 0.0
while (j < 1.0) :
j += rt
pnew = Pose.interpolate(p1, p2, j)
NewPoseTable.append(pnew)
NewPoseTable.append(p2)
return NewPoseTable
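# Usage sketch: build a denser table by inserting 10 interpolated poses
# between every consecutive pair (relies on Pose.interpolate, defined
# elsewhere in this module):
#   dense = poses.increaseTimeResolution(numToAdd=10)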
def joinPoseTables (*poseTbls):
#Find maximum & minimum time
mintimes = [ptb[0].timestamp for ptb in poseTbls]
startTime = min(mintimes)
maxtimes = [ptb.last().timestamp for ptb in poseTbls]
stopTime = max(maxtimes)
# Find optimal time resolution
def timeDiffs (poseTbl) :
diff = []
for p in range(1, len(poseTbl.table)):
cpose = poseTbl.table[p]
ppose = poseTbl.table[p-1]
diff.append(cpose.timestamp - ppose.timestamp)
return diff
poseTblDiffs = [timeDiffs(ptbl) for ptbl in poseTbls]
minDiffs = [min(td) for td in poseTblDiffs]
timeRez = min(minDiffs)
poseList = set()
for ptbl in poseTbls:
for pose in ptbl.table:
pose.parent = ptbl
poseList.add(pose)
allPosesList = sorted(poseList, key=lambda p: p.timestamp)
jointPoses = PoseTable()
for pose in allPosesList:
if pose not in poseList:
continue
cPoses = []
cPoses.append(pose)
for ptbl in poseTbls:
if (pose.parent==ptbl):
continue
friend = ptbl.findNearestInTime(pose.timestamp, 2*timeRez)
if friend != None and friend in poseList:
cPoses.append (friend)
poseAvg = Pose.average(*cPoses)
jointPoses.append(poseAvg)
for p in cPoses:
poseList.discard(p)
# For debugging progress
print ("Length: {} / {}".format(len(jointPoses), len(allPosesList)))
return jointPoses
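# Usage sketch (hypothetical tables): fuse several trajectory estimates into
# one; poses that fall within twice the finest time resolution of each other
# are averaged via Pose.average:
#   fused = joinPoseTables(tblA, tblB, tblC)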
def OrbFixOffline (orbLocalisationBagFilename, mapCsv):
offset = 1
orbLoc = PoseTable.loadFromTfBag(orbLocalisationBagFilename, 'ORB_SLAM/World', 'ORB_SLAM/Camera')
mapArray = np.loadtxt(mapCsv)
orbMapTbl = np.array([[r[0],r[1],r[2],r[3],r[4],r[5],r[6],r[7]] for r in mapArray])
orbMap = PoseTable.loadFromArray(orbMapTbl)
ndtMapTbl = np.array([[r[0],r[8],r[9],r[10],r[11],r[12],r[13],r[14]] for r in mapArray])
ndtMap = PoseTable.loadFromArray(ndtMapTbl)
for loc in orbLoc.table:
loc.kfId = orbMap.findNearestByDistance(loc, True)
loc.kf = orbMap[loc.kfId]
# Fix axes
for pose in orbLoc.table:
x=pose.x
y=pose.y
z=pose.z
pose.x=z
pose.y=-x
pose.z=-y
x=pose.kf.x
y=pose.kf.y
z=pose.kf.z
pose.kf.x=z
pose.kf.y=-x
pose.kf.z=-y
ndtPose = ndtMap[pose.kfId]
ndtPoseOffset = None
try:
ndtPoseOffset = ndtMap[pose.kfId-offset]
kfOffset = orbMap[pose.kfId-offset]
except KeyError:
continue
scale = np.linalg.norm(ndtPose.coord()-ndtPoseOffset.coord()) / \
np.linalg.norm(pose.kf.coord()-kfOffset.coord())
poseRel = Pose(0, ndtPose.x-pose.x, ndtPose.y-pose.y, ndtPose.z-pose.z)
pose.cx = ndtPose.x + scale*poseRel.x
pose.cy = ndtPose.y + scale*poseRel.y
pose.cz = ndtPose.z + scale*poseRel.z
return orbLoc, orbMap, ndtMap
# Custom bag reader class
class BagReader (rosbag.Bag):
def __init__ (self, bagpath, topicname=None):
super(BagReader, self).__init__(bagpath, 'r')
self.rTopicName = topicname
def readByTime (self, second):
rtm = rospy.Time.from_sec(second)
if self.rTopicName is not None:
for topic,msg,time in self.read_messages(topics=self.rTopicName, start_time=rtm):
if msg is None:
raise KeyError("Message not found in that time")
return copy(msg)
def readByCount (self, counter):
pass
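# Usage sketch (hypothetical bag path and topic): fetch the first message on
# a topic at or after a given time:
#   reader = BagReader('/path/to/session.bag', topicname='/tf')
#   msg = reader.readByTime(1476596173.0)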
def formatResultAsRecords (resultMat):
records = PoseTable()
for r in range(len(resultMat)) :
id = int (resultMat[r][0])
pose = Pose(resultMat[r][1:])
records[id] = pose
return records
def flipOrbToNdt (orbPose):
qOrb = [orbPose.qx, orbPose.qy, orbPose.qz, orbPose.qw]
orbFlip = trafo.concatenate_matrices(
trafo.quaternion_matrix(qOrb),
trafo.rotation_matrix(np.pi/2, (1,0,0)),
trafo.rotation_matrix(np.pi/2, (0,0,1))
)
return trafo.quaternion_from_matrix(orbFlip)
def readMessage (bag, topic, timestamp):
tm = rospy.Time.from_sec(timestamp)
for topic, msg, time in bag.read_messages(topics=topic, start_time=tm):
return msg
if __name__ == '__main__' :
poseBag = PoseTable.loadFromPoseStampedBag("/home/sujiwo/Tsukuba2016/data/tsukuba/2016-10-16-14-36-13/ground_truth.bag", '/filtered_ndt_current_pose')
subset3500 = poseBag.subsetByTime(poseBag[3500].timestamp, 300)
pass
|
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.monte
~~~~~~~~~~~~~~~~~~~~~
Lexer for the Monte programming language.
:copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.token import Comment, Error, Keyword, Name, Number, Operator, \
Punctuation, String, Whitespace
from pygments.lexer import RegexLexer, include, words
__all__ = ['MonteLexer']
# `var` handled separately
# `interface` handled separately
_declarations = ['bind', 'def', 'fn', 'object']
_methods = ['method', 'to']
_keywords = [
'as', 'break', 'catch', 'continue', 'else', 'escape', 'exit', 'exports',
'extends', 'finally', 'for', 'guards', 'if', 'implements', 'import',
'in', 'match', 'meta', 'pass', 'return', 'switch', 'try', 'via', 'when',
'while',
]
_operators = [
# Unary
'~', '!',
# Binary
'+', '-', '*', '/', '%', '**', '&', '|', '^', '<<', '>>',
# Binary augmented
'+=', '-=', '*=', '/=', '%=', '**=', '&=', '|=', '^=', '<<=', '>>=',
# Comparison
'==', '!=', '<', '<=', '>', '>=', '<=>',
# Patterns and assignment
':=', '?', '=~', '!~', '=>',
# Calls and sends
'.', '<-', '->',
]
_escape_pattern = (
r'(?:\\x[0-9a-fA-F]{2}|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|'
r'\\["\'\\bftnr])')
# _char = _escape_chars + [('.', String.Char)]
_identifier = r'[_a-zA-Z]\w*'
_constants = [
# Void constants
'null',
# Bool constants
'false', 'true',
# Double constants
'Infinity', 'NaN',
# Special objects
'M', 'Ref', 'throw', 'traceln',
]
_guards = [
'Any', 'Binding', 'Bool', 'Bytes', 'Char', 'DeepFrozen', 'Double',
'Empty', 'Int', 'List', 'Map', 'Near', 'NullOk', 'Same', 'Selfless',
'Set', 'Str', 'SubrangeGuard', 'Transparent', 'Void',
]
_safeScope = [
'_accumulateList', '_accumulateMap', '_auditedBy', '_bind',
'_booleanFlow', '_comparer', '_equalizer', '_iterForever', '_loop',
'_makeBytes', '_makeDouble', '_makeFinalSlot', '_makeInt', '_makeList',
'_makeMap', '_makeMessageDesc', '_makeOrderedSpace', '_makeParamDesc',
'_makeProtocolDesc', '_makeSourceSpan', '_makeString', '_makeVarSlot',
'_makeVerbFacet', '_mapExtract', '_matchSame', '_quasiMatcher',
'_slotToBinding', '_splitList', '_suchThat', '_switchFailed',
'_validateFor', 'b__quasiParser', 'eval', 'import', 'm__quasiParser',
'makeBrandPair', 'makeLazySlot', 'safeScope', 'simple__quasiParser',
]
class MonteLexer(RegexLexer):
"""
Lexer for the `Monte <https://monte.readthedocs.io/>`_ programming language.
.. versionadded:: 2.2
"""
name = 'Monte'
aliases = ['monte']
filenames = ['*.mt']
tokens = {
'root': [
# Comments
(r'#[^\n]*\n', Comment),
# Docstrings
# Apologies for the non-greedy matcher here.
(r'/\*\*.*?\*/', String.Doc),
# `var` declarations
(r'\bvar\b', Keyword.Declaration, 'var'),
# `interface` declarations
(r'\binterface\b', Keyword.Declaration, 'interface'),
# method declarations
(words(_methods, prefix='\\b', suffix='\\b'),
Keyword, 'method'),
# All other declarations
(words(_declarations, prefix='\\b', suffix='\\b'),
Keyword.Declaration),
# Keywords
(words(_keywords, prefix='\\b', suffix='\\b'), Keyword),
# Literals
('[+-]?0x[_0-9a-fA-F]+', Number.Hex),
(r'[+-]?[_0-9]+\.[_0-9]*([eE][+-]?[_0-9]+)?', Number.Float),
('[+-]?[_0-9]+', Number.Integer),
("'", String.Double, 'char'),
('"', String.Double, 'string'),
# Quasiliterals
('`', String.Backtick, 'ql'),
# Operators
(words(_operators), Operator),
# Verb operators
(_identifier + '=', Operator.Word),
# Safe scope constants
(words(_constants, prefix='\\b', suffix='\\b'),
Keyword.Pseudo),
# Safe scope guards
(words(_guards, prefix='\\b', suffix='\\b'), Keyword.Type),
# All other safe scope names
(words(_safeScope, prefix='\\b', suffix='\\b'),
Name.Builtin),
# Identifiers
(_identifier, Name),
# Punctuation
(r'\(|\)|\{|\}|\[|\]|:|,', Punctuation),
# Whitespace
(' +', Whitespace),
# Definite lexer errors
('=', Error),
],
'char': [
# It is definitely an error to have a char of width == 0.
("'", Error, 'root'),
(_escape_pattern, String.Escape, 'charEnd'),
('.', String.Char, 'charEnd'),
],
'charEnd': [
("'", String.Char, '#pop:2'),
# It is definitely an error to have a char of width > 1.
('.', Error),
],
# The state of things coming into an interface.
'interface': [
(' +', Whitespace),
(_identifier, Name.Class, '#pop'),
include('root'),
],
# The state of things coming into a method.
'method': [
(' +', Whitespace),
(_identifier, Name.Function, '#pop'),
include('root'),
],
'string': [
('"', String.Double, 'root'),
(_escape_pattern, String.Escape),
(r'\n', String.Double),
('.', String.Double),
],
'ql': [
('`', String.Backtick, 'root'),
(r'\$' + _escape_pattern, String.Escape),
(r'\$\$', String.Escape),
(r'@@', String.Escape),
(r'\$\{', String.Interpol, 'qlNest'),
(r'@\{', String.Interpol, 'qlNest'),
(r'\$' + _identifier, Name),
('@' + _identifier, Name),
('.', String.Backtick),
],
'qlNest': [
(r'\}', String.Interpol, '#pop'),
include('root'),
],
# The state of things immediately following `var`.
'var': [
(' +', Whitespace),
(_identifier, Name.Variable, '#pop'),
include('root'),
],
}
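# Usage sketch (standard Pygments API; the Monte snippet is illustrative):
#   from pygments import highlight
#   from pygments.formatters import TerminalFormatter
#   print(highlight('def f() { return 1 }', MonteLexer(), TerminalFormatter()))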
|
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import json
import os
import sys
# This is the list of tests to run. It is a dictionary with the following
# fields:
#
# name (required): The name of the step, to show on the buildbot status page.
# path (required): The path to the executable which runs the tests.
# additional_args (optional): An array of optional arguments.
# uses_app_engine_sdk (optional): True if app engine SDK must be in PYTHONPATH.
# uses_sandbox_env (optional): True if CHROME_DEVEL_SANDBOX must be in
# environment.
# disabled (optional): List of platforms the test is disabled on. May contain
# 'win', 'mac', 'linux', or 'android'.
# outputs_presentation_json (optional): If True, pass in --presentation-json
# argument to the test executable to allow it to update the buildbot status
# page. More details here:
# github.com/luci/recipes-py/blob/master/recipe_modules/generator_script/api.py
_CATAPULT_TESTS = [
{
'name': 'BattOr Smoke Tests',
'path': 'common/battor/battor/battor_wrapper_devicetest.py',
'disabled': ['android'],
},
{
'name': 'BattOr Unit Tests',
'path': 'common/battor/bin/run_py_tests',
'disabled': ['android'],
},
{
'name': 'Build Python Tests',
'path': 'catapult_build/bin/run_py_tests',
'disabled': ['android'],
},
{
'name': 'Common Tests',
'path': 'common/bin/run_tests',
},
{
'name': 'Dashboard Dev Server Tests Canary',
'path': 'dashboard/bin/run_dev_server_tests',
'additional_args': [
'--no-install-hooks',
'--no-use-local-chrome',
'--channel=canary'
],
'outputs_presentation_json': True,
'disabled': ['android'],
},
{
'name': 'Dashboard Dev Server Tests Stable',
'path': 'dashboard/bin/run_dev_server_tests',
'additional_args': [
'--no-install-hooks',
'--no-use-local-chrome',
'--channel=stable',
],
'outputs_presentation_json': True,
'disabled': ['android'],
},
{
'name': 'Dashboard Python Tests',
'path': 'dashboard/bin/run_py_tests',
'additional_args': ['--no-install-hooks'],
'uses_app_engine_sdk': True,
'disabled': ['android'],
},
{
'name': 'Dependency Manager Tests',
'path': 'dependency_manager/bin/run_tests',
},
{
'name': 'Devil Device Tests',
'path': 'devil/bin/run_py_devicetests',
'disabled': ['win', 'mac', 'linux']
},
{
'name': 'Devil Python Tests',
'path': 'devil/bin/run_py_tests',
'disabled': ['mac', 'win'],
},
{
'name': 'eslint Tests',
'path': 'common/eslint/bin/run_tests',
'disabled': ['android'],
},
{
'name': 'Native Heap Symbolizer Tests',
'path': 'tracing/bin/run_symbolizer_tests',
'disabled': ['android'],
},
{
'name': 'Py-vulcanize Tests',
'path': 'third_party/py_vulcanize/bin/run_py_tests',
'additional_args': ['--no-install-hooks'],
'disabled': ['android'],
},
{
'name': 'Systrace Tests',
'path': 'systrace/bin/run_tests',
},
{
'name': 'Telemetry Tests with Stable Browser (Desktop)',
'path': 'catapult_build/fetch_telemetry_deps_and_run_tests',
'additional_args': [
'--browser=reference',
'--start-xvfb'
],
'uses_sandbox_env': True,
'disabled': ['android'],
},
{
'name': 'Telemetry Tests with Stable Browser (Android)',
'path': 'catapult_build/fetch_telemetry_deps_and_run_tests',
'additional_args': [
'--browser=reference',
'--device=android',
'--jobs=1'
],
'uses_sandbox_env': True,
'disabled': ['win', 'mac', 'linux']
},
{
'name': 'Telemetry Integration Tests with Stable Browser',
'path': 'telemetry/bin/run_browser_tests',
'additional_args': [
'SimpleBrowserTest',
'--browser=reference',
],
'uses_sandbox_env': True,
'disabled': ['android', 'linux'], # TODO(nedn): enable this on linux
},
{
'name': 'Tracing Dev Server Tests Canary',
'path': 'tracing/bin/run_dev_server_tests',
'additional_args': [
'--no-install-hooks',
'--no-use-local-chrome',
'--channel=canary'
],
'outputs_presentation_json': True,
'disabled': ['android'],
},
{
'name': 'Tracing Dev Server Tests Stable',
'path': 'tracing/bin/run_dev_server_tests',
'additional_args': [
'--no-install-hooks',
'--no-use-local-chrome',
'--channel=stable',
],
'outputs_presentation_json': True,
'disabled': ['android'],
},
{
'name': 'Tracing D8 Tests',
'path': 'tracing/bin/run_vinn_tests',
'disabled': ['android'],
},
{
'name': 'Tracing Python Tests',
'path': 'tracing/bin/run_py_tests',
'additional_args': ['--no-install-hooks'],
'disabled': ['android'],
},
{
'name': 'Vinn Tests',
'path': 'third_party/vinn/bin/run_tests',
'disabled': ['android'],
},
{
'name': 'NetLog Viewer Dev Server Tests',
'path': 'netlog_viewer/bin/run_dev_server_tests',
'additional_args': [
'--no-install-hooks',
'--no-use-local-chrome',
],
'disabled': ['android', 'win', 'mac', 'linux'],
},
]
_STALE_FILE_TYPES = ['.pyc', '.pseudo_lock']
def main(args=None):
"""Send list of test to run to recipes generator_script.
See documentation at:
github.com/luci/recipes-py/blob/master/recipe_modules/generator_script/api.py
"""
parser = argparse.ArgumentParser(description='Run catapult tests.')
parser.add_argument('--api-path-checkout', help='Path to catapult checkout')
parser.add_argument('--app-engine-sdk-pythonpath',
help='PYTHONPATH to include app engine SDK path')
parser.add_argument('--platform',
help='Platform name (linux, mac, or win)')
parser.add_argument('--output-json', help='Output for buildbot status page')
args = parser.parse_args(args)
steps = [{
# Always remove stale files first. Not listed as a test above
# because it is a step and not a test, and must be first.
'name': 'Remove Stale files',
'cmd': ['python',
os.path.join(args.api_path_checkout,
'catapult_build', 'remove_stale_files.py'),
args.api_path_checkout, ','.join(_STALE_FILE_TYPES)]
}]
if args.platform == 'android':
# On Android, we need to prepare the devices a bit before using them in
# tests. These steps are not listed as tests above because they aren't
# tests and because they must precede all tests.
steps.extend([
{
'name': 'Android: Recover Devices',
'cmd': ['python',
os.path.join(args.api_path_checkout, 'devil', 'devil',
'android', 'tools', 'device_recovery.py')],
},
{
'name': 'Android: Provision Devices',
'cmd': ['python',
os.path.join(args.api_path_checkout, 'devil', 'devil',
'android', 'tools', 'provision_devices.py')],
},
{
'name': 'Android: Device Status',
'cmd': ['python',
os.path.join(args.api_path_checkout, 'devil', 'devil',
'android', 'tools', 'device_status.py')],
},
])
for test in _CATAPULT_TESTS:
if args.platform in test.get('disabled', []):
continue
step = {
'name': test['name'],
'env': {}
}
step['cmd'] = ['python', os.path.join(args.api_path_checkout, test['path'])]
if step['name'] == 'Systrace Tests':
step['cmd'] += ['--device=' + args.platform]
if test.get('additional_args'):
step['cmd'] += test['additional_args']
if test.get('uses_app_engine_sdk'):
step['env']['PYTHONPATH'] = args.app_engine_sdk_pythonpath
if test.get('uses_sandbox_env'):
step['env']['CHROME_DEVEL_SANDBOX'] = '/opt/chromium/chrome_sandbox'
if test.get('outputs_presentation_json'):
step['outputs_presentation_json'] = True
steps.append(step)
with open(args.output_json, 'w') as outfile:
json.dump(steps, outfile)
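# Usage sketch (hypothetical paths; the flags match the parser above):
#   python run_tests.py --api-path-checkout /path/to/catapult \
#       --platform linux --output-json /tmp/steps.json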
if __name__ == '__main__':
main(sys.argv[1:])
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
from datetime import datetime
from warnings import warn
from django import forms
from django.conf import settings
from django.core.exceptions import ValidationError
from django.utils import timezone
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from paypal.standard.conf import (
BUY_BUTTON_IMAGE, DONATION_BUTTON_IMAGE, PAYPAL_CERT, PAYPAL_CERT_ID, PAYPAL_PRIVATE_CERT, PAYPAL_PUBLIC_CERT,
POSTBACK_ENDPOINT, SANDBOX_POSTBACK_ENDPOINT, SUBSCRIPTION_BUTTON_IMAGE
)
from paypal.standard.widgets import ValueHiddenInput
from paypal.utils import warn_untested
log = logging.getLogger(__name__)
# PayPal date format e.g.:
# 20:18:05 Jan 30, 2009 PST
#
# PayPal dates have been spotted in the wild with these formats, beware!
#
# %H:%M:%S %b. %d, %Y PST
# %H:%M:%S %b. %d, %Y PDT
# %H:%M:%S %b %d, %Y PST
# %H:%M:%S %b %d, %Y PDT
#
# To avoid problems with different locales, we don't rely on datetime.strptime,
# which is locale dependent, but do custom parsing in PayPalDateTimeField
MONTHS = [
'Jan', 'Feb', 'Mar', 'Apr',
'May', 'Jun', 'Jul', 'Aug',
'Sep', 'Oct', 'Nov', 'Dec',
]
class PayPalDateTimeField(forms.DateTimeField):
def to_python(self, value):
if value in self.empty_values:
return None
if isinstance(value, datetime):
return value
value = value.strip()
try:
time_part, month_part, day_part, year_part, zone_part = value.split()
month_part = month_part.strip(".")
day_part = day_part.strip(",")
month = MONTHS.index(month_part) + 1
day = int(day_part)
year = int(year_part)
hour, minute, second = map(int, time_part.split(":"))
dt = datetime(year, month, day, hour, minute, second)
except ValueError as e:
raise ValidationError(
_("Invalid date format %(value)s: %(e)s"),
params={'value': value, 'e': e},
code="invalid_date"
)
if zone_part in ["PDT", "PST"]:
# PST/PDT is 'US/Pacific'
dt = timezone.pytz.timezone('US/Pacific').localize(
dt, is_dst=zone_part == 'PDT')
if not settings.USE_TZ:
dt = timezone.make_naive(dt, timezone=timezone.utc)
return dt
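# Usage sketch: parse one of the PayPal formats listed above; the result is
# timezone-aware or naive depending on settings.USE_TZ:
#   dt = PayPalDateTimeField().to_python("20:18:05 Jan 30, 2009 PST")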
class PayPalPaymentsForm(forms.Form):
"""
Creates a PayPal Payments Standard "Buy It Now" button, configured for
selling a single item with no shipping.
For a full overview of all the fields you can set (there are a lot!) see:
http://tinyurl.com/pps-integration
Usage:
>>> f = PayPalPaymentsForm(initial={'item_name':'Widget 001', ...})
>>> f.render()
u'<form action="https://www.paypal.com/cgi-bin/webscr" method="post"> ...'
"""
CMD_CHOICES = (
("_xclick", "Buy now or Donations"),
("_donations", "Donations"),
("_cart", "Shopping cart"),
("_xclick-subscriptions", "Subscribe"),
("_xclick-auto-billing", "Automatic Billing"),
("_xclick-payment-plan", "Installment Plan"),
)
SHIPPING_CHOICES = ((1, "No shipping"), (0, "Shipping"))
NO_NOTE_CHOICES = ((1, "No Note"), (0, "Include Note"))
RECURRING_PAYMENT_CHOICES = (
(1, "Subscription Payments Recur"),
(0, "Subscription payments do not recur")
)
REATTEMPT_ON_FAIL_CHOICES = (
(1, "Reattempt billing on failure"),
(0, "Do not reattempt on failure")
)
BUY = 'buy'
SUBSCRIBE = 'subscribe'
DONATE = 'donate'
# Default fields.
cmd = forms.ChoiceField(widget=forms.HiddenInput(), initial=CMD_CHOICES[0][0])
charset = forms.CharField(widget=forms.HiddenInput(), initial="utf-8")
currency_code = forms.CharField(widget=forms.HiddenInput(), initial="USD")
no_shipping = forms.ChoiceField(widget=forms.HiddenInput(), choices=SHIPPING_CHOICES,
initial=SHIPPING_CHOICES[0][0])
def __init__(self, button_type="buy", *args, **kwargs):
super(PayPalPaymentsForm, self).__init__(*args, **kwargs)
self.button_type = button_type
if 'initial' in kwargs:
kwargs['initial'] = self._fix_deprecated_return_url(kwargs['initial'])
# Dynamically create, so we can support everything PayPal does.
for k, v in kwargs['initial'].items():
if k not in self.base_fields:
self.fields[k] = forms.CharField(label=k, widget=ValueHiddenInput(), initial=v)
def _fix_deprecated_return_url(self, initial_args):
if 'return_url' in initial_args:
warn("""The use of the initial['return_url'] is Deprecated.
Please use initial['return'] instead""", DeprecationWarning)
initial_args['return'] = initial_args['return_url']
del initial_args['return_url']
return initial_args
def test_mode(self):
return getattr(settings, 'PAYPAL_TEST', True)
def get_endpoint(self):
"Returns the endpoint url for the form."
if self.test_mode():
return SANDBOX_POSTBACK_ENDPOINT
else:
return POSTBACK_ENDPOINT
def render(self):
return format_html(u"""<form action="{0}" method="post">
{1}
<input type="image" src="{2}" border="0" name="submit" alt="Buy it Now" />
</form>""", self.get_endpoint(), self.as_p(), self.get_image())
def get_image(self):
return {
self.SUBSCRIBE: SUBSCRIPTION_BUTTON_IMAGE,
self.BUY: BUY_BUTTON_IMAGE,
self.DONATE: DONATION_BUTTON_IMAGE,
}[self.button_type]
def is_transaction(self):
warn_untested()
return not self.is_subscription()
def is_donation(self):
warn_untested()
return self.button_type == self.DONATE
def is_subscription(self):
warn_untested()
return self.button_type == self.SUBSCRIBE
class PayPalEncryptedPaymentsForm(PayPalPaymentsForm):
"""
Creates a PayPal Encrypted Payments "Buy It Now" button.
Requires the M2Crypto package.
Based on example at:
http://blog.mauveweb.co.uk/2007/10/10/paypal-with-django/
"""
def __init__(self, private_cert=PAYPAL_PRIVATE_CERT, public_cert=PAYPAL_PUBLIC_CERT,
paypal_cert=PAYPAL_CERT, cert_id=PAYPAL_CERT_ID, *args, **kwargs):
super(PayPalEncryptedPaymentsForm, self).__init__(*args, **kwargs)
self.private_cert = private_cert
self.public_cert = public_cert
self.paypal_cert = paypal_cert
self.cert_id = cert_id
def _encrypt(self):
"""Use your key thing to encrypt things."""
from M2Crypto import BIO, SMIME, X509
# Iterate through the fields and pull out the ones that have a value.
plaintext = 'cert_id=%s\n' % self.cert_id
for name, field in self.fields.items():
value = None
if name in self.initial:
value = self.initial[name]
elif field.initial is not None:
value = field.initial
if value is not None:
plaintext += u'%s=%s\n' % (name, value)
plaintext = plaintext.encode('utf-8')
# Begin crypto weirdness.
s = SMIME.SMIME()
s.load_key_bio(BIO.openfile(self.private_cert), BIO.openfile(self.public_cert))
p7 = s.sign(BIO.MemoryBuffer(plaintext), flags=SMIME.PKCS7_BINARY)
x509 = X509.load_cert_bio(BIO.openfile(self.paypal_cert))
sk = X509.X509_Stack()
sk.push(x509)
s.set_x509_stack(sk)
s.set_cipher(SMIME.Cipher('des_ede3_cbc'))
tmp = BIO.MemoryBuffer()
p7.write_der(tmp)
p7 = s.encrypt(tmp, flags=SMIME.PKCS7_BINARY)
out = BIO.MemoryBuffer()
p7.write(out)
return out.read().decode()
def as_p(self):
return mark_safe(u"""
<input type="hidden" name="cmd" value="_s-xclick" />
<input type="hidden" name="encrypted" value="%s" />
""" % self._encrypt())
class PayPalSharedSecretEncryptedPaymentsForm(PayPalEncryptedPaymentsForm):
"""
Creates a PayPal Encrypted Payments "Buy It Now" button with a Shared Secret.
Shared secrets should only be used when your IPN endpoint is on HTTPS.
Adds a secret to the notify_url based on the contents of the form.
"""
def __init__(self, *args, **kwargs):
"Make the secret from the form initial data and slip it into the form."
warn_untested()
from paypal.standard.helpers import make_secret
super(PayPalSharedSecretEncryptedPaymentsForm, self).__init__(*args, **kwargs)
# @@@ Attach the secret parameter in a way that is safe for other query params.
secret_param = "?secret=%s" % make_secret(self)
# Initial data used in form construction overrides defaults
if 'notify_url' in self.initial:
self.initial['notify_url'] += secret_param
else:
self.fields['notify_url'].initial += secret_param
class PayPalStandardBaseForm(forms.ModelForm):
"""Form used to receive and record PayPal IPN/PDT."""
# PayPal dates have non-standard formats.
time_created = PayPalDateTimeField(required=False)
payment_date = PayPalDateTimeField(required=False)
next_payment_date = PayPalDateTimeField(required=False)
subscr_date = PayPalDateTimeField(required=False)
subscr_effective = PayPalDateTimeField(required=False)
retry_at = PayPalDateTimeField(required=False)
case_creation_date = PayPalDateTimeField(required=False)
auction_closing_date = PayPalDateTimeField(required=False)
|
|
import logging
import os
import shutil
from django.conf import settings
from readthedocs.doc_builder.config import ConfigWrapper
from readthedocs.doc_builder.loader import get_builder_class
from readthedocs.projects.constants import LOG_TEMPLATE
log = logging.getLogger(__name__)
class PythonEnvironment(object):
def __init__(self, version, build_env, config=None):
self.version = version
self.project = version.project
self.build_env = build_env
if config:
self.config = config
else:
self.config = ConfigWrapper(version=version, yaml_config={})
# Compute here, since it's used a lot
self.checkout_path = self.project.checkout_path(self.version.slug)
def _log(self, msg):
log.info(LOG_TEMPLATE
.format(project=self.project.slug,
version=self.version.slug,
msg=msg))
def delete_existing_build_dir(self):
# Handle deleting old build dir
build_dir = os.path.join(
self.venv_path(),
'build')
if os.path.exists(build_dir):
self._log('Removing existing build directory')
shutil.rmtree(build_dir)
def install_package(self):
setup_path = os.path.join(self.checkout_path, 'setup.py')
if os.path.isfile(setup_path) and self.config.install_project:
if self.config.pip_install or getattr(settings, 'USE_PIP_INSTALL', False):
self.build_env.run(
'python',
self.venv_bin(filename='pip'),
'install',
'--ignore-installed',
'--cache-dir',
self.project.pip_cache_path,
'.',
cwd=self.checkout_path,
bin_path=self.venv_bin()
)
else:
self.build_env.run(
'python',
'setup.py',
'install',
'--force',
cwd=self.checkout_path,
bin_path=self.venv_bin()
)
def venv_bin(self, filename=None):
"""Return path to the virtualenv bin path, or a specific binary
:param filename: If specified, add this filename to the path return
:returns: Path to virtualenv bin or filename in virtualenv bin
"""
parts = [self.venv_path(), 'bin']
if filename is not None:
parts.append(filename)
return os.path.join(*parts)
class Virtualenv(PythonEnvironment):
def venv_path(self):
return os.path.join(self.project.doc_path, 'envs', self.version.slug)
def setup_base(self):
site_packages = '--no-site-packages'
if self.config.use_system_site_packages:
site_packages = '--system-site-packages'
env_path = self.venv_path()
self.build_env.run(
self.config.python_interpreter,
'-mvirtualenv',
site_packages,
env_path,
bin_path=None, # Don't use virtualenv bin that doesn't exist yet
)
def install_core_requirements(self):
requirements = [
'sphinx==1.3.5',
'Pygments==2.1.3',
'setuptools==20.1.1',
'docutils==0.12',
'mkdocs==0.15.0',
'mock==1.0.1',
'pillow==2.6.1',
('git+https://github.com/rtfd/readthedocs-sphinx-ext.git'
'@0.6-alpha#egg=readthedocs-sphinx-ext'),
'sphinx-rtd-theme==0.1.9',
'alabaster>=0.7,<0.8,!=0.7.5',
'commonmark==0.5.4',
'recommonmark==0.1.1',
]
cmd = [
'python',
self.venv_bin(filename='pip'),
'install',
'--use-wheel',
'-U',
'--cache-dir',
self.project.pip_cache_path,
]
if self.config.use_system_site_packages:
# Other code expects sphinx-build to be installed inside the
# virtualenv. Using the -I option makes sure it gets installed
# even if it is already installed system-wide (and
# --system-site-packages is used)
cmd.append('-I')
cmd.extend(requirements)
self.build_env.run(
*cmd,
bin_path=self.venv_bin()
)
def install_user_requirements(self):
requirements_file_path = self.config.requirements_file
if not requirements_file_path:
builder_class = get_builder_class(self.project.documentation_type)
docs_dir = (builder_class(build_env=self.build_env, python_env=self)
.docs_dir())
for path in [docs_dir, '']:
for req_file in ['pip_requirements.txt', 'requirements.txt']:
test_path = os.path.join(self.checkout_path, path, req_file)
if os.path.exists(test_path):
requirements_file_path = test_path
break
if requirements_file_path:
self.build_env.run(
'python',
self.venv_bin(filename='pip'),
'install',
'--exists-action=w',
'--cache-dir',
self.project.pip_cache_path,
'-r{0}'.format(requirements_file_path),
cwd=self.checkout_path,
bin_path=self.venv_bin()
)
class Conda(PythonEnvironment):
def venv_path(self):
return os.path.join(self.project.doc_path, 'conda', self.version.slug)
def setup_base(self):
conda_env_path = os.path.join(self.project.doc_path, 'conda')
version_path = os.path.join(conda_env_path, self.version.slug)
if os.path.exists(version_path):
# Re-create conda directory each time to keep fresh state
self._log('Removing existing conda directory')
shutil.rmtree(version_path)
self.build_env.run(
'conda',
'env',
'create',
'--name',
self.version.slug,
'--file',
self.config.conda_file,
bin_path=None, # Don't use conda bin that doesn't exist yet
)
def install_core_requirements(self):
# Use conda for requirements it packages
requirements = [
'sphinx==1.3.5',
'Pygments==2.1.1',
'docutils==0.12',
'mock',
'pillow==3.0.0',
'sphinx_rtd_theme==0.1.7',
'alabaster>=0.7,<0.8,!=0.7.5',
]
cmd = [
'conda',
'install',
'--yes',
'--name',
self.version.slug,
]
cmd.extend(requirements)
self.build_env.run(
*cmd
)
# Install pip-only things.
pip_requirements = [
'mkdocs==0.15.0',
('git+https://github.com/rtfd/readthedocs-sphinx-ext.git'
'@0.6-alpha#egg=readthedocs-sphinx-ext'),
'commonmark==0.5.4',
'recommonmark==0.1.1',
]
pip_cmd = [
'python',
self.venv_bin(filename='pip'),
'install',
'-U',
'--cache-dir',
self.project.pip_cache_path,
]
pip_cmd.extend(pip_requirements)
self.build_env.run(
*pip_cmd,
bin_path=self.venv_bin()
)
def install_user_requirements(self):
self.build_env.run(
'conda',
'env',
'update',
'--name',
self.version.slug,
'--file',
self.config.conda_file,
)
|
|
"""``tornado.gen`` is a generator-based interface to make it easier to
work in an asynchronous environment. Code using the ``gen`` module
is technically asynchronous, but it is written as a single generator
instead of a collection of separate functions.
For example, the following asynchronous handler:
.. testcode::
class AsyncHandler(RequestHandler):
@asynchronous
def get(self):
http_client = AsyncHTTPClient()
http_client.fetch("http://example.com",
callback=self.on_fetch)
def on_fetch(self, response):
do_something_with_response(response)
self.render("template.html")
.. testoutput::
:hide:
could be written with ``gen`` as:
.. testcode::
class GenAsyncHandler(RequestHandler):
@gen.coroutine
def get(self):
http_client = AsyncHTTPClient()
response = yield http_client.fetch("http://example.com")
do_something_with_response(response)
self.render("template.html")
.. testoutput::
:hide:
Most asynchronous functions in Tornado return a `.Future`;
yielding this object returns its `~.Future.result`.
You can also yield a list or dict of ``Futures``, which will be
started at the same time and run in parallel; a list or dict of results will
be returned when they are all finished:
.. testcode::
@gen.coroutine
def get(self):
http_client = AsyncHTTPClient()
response1, response2 = yield [http_client.fetch(url1),
http_client.fetch(url2)]
response_dict = yield dict(response3=http_client.fetch(url3),
response4=http_client.fetch(url4))
response3 = response_dict['response3']
response4 = response_dict['response4']
.. testoutput::
:hide:
If the `~functools.singledispatch` library is available (standard in
Python 3.4, available via the `singledispatch
<https://pypi.python.org/pypi/singledispatch>`_ package on older
versions), additional types of objects may be yielded. Tornado includes
support for ``asyncio.Future`` and Twisted's ``Deferred`` class when
``tornado.platform.asyncio`` and ``tornado.platform.twisted`` are imported.
See the `convert_yielded` function to extend this mechanism.
.. versionchanged:: 3.2
Dict support added.
.. versionchanged:: 4.1
Support added for yielding ``asyncio`` Futures and Twisted Deferreds
via ``singledispatch``.
"""
from __future__ import absolute_import, division, print_function, with_statement
import collections
import functools
import itertools
import sys
import types
import weakref
from tornado.concurrent import Future, TracebackFuture, is_future, chain_future
from tornado.ioloop import IOLoop
from tornado.log import app_log
from tornado import stack_context
from tornado.util import raise_exc_info
try:
from functools import singledispatch # py34+
except ImportError as e:
try:
from singledispatch import singledispatch # backport
except ImportError:
singledispatch = None
class KeyReuseError(Exception):
pass
class UnknownKeyError(Exception):
pass
class LeakedCallbackError(Exception):
pass
class BadYieldError(Exception):
pass
class ReturnValueIgnoredError(Exception):
pass
class TimeoutError(Exception):
"""Exception raised by ``with_timeout``."""
def engine(func):
"""Callback-oriented decorator for asynchronous generators.
This is an older interface; for new code that does not need to be
compatible with versions of Tornado older than 3.0 the
`coroutine` decorator is recommended instead.
This decorator is similar to `coroutine`, except it does not
return a `.Future` and the ``callback`` argument is not treated
specially.
In most cases, functions decorated with `engine` should take
a ``callback`` argument and invoke it with their result when
they are finished. One notable exception is the
`~tornado.web.RequestHandler` :ref:`HTTP verb methods <verbs>`,
which use ``self.finish()`` in place of a callback argument.
"""
func = _make_coroutine_wrapper(func, replace_callback=False)
@functools.wraps(func)
def wrapper(*args, **kwargs):
future = func(*args, **kwargs)
def final_callback(future):
if future.result() is not None:
raise ReturnValueIgnoredError(
"@gen.engine functions cannot return values: %r" %
(future.result(),))
# The engine interface doesn't give us any way to return
# errors but to raise them into the stack context.
# Save the stack context here to use when the Future has resolved.
future.add_done_callback(stack_context.wrap(final_callback))
return wrapper
def coroutine(func, replace_callback=True):
"""Decorator for asynchronous generators.
Any generator that yields objects from this module must be wrapped
in either this decorator or `engine`.
Coroutines may "return" by raising the special exception
`Return(value) <Return>`. In Python 3.3+, it is also possible for
the function to simply use the ``return value`` statement (prior to
Python 3.3 generators were not allowed to also return values).
In all versions of Python a coroutine that simply wishes to exit
early may use the ``return`` statement without a value.
Functions with this decorator return a `.Future`. Additionally,
they may be called with a ``callback`` keyword argument, which
will be invoked with the future's result when it resolves. If the
coroutine fails, the callback will not be run and an exception
will be raised into the surrounding `.StackContext`. The
``callback`` argument is not visible inside the decorated
function; it is handled by the decorator itself.
From the caller's perspective, ``@gen.coroutine`` is similar to
the combination of ``@return_future`` and ``@gen.engine``.
.. warning::
When exceptions occur inside a coroutine, the exception
information will be stored in the `.Future` object. You must
examine the result of the `.Future` object, or the exception
may go unnoticed by your code. This means yielding the function
if called from another coroutine, using something like
`.IOLoop.run_sync` for top-level calls, or passing the `.Future`
to `.IOLoop.add_future`.
"""
return _make_coroutine_wrapper(func, replace_callback=True)
def _make_coroutine_wrapper(func, replace_callback):
"""The inner workings of ``@gen.coroutine`` and ``@gen.engine``.
The two decorators differ in their treatment of the ``callback``
argument, so we cannot simply implement ``@engine`` in terms of
``@coroutine``.
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
future = TracebackFuture()
if replace_callback and 'callback' in kwargs:
callback = kwargs.pop('callback')
IOLoop.current().add_future(
future, lambda future: callback(future.result()))
try:
result = func(*args, **kwargs)
except (Return, StopIteration) as e:
result = getattr(e, 'value', None)
except Exception:
future.set_exc_info(sys.exc_info())
return future
else:
if isinstance(result, types.GeneratorType):
# Inline the first iteration of Runner.run. This lets us
# avoid the cost of creating a Runner when the coroutine
# never actually yields, which in turn allows us to
# use "optional" coroutines in critical path code without
# performance penalty for the synchronous case.
try:
orig_stack_contexts = stack_context._state.contexts
yielded = next(result)
if stack_context._state.contexts is not orig_stack_contexts:
yielded = TracebackFuture()
yielded.set_exception(
stack_context.StackContextInconsistentError(
'stack_context inconsistency (probably caused '
'by yield within a "with StackContext" block)'))
except (StopIteration, Return) as e:
future.set_result(getattr(e, 'value', None))
except Exception:
future.set_exc_info(sys.exc_info())
else:
Runner(result, future, yielded)
try:
return future
finally:
# Subtle memory optimization: if next() raised an exception,
# the future's exc_info contains a traceback which
# includes this stack frame. This creates a cycle,
# which will be collected at the next full GC but has
# been shown to greatly increase memory usage of
# benchmarks (relative to the refcount-based scheme
# used in the absence of cycles). We can avoid the
# cycle by clearing the local variable after we return it.
future = None
future.set_result(result)
return future
return wrapper
class Return(Exception):
"""Special exception to return a value from a `coroutine`.
If this exception is raised, its value argument is used as the
result of the coroutine::
@gen.coroutine
def fetch_json(url):
response = yield AsyncHTTPClient().fetch(url)
raise gen.Return(json_decode(response.body))
In Python 3.3, this exception is no longer necessary: the ``return``
statement can be used directly to return a value (previously
``yield`` and ``return`` with a value could not be combined in the
same function).
By analogy with the return statement, the value argument is optional,
but it is never necessary to ``raise gen.Return()``. The ``return``
statement can be used with no arguments instead.
"""
def __init__(self, value=None):
super(Return, self).__init__()
self.value = value
class WaitIterator(object):
"""Provides an iterator to yield the results of futures as they finish.
Yielding a set of futures like this:
``results = yield [future1, future2]``
pauses the coroutine until both ``future1`` and ``future2``
return, and then restarts the coroutine with the results of both
futures. If either future is an exception, the expression will
raise that exception and all the results will be lost.
If you need to get the result of each future as soon as possible,
or if you need the result of some futures even if others produce
errors, you can use ``WaitIterator``::
wait_iterator = gen.WaitIterator(future1, future2)
while not wait_iterator.done():
try:
result = yield wait_iterator.next()
except Exception as e:
print("Error {} from {}".format(e, wait_iterator.current_future))
else:
print("Result {} received from {} at {}".format(
result, wait_iterator.current_future,
wait_iterator.current_index))
Because results are returned as soon as they are available the
output from the iterator *will not be in the same order as the
input arguments*. If you need to know which future produced the
current result, you can use the attributes
``WaitIterator.current_future``, or ``WaitIterator.current_index``
to get the index of the future from the input list. (if keyword
arguments were used in the construction of the `WaitIterator`,
``current_index`` will use the corresponding keyword).
.. versionadded:: 4.1
"""
def __init__(self, *args, **kwargs):
if args and kwargs:
raise ValueError(
"You must provide args or kwargs, not both")
if kwargs:
self._unfinished = dict((f, k) for (k, f) in kwargs.items())
futures = list(kwargs.values())
else:
self._unfinished = dict((f, i) for (i, f) in enumerate(args))
futures = args
self._finished = collections.deque()
self.current_index = self.current_future = None
self._running_future = None
for future in futures:
future.add_done_callback(self._done_callback)
def done(self):
"""Returns True if this iterator has no more results."""
if self._finished or self._unfinished:
return False
# Clear the 'current' values when iteration is done.
self.current_index = self.current_future = None
return True
def next(self):
"""Returns a `.Future` that will yield the next available result.
Note that this `.Future` will not be the same object as any of
the inputs.
"""
self._running_future = TracebackFuture()
if self._finished:
self._return_result(self._finished.popleft())
return self._running_future
def _done_callback(self, done):
if self._running_future and not self._running_future.done():
self._return_result(done)
else:
self._finished.append(done)
def _return_result(self, done):
"""Called set the returned future's state that of the future
we yielded, and set the current future for the iterator.
"""
chain_future(done, self._running_future)
self.current_future = done
self.current_index = self._unfinished.pop(done)
class YieldPoint(object):
"""Base class for objects that may be yielded from the generator.
.. deprecated:: 4.0
Use `Futures <.Future>` instead.
"""
def start(self, runner):
"""Called by the runner after the generator has yielded.
No other methods will be called on this object before ``start``.
"""
raise NotImplementedError()
def is_ready(self):
"""Called by the runner to determine whether to resume the generator.
Returns a boolean; may be called more than once.
"""
raise NotImplementedError()
def get_result(self):
"""Returns the value to use as the result of the yield expression.
This method will only be called once, and only after `is_ready`
has returned true.
"""
raise NotImplementedError()
class Callback(YieldPoint):
"""Returns a callable object that will allow a matching `Wait` to proceed.
The key may be any value suitable for use as a dictionary key, and is
used to match ``Callbacks`` to their corresponding ``Waits``. The key
must be unique among outstanding callbacks within a single run of the
generator function, but may be reused across different runs of the same
function (so constants generally work fine).
The callback may be called with zero or one arguments; if an argument
is given it will be returned by `Wait`.
.. deprecated:: 4.0
Use `Futures <.Future>` instead.
"""
def __init__(self, key):
self.key = key
def start(self, runner):
self.runner = runner
runner.register_callback(self.key)
def is_ready(self):
return True
def get_result(self):
return self.runner.result_callback(self.key)
class Wait(YieldPoint):
"""Returns the argument passed to the result of a previous `Callback`.
.. deprecated:: 4.0
Use `Futures <.Future>` instead.
"""
def __init__(self, key):
self.key = key
def start(self, runner):
self.runner = runner
def is_ready(self):
return self.runner.is_ready(self.key)
def get_result(self):
return self.runner.pop_result(self.key)
class WaitAll(YieldPoint):
"""Returns the results of multiple previous `Callbacks <Callback>`.
The argument is a sequence of `Callback` keys, and the result is
a list of results in the same order.
`WaitAll` is equivalent to yielding a list of `Wait` objects.
.. deprecated:: 4.0
Use `Futures <.Future>` instead.
"""
def __init__(self, keys):
self.keys = keys
def start(self, runner):
self.runner = runner
def is_ready(self):
return all(self.runner.is_ready(key) for key in self.keys)
def get_result(self):
return [self.runner.pop_result(key) for key in self.keys]
def Task(func, *args, **kwargs):
"""Adapts a callback-based asynchronous function for use in coroutines.
Takes a function (and optional additional arguments) and runs it with
those arguments plus a ``callback`` keyword argument. The argument passed
to the callback is returned as the result of the yield expression.
.. versionchanged:: 4.0
``gen.Task`` is now a function that returns a `.Future`, instead of
a subclass of `YieldPoint`. It still behaves the same way when
yielded.
"""
future = Future()
def handle_exception(typ, value, tb):
if future.done():
return False
future.set_exc_info((typ, value, tb))
return True
def set_result(result):
if future.done():
return
future.set_result(result)
with stack_context.ExceptionStackContext(handle_exception):
func(*args, callback=_argument_adapter(set_result), **kwargs)
return future
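# A minimal usage sketch for Task, assuming a hypothetical callback-style
# helper ``fetch_with_callback(url, callback)`` and using the module's own
# ``coroutine`` decorator and ``Return`` exception defined earlier in this
# file. The demo is illustrative only and is never invoked at import time.
def _demo_task_usage(fetch_with_callback):
    @coroutine
    def fetch(url):
        # Task injects the ``callback`` keyword argument and resolves the
        # returned Future with whatever value the callback receives.
        response = yield Task(fetch_with_callback, url)
        raise Return(response)
    return fetch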
class YieldFuture(YieldPoint):
def __init__(self, future, io_loop=None):
"""Adapts a `.Future` to the `YieldPoint` interface.
.. versionchanged:: 4.1
The ``io_loop`` argument is deprecated.
"""
self.future = future
self.io_loop = io_loop or IOLoop.current()
def start(self, runner):
if not self.future.done():
self.runner = runner
self.key = object()
runner.register_callback(self.key)
self.io_loop.add_future(self.future, runner.result_callback(self.key))
else:
self.runner = None
self.result_fn = self.future.result
def is_ready(self):
if self.runner is not None:
return self.runner.is_ready(self.key)
else:
return True
def get_result(self):
if self.runner is not None:
return self.runner.pop_result(self.key).result()
else:
return self.result_fn()
class Multi(YieldPoint):
"""Runs multiple asynchronous operations in parallel.
Takes a list of ``YieldPoints`` or ``Futures`` and returns a list of
their responses. It is not necessary to call `Multi` explicitly,
since the engine will do so automatically when the generator yields
a list of ``YieldPoints`` or a mixture of ``YieldPoints`` and ``Futures``.
Instead of a list, the argument may also be a dictionary whose values are
Futures, in which case a parallel dictionary is returned mapping the same
keys to their results.
It is not normally necessary to call this class directly, as it
will be created automatically as needed. However, calling it directly
allows you to use the ``quiet_exceptions`` argument to control
the logging of multiple exceptions.
.. versionchanged:: 4.2
If multiple ``YieldPoints`` fail, any exceptions after the first
(which is raised) will be logged. Added the ``quiet_exceptions``
argument to suppress this logging for selected exception types.
"""
def __init__(self, children, quiet_exceptions=()):
self.keys = None
if isinstance(children, dict):
self.keys = list(children.keys())
children = children.values()
self.children = []
for i in children:
if is_future(i):
i = YieldFuture(i)
self.children.append(i)
assert all(isinstance(i, YieldPoint) for i in self.children)
self.unfinished_children = set(self.children)
self.quiet_exceptions = quiet_exceptions
def start(self, runner):
for i in self.children:
i.start(runner)
def is_ready(self):
finished = list(itertools.takewhile(
lambda i: i.is_ready(), self.unfinished_children))
self.unfinished_children.difference_update(finished)
return not self.unfinished_children
def get_result(self):
result_list = []
exc_info = None
for f in self.children:
try:
result_list.append(f.get_result())
except Exception as e:
if exc_info is None:
exc_info = sys.exc_info()
else:
if not isinstance(e, self.quiet_exceptions):
app_log.error("Multiple exceptions in yield list",
exc_info=True)
if exc_info is not None:
raise_exc_info(exc_info)
if self.keys is not None:
return dict(zip(self.keys, result_list))
else:
return list(result_list)
def multi_future(children, quiet_exceptions=()):
"""Wait for multiple asynchronous futures in parallel.
Takes a list of ``Futures`` (but *not* other ``YieldPoints``) and returns
a new Future that resolves when all the other Futures are done.
If all the ``Futures`` succeeded, the returned Future's result is a list
of their results. If any failed, the returned Future raises the exception
of the first one to fail.
Instead of a list, the argument may also be a dictionary whose values are
Futures, in which case a parallel dictionary is returned mapping the same
keys to their results.
    It is not normally necessary to call `multi_future` explicitly,
since the engine will do so automatically when the generator
yields a list of ``Futures``. However, calling it directly
allows you to use the ``quiet_exceptions`` argument to control
the logging of multiple exceptions.
This function is faster than the `Multi` `YieldPoint` because it
does not require the creation of a stack context.
.. versionadded:: 4.0
.. versionchanged:: 4.2
If multiple ``Futures`` fail, any exceptions after the first (which is
raised) will be logged. Added the ``quiet_exceptions``
argument to suppress this logging for selected exception types.
"""
if isinstance(children, dict):
keys = list(children.keys())
children = children.values()
else:
keys = None
assert all(is_future(i) for i in children)
unfinished_children = set(children)
future = Future()
if not children:
future.set_result({} if keys is not None else [])
def callback(f):
unfinished_children.remove(f)
if not unfinished_children:
result_list = []
for f in children:
try:
result_list.append(f.result())
except Exception as e:
if future.done():
if not isinstance(e, quiet_exceptions):
app_log.error("Multiple exceptions in yield list",
exc_info=True)
else:
future.set_exc_info(sys.exc_info())
if not future.done():
if keys is not None:
future.set_result(dict(zip(keys, result_list)))
else:
future.set_result(result_list)
listening = set()
for f in children:
if f not in listening:
listening.add(f)
f.add_done_callback(callback)
return future
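# A brief sketch of multi_future with a dict argument; ``fetch_a`` and
# ``fetch_b`` are hypothetical coroutines. Yielding the dict directly is
# equivalent, since convert_yielded routes lists and dicts through here.
def _demo_multi_future(fetch_a, fetch_b):
    @coroutine
    def gather():
        results = yield multi_future({'a': fetch_a(), 'b': fetch_b()})
        # ``results`` maps the same keys to the resolved values.
        raise Return((results['a'], results['b']))
    return gather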
def maybe_future(x):
"""Converts ``x`` into a `.Future`.
If ``x`` is already a `.Future`, it is simply returned; otherwise
it is wrapped in a new `.Future`. This is suitable for use as
``result = yield gen.maybe_future(f())`` when you don't know whether
``f()`` returns a `.Future` or not.
"""
if is_future(x):
return x
else:
fut = Future()
fut.set_result(x)
return fut
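# Sketch: maybe_future lets callers treat synchronous and asynchronous
# implementations uniformly; ``f`` stands in for any callable that may or
# may not return a Future.
def _demo_maybe_future(f):
    @coroutine
    def call():
        result = yield maybe_future(f())
        raise Return(result)
    return call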
def with_timeout(timeout, future, io_loop=None, quiet_exceptions=()):
"""Wraps a `.Future` in a timeout.
Raises `TimeoutError` if the input future does not complete before
``timeout``, which may be specified in any form allowed by
`.IOLoop.add_timeout` (i.e. a `datetime.timedelta` or an absolute time
relative to `.IOLoop.time`)
If the wrapped `.Future` fails after it has timed out, the exception
will be logged unless it is of a type contained in ``quiet_exceptions``
(which may be an exception type or a sequence of types).
Currently only supports Futures, not other `YieldPoint` classes.
.. versionadded:: 4.0
.. versionchanged:: 4.1
Added the ``quiet_exceptions`` argument and the logging of unhandled
exceptions.
"""
# TODO: allow yield points in addition to futures?
# Tricky to do with stack_context semantics.
#
# It's tempting to optimize this by cancelling the input future on timeout
# instead of creating a new one, but A) we can't know if we are the only
# one waiting on the input future, so cancelling it might disrupt other
# callers and B) concurrent futures can only be cancelled while they are
# in the queue, so cancellation cannot reliably bound our waiting time.
result = Future()
chain_future(future, result)
if io_loop is None:
io_loop = IOLoop.current()
def error_callback(future):
try:
future.result()
except Exception as e:
if not isinstance(e, quiet_exceptions):
app_log.error("Exception in Future %r after timeout",
future, exc_info=True)
def timeout_callback():
result.set_exception(TimeoutError("Timeout"))
# In case the wrapped future goes on to fail, log it.
future.add_done_callback(error_callback)
timeout_handle = io_loop.add_timeout(
timeout, timeout_callback)
if isinstance(future, Future):
# We know this future will resolve on the IOLoop, so we don't
        # need the extra thread-safety of IOLoop.add_future (and we also
        # don't care about StackContext here).
future.add_done_callback(
lambda future: io_loop.remove_timeout(timeout_handle))
else:
# concurrent.futures.Futures may resolve on any thread, so we
# need to route them back to the IOLoop.
io_loop.add_future(
future, lambda future: io_loop.remove_timeout(timeout_handle))
return result
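# Sketch: bounding a slow operation with with_timeout. The one-second
# deadline and the ``slow_future`` argument are illustrative only.
def _demo_with_timeout(slow_future):
    import datetime
    @coroutine
    def bounded():
        try:
            result = yield with_timeout(datetime.timedelta(seconds=1),
                                        slow_future)
        except TimeoutError:
            result = None
        raise Return(result)
    return bounded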
def sleep(duration):
"""Return a `.Future` that resolves after the given number of seconds.
When used with ``yield`` in a coroutine, this is a non-blocking
analogue to `time.sleep` (which should not be used in coroutines
because it is blocking)::
yield gen.sleep(0.5)
Note that calling this function on its own does nothing; you must
wait on the `.Future` it returns (usually by yielding it).
.. versionadded:: 4.1
"""
f = Future()
IOLoop.current().call_later(duration, lambda: f.set_result(None))
return f
_null_future = Future()
_null_future.set_result(None)
moment = Future()
moment.__doc__ = \
"""A special object which may be yielded to allow the IOLoop to run for
one iteration.
This is not needed in normal use but it can be helpful in long-running
coroutines that are likely to yield Futures that are ready instantly.
Usage: ``yield gen.moment``
.. versionadded:: 4.0
"""
moment.set_result(None)
class Runner(object):
"""Internal implementation of `tornado.gen.engine`.
Maintains information about pending callbacks and their results.
The results of the generator are stored in ``result_future`` (a
`.TracebackFuture`)
"""
def __init__(self, gen, result_future, first_yielded):
self.gen = gen
self.result_future = result_future
self.future = _null_future
self.yield_point = None
self.pending_callbacks = None
self.results = None
self.running = False
self.finished = False
self.had_exception = False
self.io_loop = IOLoop.current()
# For efficiency, we do not create a stack context until we
# reach a YieldPoint (stack contexts are required for the historical
# semantics of YieldPoints, but not for Futures). When we have
# done so, this field will be set and must be called at the end
# of the coroutine.
self.stack_context_deactivate = None
if self.handle_yield(first_yielded):
self.run()
def register_callback(self, key):
"""Adds ``key`` to the list of callbacks."""
if self.pending_callbacks is None:
# Lazily initialize the old-style YieldPoint data structures.
self.pending_callbacks = set()
self.results = {}
if key in self.pending_callbacks:
raise KeyReuseError("key %r is already pending" % (key,))
self.pending_callbacks.add(key)
def is_ready(self, key):
"""Returns true if a result is available for ``key``."""
if self.pending_callbacks is None or key not in self.pending_callbacks:
raise UnknownKeyError("key %r is not pending" % (key,))
return key in self.results
def set_result(self, key, result):
"""Sets the result for ``key`` and attempts to resume the generator."""
self.results[key] = result
if self.yield_point is not None and self.yield_point.is_ready():
try:
self.future.set_result(self.yield_point.get_result())
except:
self.future.set_exc_info(sys.exc_info())
self.yield_point = None
self.run()
def pop_result(self, key):
"""Returns the result for ``key`` and unregisters it."""
self.pending_callbacks.remove(key)
return self.results.pop(key)
def run(self):
"""Starts or resumes the generator, running until it reaches a
yield point that is not ready.
"""
if self.running or self.finished:
return
try:
self.running = True
while True:
future = self.future
if not future.done():
return
self.future = None
try:
orig_stack_contexts = stack_context._state.contexts
exc_info = None
try:
value = future.result()
except Exception:
self.had_exception = True
exc_info = sys.exc_info()
if exc_info is not None:
yielded = self.gen.throw(*exc_info)
exc_info = None
else:
yielded = self.gen.send(value)
if stack_context._state.contexts is not orig_stack_contexts:
self.gen.throw(
stack_context.StackContextInconsistentError(
'stack_context inconsistency (probably caused '
'by yield within a "with StackContext" block)'))
except (StopIteration, Return) as e:
self.finished = True
self.future = _null_future
if self.pending_callbacks and not self.had_exception:
                        # If we ran cleanly without waiting on all callbacks,
                        # raise an error (really more of a warning). If we
# had an exception then some callbacks may have been
# orphaned, so skip the check in that case.
raise LeakedCallbackError(
"finished without waiting for callbacks %r" %
self.pending_callbacks)
self.result_future.set_result(getattr(e, 'value', None))
self.result_future = None
self._deactivate_stack_context()
return
except Exception:
self.finished = True
self.future = _null_future
self.result_future.set_exc_info(sys.exc_info())
self.result_future = None
self._deactivate_stack_context()
return
if not self.handle_yield(yielded):
return
finally:
self.running = False
def handle_yield(self, yielded):
# Lists containing YieldPoints require stack contexts;
# other lists are handled via multi_future in convert_yielded.
if (isinstance(yielded, list) and
any(isinstance(f, YieldPoint) for f in yielded)):
yielded = Multi(yielded)
elif (isinstance(yielded, dict) and
any(isinstance(f, YieldPoint) for f in yielded.values())):
yielded = Multi(yielded)
if isinstance(yielded, YieldPoint):
# YieldPoints are too closely coupled to the Runner to go
# through the generic convert_yielded mechanism.
self.future = TracebackFuture()
def start_yield_point():
try:
yielded.start(self)
if yielded.is_ready():
self.future.set_result(
yielded.get_result())
else:
self.yield_point = yielded
except Exception:
self.future = TracebackFuture()
self.future.set_exc_info(sys.exc_info())
if self.stack_context_deactivate is None:
# Start a stack context if this is the first
# YieldPoint we've seen.
with stack_context.ExceptionStackContext(
self.handle_exception) as deactivate:
self.stack_context_deactivate = deactivate
def cb():
start_yield_point()
self.run()
self.io_loop.add_callback(cb)
return False
else:
start_yield_point()
else:
try:
self.future = convert_yielded(yielded)
except BadYieldError:
self.future = TracebackFuture()
self.future.set_exc_info(sys.exc_info())
if not self.future.done() or self.future is moment:
self.io_loop.add_future(
self.future, lambda f: self.run())
return False
return True
def result_callback(self, key):
return stack_context.wrap(_argument_adapter(
functools.partial(self.set_result, key)))
def handle_exception(self, typ, value, tb):
if not self.running and not self.finished:
self.future = TracebackFuture()
self.future.set_exc_info((typ, value, tb))
self.run()
return True
else:
return False
def _deactivate_stack_context(self):
if self.stack_context_deactivate is not None:
self.stack_context_deactivate()
self.stack_context_deactivate = None
Arguments = collections.namedtuple('Arguments', ['args', 'kwargs'])
def _argument_adapter(callback):
"""Returns a function that when invoked runs ``callback`` with one arg.
If the function returned by this function is called with exactly
one argument, that argument is passed to ``callback``. Otherwise
the args tuple and kwargs dict are wrapped in an `Arguments` object.
"""
def wrapper(*args, **kwargs):
if kwargs or len(args) > 1:
callback(Arguments(args, kwargs))
elif args:
callback(args[0])
else:
callback(None)
return wrapper
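# Sketch of _argument_adapter behaviour: a single positional argument is
# passed through unchanged, while multiple values arrive wrapped in an
# ``Arguments`` namedtuple; ``seen`` is a throwaway accumulator.
def _demo_argument_adapter():
    seen = []
    adapted = _argument_adapter(seen.append)
    adapted('only')           # seen[-1] == 'only'
    adapted(1, 2, key='val')  # seen[-1] == Arguments((1, 2), {'key': 'val'})
    return seen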
def convert_yielded(yielded):
"""Convert a yielded object into a `.Future`.
The default implementation accepts lists, dictionaries, and Futures.
If the `~functools.singledispatch` library is available, this function
may be extended to support additional types. For example::
@convert_yielded.register(asyncio.Future)
def _(asyncio_future):
return tornado.platform.asyncio.to_tornado_future(asyncio_future)
.. versionadded:: 4.1
"""
# Lists and dicts containing YieldPoints were handled separately
# via Multi().
if isinstance(yielded, (list, dict)):
return multi_future(yielded)
elif is_future(yielded):
return yielded
else:
raise BadYieldError("yielded unknown object %r" % (yielded,))
if singledispatch is not None:
convert_yielded = singledispatch(convert_yielded)
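# Sketch: when singledispatch is available, convert_yielded can be taught
# about new yieldable types. ``MyDeferred`` is hypothetical; any conversion
# that produces a tornado Future would work the same way.
def _demo_convert_yielded_extension():
    class MyDeferred(object):
        def __init__(self, future):
            self.future = future
    if singledispatch is not None:
        @convert_yielded.register(MyDeferred)
        def _(deferred):
            return deferred.future
    return MyDeferred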
|
|
"""
Benching joblib pickle I/O.
Warning: this is slow, and the benchmarks are easily offset by other disk
activity.
"""
import os
import time
import shutil
import numpy as np
import joblib
import gc
from joblib.disk import disk_used
try:
from memory_profiler import memory_usage
except ImportError:
memory_usage = None
def clear_out():
"""Clear output directory."""
if os.path.exists('out'):
shutil.rmtree('out')
os.mkdir('out')
def kill_disk_cache():
"""Clear disk cache to avoid side effects."""
    if os.name == 'posix' and os.uname()[0] == 'Linux':
        # os.system does not raise on failure, so inspect the exit status
        # instead; a non-zero status typically means missing privileges.
        status = os.system('sudo sh -c "sync; echo 3 > /proc/sys/vm/drop_caches"')
        if status != 0:
            print('Please run me as root')
    else:
        # Write ~160M to the disk to evict cached data (the size argument
        # must be an int for recent numpy versions).
        open('tmp', 'wb').write(np.random.random(int(2e7)))
def delete_obj(obj):
"""Force destruction of an object."""
if obj is not None:
del obj
gc.collect()
def memory_used(func, *args, **kwargs):
"""Compute memory usage of func."""
if memory_usage is None:
        return np.nan
gc.collect()
mem_use = memory_usage((func, args, kwargs), interval=.001)
return max(mem_use) - min(mem_use)
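# Quick self-check sketch for memory_used: measure a closure allocating a
# ~80MB array (returns NaN when memory_profiler is unavailable).
def _demo_memory_used():
    return memory_used(lambda: np.ones((1000, 10000)))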
def timeit(func, *args, **kwargs):
"""Compute the mean execution time of func based on 7 measures."""
times = []
tries = kwargs['tries']
kwargs.pop('tries')
if tries > 1:
tries += 2
for _ in range(tries):
kill_disk_cache()
t0 = time.time()
out = func(*args, **kwargs)
if 1:
# Just time the function
t1 = time.time()
times.append(t1 - t0)
else:
# Compute a hash of the output, to estimate the time
# necessary to access the elements: this is a better
            # estimate of the time to load with memory mapping.
joblib.hash(out)
t1 = time.time()
joblib.hash(out)
t2 = time.time()
times.append(t2 - t0 - 2 * (t2 - t1))
times.sort()
    return (np.mean(times[1:-1]) if tries > 1 else t1 - t0), out
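# Sketch of timeit usage: time joblib.dump on a small array over three
# tries; '/tmp/bench_demo.pkl' is an arbitrary scratch path.
def _demo_timeit():
    data = np.random.RandomState(0).random_sample((1000, 1000))
    mean_time, _ = timeit(joblib.dump, data, '/tmp/bench_demo.pkl', tries=3)
    return mean_time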
def generate_rand_dict(size,
with_arrays=False,
with_string=False,
array_shape=(10, 10)):
"""Generate dictionary with random values from list of keys."""
ret = {}
rnd = np.random.RandomState(0)
randoms = rnd.random_sample((size))
for key, random in zip(range(size), randoms):
if with_arrays:
ret[str(key)] = rnd.random_sample(array_shape)
elif with_string:
ret[str(key)] = str(random)
else:
ret[str(key)] = int(random)
return ret
def generate_rand_list(size,
with_arrays=False,
with_string=False,
array_shape=(10, 10)):
"""Generate list with random values from list of keys."""
ret = []
rnd = np.random.RandomState(0)
for random in rnd.random_sample((size)):
if with_arrays:
ret.append(rnd.random_sample(array_shape))
elif with_string:
ret.append(str(random))
else:
ret.append(int(random))
return ret
def print_line(dataset, strategy,
write_time, read_time,
mem_write, mem_read,
disk_used):
"""Nice printing function."""
print('% 15s, %12s, % 6.3f, % 7.4f, % 9.1f, % 9.1f, % 5.1f' % (
dataset, strategy,
write_time, read_time,
mem_write, mem_read, disk_used))
def print_bench_summary(args):
"""Nice bench summary function."""
summary = """Benchmark summary:
- Global values:
. Joblib version: {}
. Number of tries to compute mean execution time: {}
. Compression levels : {}
. Compression algorithm: {}
. Memory map mode : {}
. Bench nifti data : {}
. Bench big array : {}
. Bench 2 big arrays : {}
. Bench big dictionary: {}
. Bench array+dict : {}
""".format(joblib.__version__,
args.tries,
", ".join(map(str, args.compress)),
"None" if not args.compress else args.compressor,
args.mmap,
args.nifti,
args.array,
args.arrays,
args.dict,
args.combo)
if args.array:
shape = tuple(args.shape)
size = round(np.multiply.reduce(shape) * 8 / 1024 ** 2, 1)
summary += """
- Big array:
. shape: {}
. size in memory: {} MB
""".format(str(shape), size)
if args.dict:
summary += """
- Big dictionary:
. number of keys: {}
. value type: {}
""".format(args.size, 'np.ndarray'
if args.valuearray else 'str'
if args.valuestring else 'int')
if args.valuearray:
summary += """ . arrays shape: {}
""".format(str(tuple(args.valuearrayshape)))
if args.list:
summary += """
- Big list:
. number of elements: {}
. value type: {}
""".format(args.size, 'np.ndarray'
if args.valuearray else 'str'
if args.valuestring else 'int')
if args.valuearray:
summary += """ . arrays shape: {}
""".format(str(tuple(args.valuearrayshape)))
print(summary)
def bench_compress(dataset, name='',
compress=('zlib', 0), cache_size=0, tries=5):
"""Bench joblib dump and load functions, compress modes."""
    # Generate the output compression strategy string before the joblib
    # compatibility check, as the check may override the compress variable
    # with a non-tuple type.
compress_str = "Raw" if compress[1] == 0 else "{} {}".format(*compress)
    # joblib versions prior to 0.10 don't support a tuple as the compress
    # argument, so only the second element of the tuple is used for those
    # versions and the compression strategy is ignored.
if (isinstance(compress, tuple) and
tuple(map(int, joblib.__version__.split('.')[:2])) < (0, 10)):
compress = compress[1]
time_write = time_read = du = mem_read = mem_write = []
clear_out()
time_write, obj = timeit(joblib.dump, dataset, 'out/test.pkl',
tries=tries,
compress=compress, cache_size=cache_size)
del obj
gc.collect()
mem_write = memory_used(joblib.dump, dataset, 'out/test.pkl',
compress=compress, cache_size=cache_size)
delete_obj(dataset)
du = disk_used('out') / 1024.
time_read, obj = timeit(joblib.load, 'out/test.pkl', tries=tries)
delete_obj(obj)
mem_read = memory_used(joblib.load, 'out/test.pkl')
print_line(name, compress_str, time_write, time_read,
mem_write, mem_read, du)
def bench_mmap(dataset, name='', cache_size=0, mmap_mode='r', tries=5):
"""Bench joblib dump and load functions, memmap modes."""
time_write = time_read = du = []
clear_out()
time_write, _ = timeit(joblib.dump, dataset, 'out/test.pkl',
tries=tries,
cache_size=cache_size)
mem_write = memory_used(joblib.dump, dataset, 'out/test.pkl',
cache_size=cache_size)
delete_obj(dataset)
time_read, obj = timeit(joblib.load, 'out/test.pkl',
tries=tries,
mmap_mode=mmap_mode)
delete_obj(obj)
mem_read = memory_used(joblib.load, 'out/test.pkl', mmap_mode=mmap_mode)
du = disk_used('out') / 1024.
print_line(name, 'mmap %s' % mmap_mode,
time_write, time_read, mem_write, mem_read, du)
def run_bench(func, obj, name, **kwargs):
"""Run the benchmark function."""
func(obj, name, **kwargs)
def run(args):
"""Run the full bench suite."""
if args.summary:
print_bench_summary(args)
if (not args.nifti and not args.array and not args.arrays and
not args.dict and not args.list and not args.combo):
print("Nothing to bench. Exiting")
return
compress_levels = args.compress
compress_method = args.compressor
mmap_mode = args.mmap
container_size = args.size
a1_shape = tuple(args.shape)
a2_shape = (10000000, )
print('% 15s, %12s, % 6s, % 7s, % 9s, % 9s, % 5s' % (
'Dataset', 'strategy', 'write', 'read',
'mem_write', 'mem_read', 'disk'))
if args.nifti:
# Nifti images
try:
import nibabel
except ImportError:
print("nibabel is not installed skipping nifti file benchmark.")
else:
def load_nii(filename):
img = nibabel.load(filename)
                # get_data()/get_affine() were removed in recent nibabel
                return img.get_fdata(), img.affine
for name, nifti_file in (
('MNI',
'/usr/share/fsl/data/atlases'
'/MNI/MNI-prob-1mm.nii.gz'),
('Juelich',
'/usr/share/fsl/data/atlases'
'/Juelich/Juelich-prob-2mm.nii.gz'), ):
for c_order in (True, False):
name_d = '% 5s(%s)' % (name, 'C' if c_order else 'F')
for compress_level in compress_levels:
d = load_nii(nifti_file)
if c_order:
d = (np.ascontiguousarray(d[0]), d[1])
run_bench(bench_compress, d, name_d,
compress=(compress_method, compress_level),
tries=args.tries)
del d
if not args.nommap:
d = load_nii(nifti_file)
if c_order:
d = (np.ascontiguousarray(d[0]), d[1])
run_bench(bench_mmap, d, name_d,
mmap_mode=mmap_mode, tries=args.tries)
del d
# Generate random seed
rnd = np.random.RandomState(0)
if args.array:
# numpy array
name = '% 5s' % 'Big array'
for compress_level in compress_levels:
a1 = rnd.random_sample(a1_shape)
run_bench(bench_compress, a1, name,
compress=(compress_method, compress_level),
tries=args.tries)
del a1
if not args.nommap:
a1 = rnd.random_sample(a1_shape)
run_bench(bench_mmap, a1, name, mmap_mode=mmap_mode,
tries=args.tries)
del a1
if args.arrays:
# Complex object with 2 big arrays
name = '% 5s' % '2 big arrays'
for compress_level in compress_levels:
obj = [rnd.random_sample(a1_shape), rnd.random_sample(a2_shape)]
run_bench(bench_compress, obj, name,
compress=(compress_method, compress_level),
tries=args.tries)
del obj
if not args.nommap:
obj = [rnd.random_sample(a1_shape), rnd.random_sample(a2_shape)]
run_bench(bench_mmap, obj, name, mmap_mode=mmap_mode,
tries=args.tries)
del obj
if args.dict:
# Big dictionary
name = '% 5s' % 'Big dict'
array_shape = tuple(args.valuearrayshape)
for compress_level in compress_levels:
big_dict = generate_rand_dict(container_size,
with_arrays=args.valuearray,
with_string=args.valuestring,
array_shape=array_shape)
run_bench(bench_compress, big_dict, name,
compress=(compress_method, compress_level),
tries=args.tries)
del big_dict
if not args.nommap:
big_dict = generate_rand_dict(container_size,
with_arrays=args.valuearray,
with_string=args.valuestring,
array_shape=array_shape)
run_bench(bench_mmap, big_dict, name, mmap_mode=mmap_mode,
tries=args.tries)
del big_dict
if args.list:
        # Big list
name = '% 5s' % 'Big list'
array_shape = tuple(args.valuearrayshape)
for compress_level in compress_levels:
big_list = generate_rand_list(container_size,
with_arrays=args.valuearray,
with_string=args.valuestring,
array_shape=array_shape)
run_bench(bench_compress, big_list, name,
compress=(compress_method, compress_level),
tries=args.tries)
del big_list
if not args.nommap:
big_list = generate_rand_list(container_size,
with_arrays=args.valuearray,
with_string=args.valuestring,
array_shape=array_shape)
run_bench(bench_mmap, big_list, name, mmap_mode=mmap_mode,
tries=args.tries)
del big_list
if args.combo:
# 2 big arrays with one big dict
name = '% 5s' % 'Dict/arrays'
array_shape = tuple(args.valuearrayshape)
for compress_level in compress_levels:
obj = [rnd.random_sample(a1_shape),
generate_rand_dict(container_size,
with_arrays=args.valuearray,
with_string=args.valuestring,
array_shape=array_shape),
rnd.random_sample(a2_shape)]
run_bench(bench_compress, obj, name,
compress=(compress_method, compress_level),
tries=args.tries)
del obj
if not args.nommap:
obj = [rnd.random_sample(a1_shape),
generate_rand_dict(container_size,
with_arrays=args.valuearray,
with_string=args.valuestring,
array_shape=array_shape),
rnd.random_sample(a2_shape)]
run_bench(bench_mmap, obj, name,
mmap_mode=mmap_mode,
tries=args.tries)
del obj
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Joblib benchmark script")
parser.add_argument('--compress', nargs='+', type=int, default=(0, 3),
help="List of compress levels.")
parser.add_argument('--compressor', type=str, default='zlib',
choices=['zlib', 'gzip', 'bz2', 'xz', 'lzma'],
help="Compression algorithm.")
parser.add_argument('--mmap', type=str, default='r',
choices=['r', 'r+', 'w+'],
help="Memory map mode.")
parser.add_argument('--tries', type=int, default=5,
help="Number of tries to compute execution time"
"mean on.")
parser.add_argument('--shape', nargs='+', type=int, default=(10000, 10000),
help="Big array shape.")
parser.add_argument("-m", "--nommap", action="store_true",
help="Don't bench memmap")
parser.add_argument('--size', type=int, default=10000,
help="Big dictionary size.")
parser.add_argument('--valuearray', action="store_true",
help="Use numpy arrays type in containers "
"(list, dict)")
parser.add_argument('--valuearrayshape', nargs='+', type=int,
default=(10, 10),
help="Shape of arrays in big containers.")
parser.add_argument('--valuestring', action="store_true",
help="Use string type in containers (list, dict).")
parser.add_argument("-n", "--nifti", action="store_true",
help="Benchmark Nifti data")
parser.add_argument("-a", "--array", action="store_true",
help="Benchmark single big numpy array")
parser.add_argument("-A", "--arrays", action="store_true",
help="Benchmark list of big numpy arrays")
parser.add_argument("-d", "--dict", action="store_true",
help="Benchmark big dictionary.")
parser.add_argument("-l", "--list", action="store_true",
help="Benchmark big list.")
parser.add_argument("-c", "--combo", action="store_true",
help="Benchmark big dictionary + list of "
"big numpy arrays.")
parser.add_argument("-s", "--summary", action="store_true",
help="Show bench summary.")
run(parser.parse_args())
|
|
#!/usr/bin/env python
"""
Release script for botan (https://botan.randombit.net/)
This script requires Python 2.7 or 3.6
(C) 2011,2012,2013,2015,2016,2017 Jack Lloyd
Botan is released under the Simplified BSD License (see license.txt)
"""
import datetime
import errno
import hashlib
import io
import logging
import optparse # pylint: disable=deprecated-module
import os
import re
import shutil
import subprocess
import sys
import tarfile
import time
import traceback
# This is horrible, but there is no way to override tarfile's use of time.time
# in setting the gzip header timestamp, which breaks deterministic archives
GZIP_HEADER_TIME = 0
def fake_time():
return GZIP_HEADER_TIME
time.time = fake_time
def check_subprocess_results(subproc, name):
(raw_stdout, raw_stderr) = subproc.communicate()
stderr = raw_stderr.decode('utf-8')
if subproc.returncode != 0:
stdout = raw_stdout.decode('utf-8')
if stdout != '':
logging.error(stdout)
if stderr != '':
logging.error(stderr)
raise Exception('Running %s failed' % (name))
else:
if stderr != '':
logging.warning(stderr)
return raw_stdout
def run_git(args):
cmd = ['git'] + args
logging.debug('Running %s' % (' '.join(cmd)))
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
return check_subprocess_results(proc, 'git')
def maybe_gpg(val):
val = val.decode('ascii')
if 'BEGIN PGP SIGNATURE' in val:
return val.split('\n')[-2]
else:
return val.strip()
def rel_time_to_epoch(year, month, day, hour, minute, second):
dt = datetime.datetime(year, month, day, hour, minute, second)
return (dt - datetime.datetime(1970, 1, 1)).total_seconds()
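# Quick sanity sketch for rel_time_to_epoch: midnight on 1970-01-02 is
# exactly one day (86400 seconds) after the Unix epoch.
def _demo_rel_time_to_epoch():
    assert rel_time_to_epoch(1970, 1, 2, 0, 0, 0) == 86400.0
    return rel_time_to_epoch(2017, 1, 1, 0, 0, 0)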
def datestamp(tag):
ts = maybe_gpg(run_git(['show', '--no-patch', '--format=%ai', tag]))
ts_matcher = re.compile(r'^(\d{4})-(\d{2})-(\d{2}) (\d{2}):(\d{2}):(\d{2}) .*')
logging.debug('Git returned timestamp of %s for tag %s' % (ts, tag))
match = ts_matcher.match(ts)
if match is None:
logging.error('Failed parsing timestamp "%s" of tag %s' % (ts, tag))
return 0
rel_date = int(match.group(1) + match.group(2) + match.group(3))
rel_epoch = rel_time_to_epoch(*[int(match.group(i)) for i in range(1, 7)])
return rel_date, rel_epoch
def revision_of(tag):
return maybe_gpg(run_git(['show', '--no-patch', '--format=%H', tag]))
def extract_revision(revision, to):
tar_val = run_git(['archive', '--format=tar', '--prefix=%s/' % (to), revision])
tar_f = tarfile.open(fileobj=io.BytesIO(tar_val))
tar_f.extractall()
def gpg_sign(keyid, passphrase_file, files, detached=True):
options = ['--armor', '--detach-sign'] if detached else ['--clearsign']
gpg_cmd = ['gpg', '--batch'] + options + ['--local-user', keyid]
    if passphrase_file is not None:
gpg_cmd[1:1] = ['--passphrase-file', passphrase_file]
for filename in files:
logging.info('Signing %s using PGP id %s' % (filename, keyid))
cmd = gpg_cmd + [filename]
logging.debug('Running %s' % (' '.join(cmd)))
gpg = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
check_subprocess_results(gpg, 'gpg')
return [filename + '.asc' for filename in files]
def parse_args(args):
parser = optparse.OptionParser(
"usage: %prog [options] <version tag>\n" +
" %prog [options] snapshot <branch>"
)
parser.add_option('--verbose', action='store_true',
default=False, help='Extra debug output')
parser.add_option('--quiet', action='store_true',
default=False, help='Only show errors')
parser.add_option('--output-dir', metavar='DIR', default='.',
help='Where to place output (default %default)')
parser.add_option('--print-output-names', action='store_true',
help='Print output archive filenames to stdout')
parser.add_option('--archive-types', metavar='LIST', default='tgz',
help='Set archive types to generate (default %default)')
parser.add_option('--pgp-key-id', metavar='KEYID',
default='EFBADFBC',
help='PGP signing key (default %default, "none" to disable)')
parser.add_option('--pgp-passphrase-file', metavar='FILE',
default=None,
help='PGP signing key passphrase file')
parser.add_option('--write-hash-file', metavar='FILE', default=None,
help='Write a file with checksums')
return parser.parse_args(args)
def remove_file_if_exists(fspath):
try:
os.unlink(fspath)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def rewrite_version_file(version_file, target_version, snapshot_branch, rev_id, rel_date):
if snapshot_branch:
assert target_version == snapshot_branch
version_file_name = os.path.basename(version_file)
contents = open(version_file).readlines()
version_re = re.compile('release_(major|minor|patch) = ([0-9]+)')
def content_rewriter():
for line in contents:
if not snapshot_branch:
# Verify the version set in the source matches the tag
match = version_re.match(line)
if match:
name_to_idx = {
'major': 0,
'minor': 1,
'patch': 2
}
version_parts = target_version.split('.')
assert len(version_parts) == 3
in_tag = int(version_parts[name_to_idx[match.group(1)]])
in_file = int(match.group(2))
if in_tag != in_file:
raise Exception('Version number part "%s" in %s does not match tag %s' %
(match.group(1), version_file_name, target_version))
if line == 'release_vc_rev = None\n':
yield 'release_vc_rev = \'git:%s\'\n' % (rev_id)
elif line == 'release_datestamp = 0\n':
yield 'release_datestamp = %d\n' % (rel_date)
elif line == "release_type = \'unreleased\'\n":
if target_version == snapshot_branch:
yield "release_type = 'snapshot:%s'\n" % (snapshot_branch)
else:
yield "release_type = 'release'\n"
else:
yield line
open(version_file, 'w').write(''.join(list(content_rewriter())))
def write_archive(output_basename, archive_type, rel_epoch, all_files, hash_file):
output_archive = output_basename + '.' + archive_type
logging.info('Writing archive "%s"' % (output_archive))
remove_file_if_exists(output_archive)
remove_file_if_exists(output_archive + '.asc')
def write_mode(archive_type):
if archive_type == 'tgz':
return 'w:gz'
elif archive_type == 'tbz':
return 'w:bz2'
elif archive_type == 'tar':
return 'w'
else:
raise Exception("Unknown archive type '%s'" % (archive_type))
    # The gzip format embeds the original filename, and tarfile.py does the wrong
# thing unless the output name ends in .gz. So pass an explicit
# fileobj in that case, and supply a name in the form tarfile expects.
if archive_type == 'tgz':
archive = tarfile.open(output_basename + '.tar.gz',
write_mode(archive_type),
fileobj=open(output_archive, 'wb'))
else:
archive = tarfile.open(output_basename + '.tar',
write_mode(archive_type))
for f in all_files:
tarinfo = archive.gettarinfo(f)
tarinfo.uid = 500
tarinfo.gid = 500
tarinfo.uname = "botan"
tarinfo.gname = "botan"
tarinfo.mtime = rel_epoch
archive.addfile(tarinfo, open(f, 'rb'))
archive.close()
sha256 = hashlib.new('sha256')
sha256.update(open(output_archive, 'rb').read())
archive_hash = sha256.hexdigest().upper()
logging.info('SHA-256(%s) = %s' % (output_archive, archive_hash))
    if hash_file is not None:
hash_file.write("%s %s\n" % (archive_hash, output_archive))
return output_archive
def configure_logging(options):
class ExitOnErrorLogHandler(logging.StreamHandler, object):
def emit(self, record):
super(ExitOnErrorLogHandler, self).emit(record)
            # Exit script if an ERROR or worse occurred
            if record.levelno >= logging.ERROR:
                if sys.exc_info()[2] is not None:
logging.info(traceback.format_exc())
sys.exit(1)
def log_level():
if options.verbose:
return logging.DEBUG
if options.quiet:
return logging.ERROR
return logging.INFO
lh = ExitOnErrorLogHandler(sys.stderr)
lh.setFormatter(logging.Formatter('%(levelname) 7s: %(message)s'))
logging.getLogger().addHandler(lh)
logging.getLogger().setLevel(log_level())
def main(args=None):
# pylint: disable=too-many-branches,too-many-locals
if args is None:
args = sys.argv[1:]
(options, args) = parse_args(args)
configure_logging(options)
if len(args) != 1 and len(args) != 2:
logging.error('Usage: %s [options] <version tag>' % (sys.argv[0]))
snapshot_branch = None
target_version = None
archives = options.archive_types.split(',') if options.archive_types != '' else []
for archive_type in archives:
if archive_type not in ['tar', 'tgz', 'tbz']:
logging.error('Unknown archive type "%s"' % (archive_type))
if args[0] == 'snapshot':
if len(args) != 2:
logging.error('Missing branch name for snapshot command')
snapshot_branch = args[1]
else:
if len(args) != 1:
logging.error('Usage error, try --help')
target_version = args[0]
if snapshot_branch:
logging.info('Creating snapshot release from branch %s', snapshot_branch)
target_version = snapshot_branch
elif len(args) == 1:
try:
logging.info('Creating release for version %s' % (target_version))
            (major, minor, patch) = map(int, target_version.split('.'))
            assert target_version == '%d.%d.%d' % (major, minor, patch)
        except ValueError:
            logging.error('Invalid version number %s' % (target_version))
rev_id = revision_of(target_version)
if rev_id == '':
logging.error('No tag matching %s found' % (target_version))
rel_date, rel_epoch = datestamp(target_version)
if rel_date == 0 or rel_epoch == 0:
logging.error('No date found for version, git error?')
logging.info('Found %s at revision id %s released %d' % (target_version, rev_id, rel_date))
global GZIP_HEADER_TIME # pylint: disable=global-statement
GZIP_HEADER_TIME = rel_epoch
def output_name():
if snapshot_branch:
if snapshot_branch == 'master':
return 'Botan-snapshot-%s' % (rel_date)
else:
return 'Botan-snapshot-%s-%s' % (snapshot_branch, rel_date)
else:
return 'Botan-' + target_version
output_basename = output_name()
logging.debug('Output basename %s' % (output_basename))
if os.access(output_basename, os.X_OK):
logging.info('Removing existing output dir %s' % (output_basename))
shutil.rmtree(output_basename)
extract_revision(rev_id, output_basename)
all_files = []
for (curdir, _, files) in os.walk(output_basename):
all_files += [os.path.join(curdir, f) for f in files]
all_files.sort(key=lambda f: (os.path.dirname(f), os.path.basename(f)))
version_file = None
for possible_version_file in ['version.txt', 'botan_version.py']:
full_path = os.path.join(output_basename, possible_version_file)
if os.access(full_path, os.R_OK):
version_file = full_path
break
    if version_file is None or not os.access(version_file, os.R_OK):
        logging.error('Cannot read version file %s' % (version_file))
rewrite_version_file(version_file, target_version, snapshot_branch, rev_id, rel_date)
try:
os.makedirs(options.output_dir)
except OSError as e:
if e.errno != errno.EEXIST:
logging.error('Creating dir %s failed %s' % (options.output_dir, e))
output_files = []
hash_file = None
    if options.write_hash_file is not None:
hash_file = open(options.write_hash_file, 'w')
for archive_type in archives:
output_files.append(write_archive(output_basename, archive_type, rel_epoch, all_files, hash_file))
    if hash_file is not None:
hash_file.close()
shutil.rmtree(output_basename)
if options.pgp_key_id != 'none':
        if options.write_hash_file is not None:
output_files += gpg_sign(options.pgp_key_id, options.pgp_passphrase_file,
[options.write_hash_file], False)
else:
output_files += gpg_sign(options.pgp_key_id, options.pgp_passphrase_file,
output_files, True)
if options.output_dir != '.':
for output_file in output_files:
logging.debug('Moving %s to %s' % (output_file, options.output_dir))
shutil.move(output_file, os.path.join(options.output_dir, output_file))
if options.print_output_names:
for output_file in output_files:
print(output_file)
return 0
if __name__ == '__main__':
try:
sys.exit(main())
except Exception as e: # pylint: disable=broad-except
logging.info(traceback.format_exc())
logging.error(e)
sys.exit(1)
|
|
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import copy
from robot.errors import DataError
from robot.variables import is_var
from robot.output import LOGGER
from robot.writer import DataFileWriter
from robot.utils import abspath, is_string, normalize, NormalizedDict
from .comments import Comment
from .populators import FromFilePopulator, FromDirectoryPopulator
from .settings import (Documentation, Fixture, Timeout, Tags, Metadata,
Library, Resource, Variables, Arguments, Return,
Template, MetadataList, ImportList)
def TestData(parent=None, source=None, include_suites=None,
warn_on_skipped=False):
"""Parses a file or directory to a corresponding model object.
:param parent: (optional) parent to be used in creation of the model object.
:param source: path where test data is read from.
:returns: :class:`~.model.TestDataDirectory` if `source` is a directory,
:class:`~.model.TestCaseFile` otherwise.
"""
if os.path.isdir(source):
return TestDataDirectory(parent, source).populate(include_suites,
warn_on_skipped)
return TestCaseFile(parent, source).populate()
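# A minimal usage sketch; 'tests/suite.txt' is a hypothetical path. The
# factory returns a populated TestCaseFile for files and a
# TestDataDirectory for directories.
def _demo_test_data(path='tests/suite.txt'):
    data = TestData(source=path)
    return data.name, [kw.name for kw in data.keywords]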
class _TestData(object):
_setting_table_names = 'Setting', 'Settings', 'Metadata'
_variable_table_names = 'Variable', 'Variables'
_testcase_table_names = 'Test Case', 'Test Cases'
_keyword_table_names = 'Keyword', 'Keywords', 'User Keyword', 'User Keywords'
def __init__(self, parent=None, source=None):
self.parent = parent
self.source = abspath(source) if source else None
self.children = []
self._tables = NormalizedDict(self._get_tables())
def _get_tables(self):
for names, table in [(self._setting_table_names, self.setting_table),
(self._variable_table_names, self.variable_table),
(self._testcase_table_names, self.testcase_table),
(self._keyword_table_names, self.keyword_table)]:
for name in names:
yield name, table
def start_table(self, header_row):
try:
table = self._tables[header_row[0]]
except (KeyError, IndexError):
return None
if not self._table_is_allowed(table):
return None
table.set_header(header_row)
return table
@property
def name(self):
return self._format_name(self._get_basename()) if self.source else None
def _get_basename(self):
return os.path.splitext(os.path.basename(self.source))[0]
def _format_name(self, name):
name = self._strip_possible_prefix_from_name(name)
name = name.replace('_', ' ').strip()
return name.title() if name.islower() else name
def _strip_possible_prefix_from_name(self, name):
return name.split('__', 1)[-1]
@property
def keywords(self):
return self.keyword_table.keywords
@property
def imports(self):
return self.setting_table.imports
def report_invalid_syntax(self, message, level='ERROR'):
initfile = getattr(self, 'initfile', None)
path = os.path.join(self.source, initfile) if initfile else self.source
LOGGER.write("Error in file '%s': %s" % (path, message), level)
def save(self, **options):
"""Writes this datafile to disk.
:param options: Configuration for writing. These are passed to
:py:class:`~robot.writer.datafilewriter.WritingContext` as
keyword arguments.
See also :py:class:`robot.writer.datafilewriter.DataFileWriter`
"""
return DataFileWriter(**options).write(self)
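# Sketch: writing a parsed datafile back to disk. With no options the
# writer infers the output format from the source path; any keyword
# arguments given here would be forwarded to WritingContext.
def _demo_save(data):
    return data.save()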
class TestCaseFile(_TestData):
"""The parsed test case file object.
:param parent: parent object to be used in creation of the model object.
:param source: path where test data is read from.
"""
def __init__(self, parent=None, source=None):
self.directory = os.path.dirname(source) if source else None
self.setting_table = TestCaseFileSettingTable(self)
self.variable_table = VariableTable(self)
self.testcase_table = TestCaseTable(self)
self.keyword_table = KeywordTable(self)
_TestData.__init__(self, parent, source)
def populate(self):
FromFilePopulator(self).populate(self.source)
self._validate()
return self
def _validate(self):
if not self.testcase_table.is_started():
raise DataError('File has no test case table.')
def _table_is_allowed(self, table):
return True
def has_tests(self):
return True
def __iter__(self):
for table in [self.setting_table, self.variable_table,
self.testcase_table, self.keyword_table]:
yield table
class ResourceFile(_TestData):
"""The parsed resource file object.
:param source: path where resource file is read from.
"""
def __init__(self, source=None):
self.directory = os.path.dirname(source) if source else None
self.setting_table = ResourceFileSettingTable(self)
self.variable_table = VariableTable(self)
self.testcase_table = TestCaseTable(self)
self.keyword_table = KeywordTable(self)
_TestData.__init__(self, source=source)
def populate(self):
FromFilePopulator(self).populate(self.source)
self._report_status()
return self
def _report_status(self):
if self.setting_table or self.variable_table or self.keyword_table:
LOGGER.info("Imported resource file '%s' (%d keywords)."
% (self.source, len(self.keyword_table.keywords)))
else:
LOGGER.warn("Imported resource file '%s' is empty." % self.source)
def _table_is_allowed(self, table):
if table is self.testcase_table:
raise DataError("Resource file '%s' contains a test case table "
"which is not allowed." % self.source)
return True
def __iter__(self):
for table in [self.setting_table, self.variable_table, self.keyword_table]:
yield table
class TestDataDirectory(_TestData):
"""The parsed test data directory object. Contains hiearchical structure
of other :py:class:`.TestDataDirectory` and :py:class:`.TestCaseFile`
objects.
:param parent: parent object to be used in creation of the model object.
:param source: path where test data is read from.
"""
def __init__(self, parent=None, source=None):
self.directory = source
self.initfile = None
self.setting_table = InitFileSettingTable(self)
self.variable_table = VariableTable(self)
self.testcase_table = TestCaseTable(self)
self.keyword_table = KeywordTable(self)
_TestData.__init__(self, parent, source)
def populate(self, include_suites=None, warn_on_skipped=False, recurse=True):
FromDirectoryPopulator().populate(self.source, self, include_suites,
warn_on_skipped, recurse)
self.children = [ch for ch in self.children if ch.has_tests()]
return self
def _get_basename(self):
return os.path.basename(self.source)
def _table_is_allowed(self, table):
if table is self.testcase_table:
LOGGER.error("Test suite init file in '%s' contains a test case "
"table which is not allowed." % self.source)
return False
return True
def add_child(self, path, include_suites):
        self.children.append(TestData(parent=self, source=path,
                                      include_suites=include_suites))
def has_tests(self):
return any(ch.has_tests() for ch in self.children)
def __iter__(self):
for table in [self.setting_table, self.variable_table, self.keyword_table]:
yield table
class _Table(object):
def __init__(self, parent):
self.parent = parent
self._header = None
def set_header(self, header):
self._header = self._prune_old_style_headers(header)
def _prune_old_style_headers(self, header):
if len(header) < 3:
return header
if self._old_header_matcher.match(header):
return [header[0]]
return header
@property
def header(self):
return self._header or [self.type.title() + 's']
@property
def name(self):
return self.header[0]
@property
def source(self):
return self.parent.source
@property
def directory(self):
return self.parent.directory
def report_invalid_syntax(self, message, level='ERROR'):
self.parent.report_invalid_syntax(message, level)
def __nonzero__(self):
return bool(self._header or len(self))
def __len__(self):
return sum(1 for item in self)
class _WithSettings(object):
def get_setter(self, setting_name):
normalized = self.normalize(setting_name)
if normalized in self._setters:
return self._setters[normalized](self)
self.report_invalid_syntax("Non-existing setting '%s'." % setting_name)
def is_setting(self, setting_name):
return self.normalize(setting_name) in self._setters
def normalize(self, setting):
result = normalize(setting)
        return result[:-1] if result and result[-1] == ':' else result
class _SettingTable(_Table, _WithSettings):
type = 'setting'
def __init__(self, parent):
_Table.__init__(self, parent)
self.doc = Documentation('Documentation', self)
self.suite_setup = Fixture('Suite Setup', self)
self.suite_teardown = Fixture('Suite Teardown', self)
self.test_setup = Fixture('Test Setup', self)
self.test_teardown = Fixture('Test Teardown', self)
self.force_tags = Tags('Force Tags', self)
self.default_tags = Tags('Default Tags', self)
self.test_template = Template('Test Template', self)
self.test_timeout = Timeout('Test Timeout', self)
self.metadata = MetadataList(self)
self.imports = ImportList(self)
@property
def _old_header_matcher(self):
return OldStyleSettingAndVariableTableHeaderMatcher()
def add_metadata(self, name, value='', comment=None):
self.metadata.add(Metadata(self, name, value, comment))
return self.metadata[-1]
def add_library(self, name, args=None, comment=None):
self.imports.add(Library(self, name, args, comment=comment))
return self.imports[-1]
def add_resource(self, name, invalid_args=None, comment=None):
self.imports.add(Resource(self, name, invalid_args, comment=comment))
return self.imports[-1]
def add_variables(self, name, args=None, comment=None):
self.imports.add(Variables(self, name, args, comment=comment))
return self.imports[-1]
def __len__(self):
return sum(1 for setting in self if setting.is_set())
class TestCaseFileSettingTable(_SettingTable):
_setters = {'documentation': lambda s: s.doc.populate,
'document': lambda s: s.doc.populate,
'suitesetup': lambda s: s.suite_setup.populate,
'suiteprecondition': lambda s: s.suite_setup.populate,
'suiteteardown': lambda s: s.suite_teardown.populate,
'suitepostcondition': lambda s: s.suite_teardown.populate,
'testsetup': lambda s: s.test_setup.populate,
'testprecondition': lambda s: s.test_setup.populate,
'testteardown': lambda s: s.test_teardown.populate,
'testpostcondition': lambda s: s.test_teardown.populate,
'forcetags': lambda s: s.force_tags.populate,
'defaulttags': lambda s: s.default_tags.populate,
'testtemplate': lambda s: s.test_template.populate,
'testtimeout': lambda s: s.test_timeout.populate,
'library': lambda s: s.imports.populate_library,
'resource': lambda s: s.imports.populate_resource,
'variables': lambda s: s.imports.populate_variables,
'metadata': lambda s: s.metadata.populate}
def __iter__(self):
for setting in [self.doc, self.suite_setup, self.suite_teardown,
self.test_setup, self.test_teardown, self.force_tags,
self.default_tags, self.test_template, self.test_timeout] \
+ self.metadata.data + self.imports.data:
yield setting
class ResourceFileSettingTable(_SettingTable):
_setters = {'documentation': lambda s: s.doc.populate,
'document': lambda s: s.doc.populate,
'library': lambda s: s.imports.populate_library,
'resource': lambda s: s.imports.populate_resource,
'variables': lambda s: s.imports.populate_variables}
def __iter__(self):
for setting in [self.doc] + self.imports.data:
yield setting
class InitFileSettingTable(_SettingTable):
_setters = {'documentation': lambda s: s.doc.populate,
'document': lambda s: s.doc.populate,
'suitesetup': lambda s: s.suite_setup.populate,
'suiteprecondition': lambda s: s.suite_setup.populate,
'suiteteardown': lambda s: s.suite_teardown.populate,
'suitepostcondition': lambda s: s.suite_teardown.populate,
'testsetup': lambda s: s.test_setup.populate,
'testprecondition': lambda s: s.test_setup.populate,
'testteardown': lambda s: s.test_teardown.populate,
'testpostcondition': lambda s: s.test_teardown.populate,
'testtimeout': lambda s: s.test_timeout.populate,
'forcetags': lambda s: s.force_tags.populate,
'library': lambda s: s.imports.populate_library,
'resource': lambda s: s.imports.populate_resource,
'variables': lambda s: s.imports.populate_variables,
'metadata': lambda s: s.metadata.populate}
def __iter__(self):
for setting in [self.doc, self.suite_setup, self.suite_teardown,
self.test_setup, self.test_teardown, self.force_tags,
self.test_timeout] + self.metadata.data + self.imports.data:
yield setting
class VariableTable(_Table):
type = 'variable'
def __init__(self, parent):
_Table.__init__(self, parent)
self.variables = []
@property
def _old_header_matcher(self):
return OldStyleSettingAndVariableTableHeaderMatcher()
def add(self, name, value, comment=None):
self.variables.append(Variable(self, name, value, comment))
def __iter__(self):
return iter(self.variables)
class TestCaseTable(_Table):
type = 'test case'
def __init__(self, parent):
_Table.__init__(self, parent)
self.tests = []
@property
def _old_header_matcher(self):
return OldStyleTestAndKeywordTableHeaderMatcher()
def add(self, name):
self.tests.append(TestCase(self, name))
return self.tests[-1]
def __iter__(self):
return iter(self.tests)
def is_started(self):
return bool(self._header)
def __nonzero__(self):
return True
class KeywordTable(_Table):
type = 'keyword'
def __init__(self, parent):
_Table.__init__(self, parent)
self.keywords = []
@property
def _old_header_matcher(self):
return OldStyleTestAndKeywordTableHeaderMatcher()
def add(self, name):
self.keywords.append(UserKeyword(self, name))
return self.keywords[-1]
def __iter__(self):
return iter(self.keywords)
class Variable(object):
def __init__(self, parent, name, value, comment=None):
self.parent = parent
self.name = name.rstrip('= ')
if name.startswith('$') and value == []:
value = ''
if is_string(value):
value = [value]
self.value = value
self.comment = Comment(comment)
def as_list(self):
if self.has_data():
return [self.name] + self.value + self.comment.as_list()
return self.comment.as_list()
def is_set(self):
return True
def is_for_loop(self):
return False
def has_data(self):
return bool(self.name or ''.join(self.value))
def __nonzero__(self):
return self.has_data()
def report_invalid_syntax(self, message, level='ERROR'):
self.parent.report_invalid_syntax("Setting variable '%s' failed: %s"
% (self.name, message), level)
class _WithSteps(object):
def add_step(self, content, comment=None):
self.steps.append(Step(content, comment))
return self.steps[-1]
def copy(self, name):
new = copy.deepcopy(self)
new.name = name
self._add_to_parent(new)
return new
class TestCase(_WithSteps, _WithSettings):
def __init__(self, parent, name):
self.parent = parent
self.name = name
self.doc = Documentation('[Documentation]', self)
self.template = Template('[Template]', self)
self.tags = Tags('[Tags]', self)
self.setup = Fixture('[Setup]', self)
self.teardown = Fixture('[Teardown]', self)
self.timeout = Timeout('[Timeout]', self)
self.steps = []
_setters = {'documentation': lambda s: s.doc.populate,
'document': lambda s: s.doc.populate,
'template': lambda s: s.template.populate,
'setup': lambda s: s.setup.populate,
'precondition': lambda s: s.setup.populate,
'teardown': lambda s: s.teardown.populate,
'postcondition': lambda s: s.teardown.populate,
'tags': lambda s: s.tags.populate,
'timeout': lambda s: s.timeout.populate}
@property
def source(self):
return self.parent.source
@property
def directory(self):
return self.parent.directory
def add_for_loop(self, declaration, comment=None):
self.steps.append(ForLoop(declaration, comment))
return self.steps[-1]
def report_invalid_syntax(self, message, level='ERROR'):
type_ = 'test case' if type(self) is TestCase else 'keyword'
message = "Invalid syntax in %s '%s': %s" % (type_, self.name, message)
self.parent.report_invalid_syntax(message, level)
def _add_to_parent(self, test):
self.parent.tests.append(test)
@property
def settings(self):
return [self.doc, self.tags, self.setup, self.template, self.timeout,
self.teardown]
def __iter__(self):
for element in [self.doc, self.tags, self.setup,
self.template, self.timeout] \
+ self.steps + [self.teardown]:
yield element
class UserKeyword(TestCase):
def __init__(self, parent, name):
self.parent = parent
self.name = name
self.doc = Documentation('[Documentation]', self)
self.args = Arguments('[Arguments]', self)
self.return_ = Return('[Return]', self)
self.timeout = Timeout('[Timeout]', self)
self.teardown = Fixture('[Teardown]', self)
self.tags = Tags('[Tags]', self)
self.steps = []
_setters = {'documentation': lambda s: s.doc.populate,
'document': lambda s: s.doc.populate,
'arguments': lambda s: s.args.populate,
'return': lambda s: s.return_.populate,
'timeout': lambda s: s.timeout.populate,
'teardown': lambda s: s.teardown.populate,
'tags': lambda s: s.tags.populate}
def _add_to_parent(self, test):
self.parent.keywords.append(test)
@property
def settings(self):
return [self.args, self.doc, self.tags, self.timeout, self.teardown, self.return_]
def __iter__(self):
for element in [self.args, self.doc, self.tags, self.timeout] \
+ self.steps + [self.teardown, self.return_]:
yield element
class ForLoop(_WithSteps):
"""The parsed representation of a for-loop.
:param list declaration: The literal cell values that declare the loop
(excluding ":FOR").
:param str comment: A comment, default None.
:ivar str flavor: The value of the 'IN' item, uppercased.
Typically 'IN', 'IN RANGE', 'IN ZIP', or 'IN ENUMERATE'.
:ivar list vars: Variables set per-iteration by this loop.
:ivar list items: Loop values that come after the 'IN' item.
:ivar str comment: A comment, or None.
:ivar list steps: A list of steps in the loop.
"""
def __init__(self, declaration, comment=None):
self.flavor, index = self._get_flavors_and_index(declaration)
self.vars = declaration[:index]
self.items = declaration[index+1:]
self.comment = Comment(comment)
self.steps = []
def _get_flavors_and_index(self, declaration):
for index, item in enumerate(declaration):
item = item.upper()
if item.replace(' ', '').startswith('IN'):
return item, index
return 'IN', len(declaration)
def is_comment(self):
return False
def is_for_loop(self):
return True
def as_list(self, indent=False, include_comment=True):
comments = self.comment.as_list() if include_comment else []
return [': FOR'] + self.vars + [self.flavor] + self.items + comments
def __iter__(self):
return iter(self.steps)
def is_set(self):
return True
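# Worked example (illustrative, not in the original source): for the
# declaration ['${i}', 'IN RANGE', '10', '20'], _get_flavors_and_index()
# matches 'IN RANGE' at index 1, so the loop parses as:
#   loop = ForLoop(['${i}', 'IN RANGE', '10', '20'])
#   loop.flavor == 'IN RANGE'; loop.vars == ['${i}']; loop.items == ['10', '20']
#   loop.as_list() == [': FOR', '${i}', 'IN RANGE', '10', '20']   (assuming no comment cells)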
class Step(object):
def __init__(self, content, comment=None):
self.assign = list(self._get_assigned_vars(content))
try:
self.name = content[len(self.assign)]
except IndexError:
self.name = None
self.args = content[len(self.assign)+1:]
self.comment = Comment(comment)
def _get_assigned_vars(self, content):
for item in content:
if not is_var(item.rstrip('= ')):
return
yield item
def is_comment(self):
return not (self.assign or self.name or self.args)
def is_for_loop(self):
return False
def is_set(self):
return True
def as_list(self, indent=False, include_comment=True):
kw = [self.name] if self.name is not None else []
comments = self.comment.as_list() if include_comment else []
data = self.assign + kw + self.args + comments
if indent:
data.insert(0, '')
return data
class OldStyleSettingAndVariableTableHeaderMatcher(object):
def match(self, header):
        return all(e.lower() == 'value' for e in header[1:])
class OldStyleTestAndKeywordTableHeaderMatcher(object):
def match(self, header):
if header[1].lower() != 'action':
return False
for h in header[2:]:
if not h.lower().startswith('arg'):
return False
return True
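# Illustration of the matchers above (an assumed usage, not part of the
# original module): each receives a header row split into cells and reports
# whether it follows the old-style layout.
#   OldStyleSettingAndVariableTableHeaderMatcher().match(['Settings', 'Value', 'Value'])   # True
#   OldStyleTestAndKeywordTableHeaderMatcher().match(['Test Case', 'Action', 'Arg 1'])     # True
#   OldStyleTestAndKeywordTableHeaderMatcher().match(['Test Case', 'Documentation'])       # False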
|
|
#!/usr/bin/env python2.7
"""opencachehttp.py: HTTP Server - serves cached HTTP objects to requesting clients."""
import BaseHTTPServer
import collections
import hashlib
import httplib
import os
import signal
import SocketServer
import sys
import threading
import opencache.lib.opencachelib as lib
import opencache.node.state.opencachemongodb as database
import zmq
TAG = 'server'
class Server:
_server_path = None
_server = None
_port = None
_ipc_socket = None
_context = None
_node = None
_expr = None
_load = 0
_load_data = None
def __init__(self, node, expr, port):
"""Initialise server instance.
Creates new connection manager. Creates new HTTP server. Passes objects to the server to facilitate
callbacks. Sets server status to 'start'. Runs server until terminated.
"""
self._setup_signal_handling()
self._database = node.database
self._node = node
self._expr = expr
self._port = port
self._load_data = collections.deque(maxlen=int(self._node.config["stat_refresh"]))
self._set_path(expr)
lib.create_directory(self._server_path)
self._server = self.ThreadedHTTPServer(('', self._port), self.HandlerClass)
#self._server = self.ThreadedHTTPServer((self._node.config["node_host"], self._port), self.HandlerClass)
self._server._setup_signal_handling()
self._server._server = self
self._server._node = self._node
self._server._expr = self._expr
self._server._server_path = self._server_path
threading.Thread(target=self._conn_manager, args=(expr, )).start()
threading.Thread(target=self._load_monitor, args=()).start()
threading.Thread(target=self._stat_reporter, args=()).start()
self._start()
self._server.serve_forever()
def _setup_signal_handling(self):
"""Setup signal handling for SIGQUIT and SIGINT events"""
signal.signal(signal.SIGINT, self._exit_server)
signal.signal(signal.SIGQUIT, self._exit_server)
def _exit_server(self, signal, frame):
raise SystemExit
def _conn_manager(self, expr):
"""Manage inter-process communication (IPC) connections.
        Receives messages from the OpenCache node process, instructing it to call the start/stop/pause/stat methods.
"""
self._context = zmq.Context()
self._ipc_socket = self._context.socket(zmq.SUB)
self._ipc_socket.connect("ipc://oc")
self._ipc_socket.setsockopt_string(zmq.SUBSCRIBE, expr.decode('ascii'))
while True:
string = self._ipc_socket.recv_string()
expr, call, path, transaction = string.split()
if transaction == '?' or path == '?':
getattr(self, "_" + str(call))()
else:
getattr(self, "_" + str(call))(path, transaction)
def _send_message_to_controller(self, message):
"""Send given message to controller notification port."""
context = zmq.Context()
ipc_socket = context.socket(zmq.PUSH)
ipc_socket.connect("tcp://" + self._node.config["controller_host"] + ":" + self._node.config["notification_port"])
ipc_socket.send_json(message)
def _stat_reporter(self):
"""Report statistics back to the controller periodcially."""
threading.Timer(interval=int(self._node.config["stat_refresh"]), function=self._stat_reporter, args=()).start()
self._stat()
def _load_monitor(self):
"""Monitor the request load every second. Send alert to controller if it exceeds a configured amount."""
threading.Timer(interval=int(1), function=self._load_monitor, args=()).start()
self._current_load = self._server._load
self._load_data.append(self._current_load)
self._server._load = 0
if int(self._current_load) > int(self._node.config["alert_load"]):
self._send_message_to_controller(self._get_alert('load', self._current_load))
def _get_average_load(self):
"""Calculate load average over given time."""
average = 0
for data_point in self._load_data:
average += int(data_point)
average = average/int(self._node.config["stat_refresh"])
return average
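    # Worked example (illustrative): with stat_refresh = 5 and per-second
    # samples [3, 4, 5, 4, 4] in self._load_data, the average is
    # (3 + 4 + 5 + 4 + 4) / 5 = 4. The divisor is the configured window
    # length, so the result is only a true mean once the deque has filled
    # to its maxlen.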
def _get_alert(self, alert_type, value):
"""Get message body for an alert notification to the controller."""
alert = dict()
alert['method'] = 'alert'
alert['id'] = None
alert['params'] = dict()
alert['params']['expr'] = self._server._expr
alert['params']['node_id'] = self._node.node_id
alert['params']['type'] = alert_type
alert['params']['value'] = value
return alert
def _start(self):
"""Start the HTTP server.
        Create the directory in which cached content is stored and allow the HTTP server to start receiving requests.
Set status to indicate new state.
"""
self._server._stop = False
self._server._status = 'start'
self._send_message_to_controller(self._get_redirect('add'))
def _stop(self):
"""Stop the HTTP server.
Stop HTTP server from receiving requests and remove directory used to store cached content.
Set status to indicate new state.
"""
self._send_message_to_controller(self._get_redirect('remove'))
self._server._stop = True
self._database.remove({'expr' : self._expr})
lib.delete_directory(self._server_path)
self._server._status = 'stop'
self._stat()
def _pause(self):
"""Pause the HTTP server.
Pause HTTP server, temporarily preventing the receipt of requests. Set status to indicate new state.
"""
self._send_message_to_controller(self._get_redirect('remove'))
self._server._stop = True
self._server._status = 'pause'
def _get_redirect(self, action):
"""Get message body for a redirect notification to the controller."""
redirect = dict()
redirect['method'] = 'redir'
redirect['id'] = None
redirect['params'] = dict()
redirect['params']['expr'] = self._server._expr
redirect['params']['node_id'] = self._node.node_id
redirect['params']['host'] = self._node.config['node_host']
redirect['params']['port'] = self._port
redirect['params']['action'] = action
return redirect
def _stat(self):
"""Retrieve statistics for this HTTP server and send them to the controller."""
self._send_message_to_controller(self._get_stats())
def _get_stats(self):
"""Get message body for a statistics notification to the controller.
The statistics returned to the controller include:
status -- current status of the HTTP server
        expr -- the OpenCache expression that this node is serving
node_id -- the ID number given to the node by the OpenCache controller
cache_miss -- number of cache miss (content not found in cache) events (one per request)
cache_miss_size -- number of bytes served whilst handling cache miss (content not found in cache) events
cache_hit -- number of cache hit (content already found in cache) events (one per request)
cache_hit_size -- number of bytes served whilst handling cache hit (content already found in cache) events
cache_object -- number of objects currently stored by the cache
cache_object_size -- size of cached objects on disk (actual, in bytes)
"""
statistics = dict()
statistics['method'] = 'stat'
statistics['id'] = None
statistics['params'] = dict()
statistics['params']['status'] = self._server._status
statistics['params']['avg_load'] = self._get_average_load()
statistics['params']['expr'] = self._server._expr
statistics['params']['node_id'] = self._node.node_id
statistics['params']['cache_miss'] = self._server._cache_miss
statistics['params']['cache_miss_size'] = self._server._cache_miss_size
statistics['params']['cache_hit'] = self._server._cache_hit
statistics['params']['cache_hit_size'] = self._server._cache_hit_size
statistics['params']['cache_object'] = len(self._database.lookup({}))
dir_size = get_dir_size(self._server_path)
statistics['params']['cache_object_size'] = dir_size
return statistics
def _set_path(self, expr):
"""Set the path used to store cached content specific to this HTTP server's expression."""
self._server_path = self._node.config["cache_path"] + hashlib.sha224(expr).hexdigest()
class ThreadedHTTPServer(SocketServer.ThreadingMixIn, BaseHTTPServer.HTTPServer):
"""Create a threaded HTTP server."""
allow_reuse_address = True
daemon_threads = True
_stop = True
_status = 'start'
_cache_hit_size = 0
_cache_miss_size = 0
_cache_hit = 0
_cache_miss = 0
_load = 0
_status = None
_node = None
_server_path = None
_expr = None
_server = None
def _setup_signal_handling(self):
"""Setup signal handling for SIGQUIT and SIGINT events"""
signal.signal(signal.SIGINT, self._exit_server)
signal.signal(signal.SIGQUIT, self._exit_server)
def _exit_server(self, signal, frame):
raise SystemExit
        def serve_forever(self):
            """Override default behaviour to handle one request at a time until the state is changed.
            Serve content as long as the HTTP server is in the 'start' state. When 'paused' or 'stopped',
            requests will not be handled.
            """
while True:
                if not self._stop:
self.handle_request()
self._load += 1
class HandlerClass(BaseHTTPServer.BaseHTTPRequestHandler):
protocol_version = 'HTTP/1.1'
def log_message( self, format, *args ):
"""Ignore log messages."""
pass
def do_GET(self):
"""Handle incoming GET messages from clients.
Calculate hash value for content request. Check to see if this has already been cached.
If it has, a cache hit occurs. If the content is not present on the disk or has not
been cached previously, a cache miss occurs.
"""
key = hashlib.sha224(self.path).hexdigest()
if len(self.server._server._database.lookup({'key' : key})) == 1:
try:
self._cache_hit(key)
except (IOError, OSError) as e:
self.server._node.print_warn(TAG, ('Could not retrieve content from filesystem, cache miss\'ing instead: %s' % e))
self._cache_miss(key)
else:
self._cache_miss(key)
def do_POST(self):
"""Ignore POST messages."""
pass
def _cache_hit(self, key):
"""The content has been seen before, and should be sent to the client using the cached copy.
Statistics updated accordingly.
"""
path = self.server._server._database.lookup({'key' : key})[0]['path']
try:
self.server._node.print_debug(TAG, 'cache hit: %s%s' %(self.server._expr, self.path))
f = open(path, 'r')
local_object = f.read()
self._send_object(local_object)
f.close()
self.server._cache_hit += 1
self.server._cache_hit_size += sys.getsizeof(local_object)
except IOError:
raise
def _cache_miss(self, key):
"""The content has not been seen before, and needs to be retrieved before it can be
sent to the client.
Once the content has been delivered, the object can be stored on disk to serve future
cache requests. Statistics updated accordingly.
"""
self.server._node.print_debug(TAG, 'cache miss: %s%s' %(self.server._expr, self.path))
remote_object = self._fetch_and_send_object(self.server._expr)
if self._disk_check():
lookup = self.server._server._database.lookup({'key' : key})
if len(lookup) == 1:
object_path = lookup[0]['path']
else:
object_path = self.server._server_path + "/" + key
try:
f = open(object_path, 'w')
f.write(remote_object)
f.close()
self.server._server._database.create({'expr' : self.server._expr, 'key' : key, 'path' : object_path})
except (IOError, OSError) as e:
self.server._node.print_warn(TAG, ('Could not save content to filesystem: %s' % e))
else:
self.server._node.print_info(TAG, 'Cache instance has reached maximum disk usage and cannot store object: %s%s' %(self.server._expr, self.path))
self.server._cache_miss += 1
self.server._cache_miss_size += sys.getsizeof(remote_object)
def _disk_check(self):
"""Check if it possible to write a given object to disk.
If the current directory size is greater than the 'alert_disk' configuration setting, send an alert to the controller.
"""
dir_size = get_dir_size(self.server._server_path)
if int(dir_size) > int(self.server._node.config["alert_disk"]):
self.server._server._send_message_to_controller(self.server._server._get_alert('disk', dir_size))
if int(dir_size) > int(self.server._node.config["max_disk"]):
return False
return True
def _fetch_and_send_object(self, url):
"""Fetch the object from the original external location and deliver this to the client. """
connection = httplib.HTTPConnection(url)
connection.request("GET", self.path)
response = connection.getresponse()
length = int(response.getheader('content-length'))
self.send_response(200)
            self.send_header('Content-type', 'text/html')
self.send_header('Content-length', length)
self.end_headers()
total_payload = ""
bytes_read = 0
while True:
try:
read_payload = response.read(1448)
except Exception as e:
self.server._node.print_error(TAG, 'Could not retrieve content from origin server: %s', e)
break
try:
self.wfile.write(read_payload)
except Exception as e:
self.server._node.print_error(TAG, 'Could not deliver fetched content to client: %s', e)
break
total_payload += read_payload
bytes_read += 1448
if bytes_read > length:
break
connection.close()
self.server._node.print_debug(TAG, 'cache fetched: %s%s at approx. %s bytes' %(url, self.path, bytes_read))
return total_payload
def _send_object(self, data):
"""Deliver the cached object to the client"""
self.send_response(200)
            self.send_header('Content-type', 'text/html')
self.send_header('Content-length', len(data))
self.end_headers()
try:
self.wfile.write(data)
except Exception as e:
self.server._node.print_error(TAG, 'Could not deliver cached content to client: %s', e)
return
def get_dir_size(path):
"""Get size of files (actual, in bytes) for given path"""
total_size = 0
for dirpath, dirnames, filenames in os.walk(path):
for f in filenames:
fp = os.path.join(dirpath, f)
total_size += os.path.getsize(fp)
return total_size
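# Minimal standalone sketch (an illustrative addition, not part of the node's
# startup path, which is driven by the Server class above): report the size of
# a directory tree using the helper.
if __name__ == '__main__':
    target = sys.argv[1] if len(sys.argv) > 1 else '.'
    print('%s: %d bytes' % (target, get_dir_size(target)))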
|
|
### BEGIN LICENSE ###
### Use of the triage tools and related source code is subject to the terms
### of the license below.
###
### ------------------------------------------------------------------------
### Copyright (C) 2011 Carnegie Mellon University. All Rights Reserved.
### Portions Copyright 2013 BlackBerry Ltd. All Rights Reserved.
### ------------------------------------------------------------------------
### Redistribution and use in source and binary forms, with or without
### modification, are permitted provided that the following conditions are
### met:
###
### 1. Redistributions of source code must retain the above copyright
### notice, this list of conditions and the following acknowledgments
### and disclaimers.
###
### 2. Redistributions in binary form must reproduce the above copyright
### notice, this list of conditions and the following disclaimer in the
### documentation and/or other materials provided with the distribution.
###
### 3. The names "Department of Homeland Security," "Carnegie Mellon
### University," "CERT" and/or "Software Engineering Institute" shall
### not be used to endorse or promote products derived from this software
### without prior written permission. For written permission, please
### contact permission@sei.cmu.edu.
###
### 4. Products derived from this software may not be called "CERT" nor
### may "CERT" appear in their names without prior written permission of
### permission@sei.cmu.edu.
###
### 5. Redistributions of any form whatsoever must retain the following
### acknowledgment:
###
### "This product includes software developed by CERT with funding
### and support from the Department of Homeland Security under
### Contract No. FA 8721-05-C-0003."
###
### THIS SOFTWARE IS PROVIDED BY CARNEGIE MELLON UNIVERSITY ``AS IS'' AND
### CARNEGIE MELLON UNIVERSITY MAKES NO WARRANTIES OF ANY KIND, EITHER
### EXPRESS OR IMPLIED, AS TO ANY MATTER, AND ALL SUCH WARRANTIES, INCLUDING
### WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
### EXPRESSLY DISCLAIMED. WITHOUT LIMITING THE GENERALITY OF THE FOREGOING,
### CARNEGIE MELLON UNIVERSITY DOES NOT MAKE ANY WARRANTY OF ANY KIND
### RELATING TO EXCLUSIVITY, INFORMATIONAL CONTENT, ERROR-FREE OPERATION,
### RESULTS TO BE OBTAINED FROM USE, FREEDOM FROM PATENT, TRADEMARK AND
### COPYRIGHT INFRINGEMENT AND/OR FREEDOM FROM THEFT OF TRADE SECRETS.
### END LICENSE ###
'''
Contains a list of rules used to classify the state of a GDB Inferior.
Rules are defined by category (ex: "EXPLOITABLE") and are roughly ordered
from "most exploitable" to "least exploitable".
'''
rules = [
('EXPLOITABLE', [
dict(match_function="isUseAfterFree",
desc="Use of previously freed heap buffer",
short_desc="UseAfterFree",
explanation="The target tried to access an address which was within a "
"heap buffer which has already been freed."),
dict(match_function="isSegFaultOnPcNotNearNull",
desc="Segmentation fault on program counter",
short_desc="SegFaultOnPc",
explanation="The target tried to access data at an address that matches "
"the program counter. This is likely due to the execution of a branch "
"instruction (ex: 'call') with a bad argument, but it could also be "
"due to execution continuing past the end of a memory region or another "
"cause. Regardless this likely indicates that the program counter "
"contents are tainted and can be controlled by an attacker."),
dict(match_function="isBranchAvNotNearNull",
desc="Access violation during branch instruction",
short_desc="BranchAv",
explanation="The target crashed on a branch instruction, which may "
"indicate that the control flow is tainted."),
dict(match_function="isErrorWhileExecutingFromStack",
desc="Executing from stack",
short_desc="StackCodeExecution",
explanation="The target stopped on an error while executing code "
"within the process's stack region."),
dict(match_function="isStackBufferOverflow",
desc="Stack buffer overflow",
short_desc="StackBufferOverflow",
explanation="The target stopped while handling a signal that was "
"generated by libc due to detection of a stack buffer overflow. Stack "
"buffer overflows are generally considered exploitable."),
dict(match_function="isPossibleStackCorruption",
desc="Possible stack corruption",
short_desc="PossibleStackCorruption",
explanation="GDB generated an error while unwinding the stack "
"and/or the stack contained return addresses that were not mapped "
"in the inferior's process address space and/or the stack pointer is "
"pointing to a location outside the default stack region. These "
"conditions likely indicate stack corruption, which is generally "
"considered exploitable."),
dict(match_function="isWriteAvNotNearNull",
desc="Access violation on destination operand",
short_desc="WriteAv",
explanation="The target crashed on an access violation at an address "
"matching the destination operand of the instruction. This likely "
"indicates a write access violation, which means the attacker may "
"control the write address and/or value."),
dict(match_function="isMalformedInstructionSignal",
desc="Bad instruction",
short_desc="BadInstruction",
explanation="The target tried to execute a malformed or privileged "
"instruction. This may indicate that the control flow is tainted."),
dict(match_function="isHeapError",
desc="Heap error",
short_desc="HeapError",
explanation="The target's backtrace indicates that libc has detected "
"a heap error or that the target was executing a heap function when it "
"stopped. This could be due to heap corruption, passing a bad pointer to "
"a heap function such as free(), etc. Since heap errors might include "
"buffer overflows, use-after-free situations, etc. they are generally "
"considered exploitable."),
]),
('PROBABLY_EXPLOITABLE', [
dict(match_function="isStackOverflow",
desc="Stack overflow",
short_desc="StackOverflow",
explanation="The target crashed on an access violation where the faulting "
"instruction's mnemonic and the stack pointer seem to indicate a stack "
"overflow."),
dict(match_function="isSegFaultOnPcNearNull",
desc="Segmentation fault on program counter near NULL",
short_desc="SegFaultOnPcNearNull",
explanation="The target tried to access data at an address that matches "
"the program counter. This may indicate that the program "
"counter contents are tainted, however, it may also indicate a simple "
"NULL deference."),
dict(match_function="isBranchAvNearNull",
desc="Access violation near NULL during branch instruction",
short_desc="BranchAvNearNull",
explanation="The target crashed on a branch instruction, which may "
"indicate that the control flow is tainted. However, there is a "
"chance it could be a NULL dereference."),
dict(match_function="isBlockMove",
desc="Access violation during block move",
short_desc="BlockMoveAv",
explanation="The target crashed during a block move, which may indicate "
"that the attacker can control a buffer overflow."),
dict(match_function="isRepAv",
desc="Access violation on 'rep' prefixed instruction",
short_desc="RepAv",
explanation="TODO: in short, we have no stack corruption detction right now. AVs during rep instructions are interesting because they often lead to exploitabile conditions like buffer overflows, so we are including them for now.."),
dict(match_function="isWriteAvNearNull",
desc="Access violation near NULL on destination operand",
short_desc="WriteAvNearNull",
explanation="The target crashed on an access violation at an address "
"matching the destination operand of the instruction. This likely "
"indicates a write access violation, which means the attacker may "
"control write address and/or value. However, it there is a chance "
"it could be a NULL dereference."),
dict(match_function="isReturnAv",
desc="Access violation during return instruction",
short_desc="ReturnAv",
explanation="The target crashed on a return instruction, which likely "
"indicates stack corruption."),
]),
('PROBABLY_NOT_EXPLOITABLE', [
dict(match_function="isReadAvNearNull",
desc="Access violation near NULL on source operand",
short_desc="ReadAvNearNull",
explanation="The target crashed on an access violation at an address "
"matching the source operand of the current instruction. This likely "
"indicates a read access violation, which may mean the application "
"crashed on a simple NULL dereference to data structure that has no "
"immediate effect on control of the processor."),
dict(match_function="isFloatingPointException",
desc="Floating point exception signal",
short_desc="FloatingPointException",
explanation="The target crashed on a floating point exception. This "
"may indicate a division by zero or a number of other floating point "
"errors. It is generally difficult to leverage these types of errors "
"to gain control of the processor."),
dict(match_function="isBenignSignal",
desc="Benign signal",
short_desc="BenignSignal",
explanation="The target is stopped on a signal that either does not "
"indicate an error or indicates an error that is generally not "
"considered exploitable."),
]),
('UNKNOWN', [
dict(match_function="isReadAvNotNearNull",
desc="Access violation on source operand",
short_desc="ReadAv",
explanation="The target crashed on an access violation at an address "
"matching the source operand of the current instruction. This likely "
"indicates a read access violation."),
dict(match_function="isAbortSignal",
desc="Abort signal",
short_desc="AbortSignal",
explanation="The target is stopped on a SIGABRT. SIGABRTs are often "
"generated by libc and compiled check-code to indicate potentially "
"exploitable conditions. Unfortunately this command does not yet further "
"analyze these crashes."),
dict(match_function="isAccessViolationSignal",
desc="Access violation",
short_desc="AccessViolation",
explanation="The target crashed due to an access violation but there is not enough "
"additional information available to determine exploitability."),
dict(match_function="isUncategorizedSignal",
desc="Uncategorized signal",
short_desc="UncategorizedSignal",
explanation="The target is stopped on a signal. This may be an exploitable "
"condition, but this command was unable to categorize it."),
]),
] # end rules
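# A minimal consumption sketch (illustrative; the real triage tools may walk
# these rules differently). Given an object exposing the boolean predicates
# named in match_function above -- e.g. analysis.isUseAfterFree() -- return
# the first matching rule's category and description. Both `analysis` and
# `classify` are assumptions made for this example, not part of the tool's API.
def classify(analysis):
    for category, checks in rules:
        for check in checks:
            if getattr(analysis, check['match_function'])():
                return category, check['desc']
    return None, None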
|
|
from django.db.models import Exists, F, IntegerField, OuterRef, Value
from django.db.utils import DatabaseError, NotSupportedError
from django.test import TestCase, skipIfDBFeature, skipUnlessDBFeature
from .models import Number, ReservedName
@skipUnlessDBFeature('supports_select_union')
class QuerySetSetOperationTests(TestCase):
@classmethod
def setUpTestData(cls):
Number.objects.bulk_create(Number(num=i) for i in range(10))
def number_transform(self, value):
return value.num
def assertNumbersEqual(self, queryset, expected_numbers, ordered=True):
self.assertQuerysetEqual(queryset, expected_numbers, self.number_transform, ordered)
def test_simple_union(self):
qs1 = Number.objects.filter(num__lte=1)
qs2 = Number.objects.filter(num__gte=8)
qs3 = Number.objects.filter(num=5)
self.assertNumbersEqual(qs1.union(qs2, qs3), [0, 1, 5, 8, 9], ordered=False)
@skipUnlessDBFeature('supports_select_intersection')
def test_simple_intersection(self):
qs1 = Number.objects.filter(num__lte=5)
qs2 = Number.objects.filter(num__gte=5)
qs3 = Number.objects.filter(num__gte=4, num__lte=6)
self.assertNumbersEqual(qs1.intersection(qs2, qs3), [5], ordered=False)
@skipUnlessDBFeature('supports_select_intersection')
def test_intersection_with_values(self):
ReservedName.objects.create(name='a', order=2)
qs1 = ReservedName.objects.all()
reserved_name = qs1.intersection(qs1).values('name', 'order', 'id').get()
self.assertEqual(reserved_name['name'], 'a')
self.assertEqual(reserved_name['order'], 2)
reserved_name = qs1.intersection(qs1).values_list('name', 'order', 'id').get()
self.assertEqual(reserved_name[:2], ('a', 2))
@skipUnlessDBFeature('supports_select_difference')
def test_simple_difference(self):
qs1 = Number.objects.filter(num__lte=5)
qs2 = Number.objects.filter(num__lte=4)
self.assertNumbersEqual(qs1.difference(qs2), [5], ordered=False)
def test_union_distinct(self):
qs1 = Number.objects.all()
qs2 = Number.objects.all()
self.assertEqual(len(list(qs1.union(qs2, all=True))), 20)
self.assertEqual(len(list(qs1.union(qs2))), 10)
@skipUnlessDBFeature('supports_select_intersection')
def test_intersection_with_empty_qs(self):
qs1 = Number.objects.all()
qs2 = Number.objects.none()
qs3 = Number.objects.filter(pk__in=[])
self.assertEqual(len(qs1.intersection(qs2)), 0)
self.assertEqual(len(qs1.intersection(qs3)), 0)
self.assertEqual(len(qs2.intersection(qs1)), 0)
self.assertEqual(len(qs3.intersection(qs1)), 0)
self.assertEqual(len(qs2.intersection(qs2)), 0)
self.assertEqual(len(qs3.intersection(qs3)), 0)
@skipUnlessDBFeature('supports_select_difference')
def test_difference_with_empty_qs(self):
qs1 = Number.objects.all()
qs2 = Number.objects.none()
qs3 = Number.objects.filter(pk__in=[])
self.assertEqual(len(qs1.difference(qs2)), 10)
self.assertEqual(len(qs1.difference(qs3)), 10)
self.assertEqual(len(qs2.difference(qs1)), 0)
self.assertEqual(len(qs3.difference(qs1)), 0)
self.assertEqual(len(qs2.difference(qs2)), 0)
self.assertEqual(len(qs3.difference(qs3)), 0)
@skipUnlessDBFeature('supports_select_difference')
def test_difference_with_values(self):
ReservedName.objects.create(name='a', order=2)
qs1 = ReservedName.objects.all()
qs2 = ReservedName.objects.none()
reserved_name = qs1.difference(qs2).values('name', 'order', 'id').get()
self.assertEqual(reserved_name['name'], 'a')
self.assertEqual(reserved_name['order'], 2)
reserved_name = qs1.difference(qs2).values_list('name', 'order', 'id').get()
self.assertEqual(reserved_name[:2], ('a', 2))
def test_union_with_empty_qs(self):
qs1 = Number.objects.all()
qs2 = Number.objects.none()
qs3 = Number.objects.filter(pk__in=[])
self.assertEqual(len(qs1.union(qs2)), 10)
self.assertEqual(len(qs2.union(qs1)), 10)
self.assertEqual(len(qs1.union(qs3)), 10)
self.assertEqual(len(qs3.union(qs1)), 10)
self.assertEqual(len(qs2.union(qs1, qs1, qs1)), 10)
self.assertEqual(len(qs2.union(qs1, qs1, all=True)), 20)
self.assertEqual(len(qs2.union(qs2)), 0)
self.assertEqual(len(qs3.union(qs3)), 0)
def test_limits(self):
qs1 = Number.objects.all()
qs2 = Number.objects.all()
self.assertEqual(len(list(qs1.union(qs2)[:2])), 2)
def test_ordering(self):
qs1 = Number.objects.filter(num__lte=1)
qs2 = Number.objects.filter(num__gte=2, num__lte=3)
self.assertNumbersEqual(qs1.union(qs2).order_by('-num'), [3, 2, 1, 0])
def test_union_with_values(self):
ReservedName.objects.create(name='a', order=2)
qs1 = ReservedName.objects.all()
reserved_name = qs1.union(qs1).values('name', 'order', 'id').get()
self.assertEqual(reserved_name['name'], 'a')
self.assertEqual(reserved_name['order'], 2)
reserved_name = qs1.union(qs1).values_list('name', 'order', 'id').get()
self.assertEqual(reserved_name[:2], ('a', 2))
def test_union_with_two_annotated_values_list(self):
qs1 = Number.objects.filter(num=1).annotate(
count=Value(0, IntegerField()),
).values_list('num', 'count')
qs2 = Number.objects.filter(num=2).values('pk').annotate(
count=F('num'),
).annotate(
num=Value(1, IntegerField()),
).values_list('num', 'count')
self.assertCountEqual(qs1.union(qs2), [(1, 0), (2, 1)])
def test_union_with_extra_and_values_list(self):
qs1 = Number.objects.filter(num=1).extra(
select={'count': 0},
).values_list('num', 'count')
qs2 = Number.objects.filter(num=2).extra(select={'count': 1})
self.assertCountEqual(qs1.union(qs2), [(1, 0), (2, 1)])
def test_union_with_values_list_on_annotated_and_unannotated(self):
ReservedName.objects.create(name='rn1', order=1)
qs1 = Number.objects.annotate(
has_reserved_name=Exists(ReservedName.objects.filter(order=OuterRef('num')))
).filter(has_reserved_name=True)
qs2 = Number.objects.filter(num=9)
self.assertCountEqual(qs1.union(qs2).values_list('num', flat=True), [1, 9])
def test_count_union(self):
qs1 = Number.objects.filter(num__lte=1).values('num')
qs2 = Number.objects.filter(num__gte=2, num__lte=3).values('num')
self.assertEqual(qs1.union(qs2).count(), 4)
def test_count_union_empty_result(self):
qs = Number.objects.filter(pk__in=[])
self.assertEqual(qs.union(qs).count(), 0)
@skipUnlessDBFeature('supports_select_difference')
def test_count_difference(self):
qs1 = Number.objects.filter(num__lt=10)
qs2 = Number.objects.filter(num__lt=9)
self.assertEqual(qs1.difference(qs2).count(), 1)
@skipUnlessDBFeature('supports_select_intersection')
def test_count_intersection(self):
qs1 = Number.objects.filter(num__gte=5)
qs2 = Number.objects.filter(num__lte=5)
self.assertEqual(qs1.intersection(qs2).count(), 1)
@skipUnlessDBFeature('supports_slicing_ordering_in_compound')
def test_ordering_subqueries(self):
qs1 = Number.objects.order_by('num')[:2]
qs2 = Number.objects.order_by('-num')[:2]
self.assertNumbersEqual(qs1.union(qs2).order_by('-num')[:4], [9, 8, 1, 0])
@skipIfDBFeature('supports_slicing_ordering_in_compound')
def test_unsupported_ordering_slicing_raises_db_error(self):
qs1 = Number.objects.all()
qs2 = Number.objects.all()
msg = 'LIMIT/OFFSET not allowed in subqueries of compound statements'
with self.assertRaisesMessage(DatabaseError, msg):
list(qs1.union(qs2[:10]))
msg = 'ORDER BY not allowed in subqueries of compound statements'
with self.assertRaisesMessage(DatabaseError, msg):
list(qs1.order_by('id').union(qs2))
@skipIfDBFeature('supports_select_intersection')
def test_unsupported_intersection_raises_db_error(self):
qs1 = Number.objects.all()
qs2 = Number.objects.all()
msg = 'intersection is not supported on this database backend'
with self.assertRaisesMessage(NotSupportedError, msg):
list(qs1.intersection(qs2))
def test_combining_multiple_models(self):
ReservedName.objects.create(name='99 little bugs', order=99)
qs1 = Number.objects.filter(num=1).values_list('num', flat=True)
qs2 = ReservedName.objects.values_list('order')
self.assertEqual(list(qs1.union(qs2).order_by('num')), [1, 99])
def test_order_raises_on_non_selected_column(self):
qs1 = Number.objects.filter().annotate(
annotation=Value(1, IntegerField()),
).values('annotation', num2=F('num'))
qs2 = Number.objects.filter().values('id', 'num')
# Should not raise
list(qs1.union(qs2).order_by('annotation'))
list(qs1.union(qs2).order_by('num2'))
msg = 'ORDER BY term does not match any column in the result set'
# 'id' is not part of the select
with self.assertRaisesMessage(DatabaseError, msg):
list(qs1.union(qs2).order_by('id'))
        # 'num' got re-aliased to 'num2'
with self.assertRaisesMessage(DatabaseError, msg):
list(qs1.union(qs2).order_by('num'))
        # with the querysets in the other order, 'num' exists in the result set again:
list(qs2.union(qs1).order_by('num'))
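        # Note on the aliasing above (illustrative summary, not part of the
        # original test): qs1 selects the columns ('annotation', 'num2'), and a
        # union takes its column names from the first queryset, so ordering by
        # 'id' or 'num' finds no matching column. With qs2 first the columns
        # are ('id', 'num'), and ordering by 'num' succeeds.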
|
|
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
import warnings
import numpy
import six
import chainer
from chainer.backends import cuda
from chainer import testing
from chainer.testing import attr
class TestDummyDeviceType(unittest.TestCase):
def test_int(self):
assert int(cuda.DummyDeviceType()) == -1
def test_eq(self):
assert cuda.DummyDeviceType() == cuda.DummyDeviceType()
def test_ne(self):
assert cuda.DummyDeviceType() != 1
_builtins_available = False
try:
import builtins
_builtins_available = True
except ImportError:
pass
class TestCudaModuleAliasForBackwardCompatibility(unittest.TestCase):
def _check(self, code):
temp_dir = tempfile.mkdtemp()
try:
script_path = os.path.join(temp_dir, 'script.py')
with open(script_path, 'w') as f:
f.write(code)
proc = subprocess.Popen(
[sys.executable, script_path],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdoutdata, stderrdata = proc.communicate()
finally:
shutil.rmtree(temp_dir, ignore_errors=True)
ret = proc.returncode
assert ret == 0, (
'Import test failed.\n'
'[code]:\n{}\n'
'[stdout]:{!r}\n'
'[stderr]:{!r}'.format(
code, stdoutdata, stderrdata))
def test_import1(self):
self._check('from chainer import cuda; cuda.get_device_from_id')
def test_import2(self):
self._check('import chainer.cuda; chainer.cuda.get_device_from_id')
def test_import3(self):
self._check('import chainer; chainer.cuda.get_device_from_id')
class TestCuda(unittest.TestCase):
def test_get_dummy_device(self):
assert cuda.get_device_from_id(None) is cuda.DummyDevice
@attr.gpu
def test_get_device_from_id_for_numpy_int(self):
assert cuda.get_device_from_id(numpy.int64(0)) == cuda.Device(0)
def test_get_device_from_array_for_numpy_int(self):
assert cuda.get_device_from_array(numpy.int64(0)) is cuda.DummyDevice
@attr.gpu
def test_get_device_for_empty_array(self):
x = cuda.get_device_from_array(cuda.cupy.array([]).reshape((0, 10)))
# TODO(okuta): Only check `assert x == cuda.Device(0)`
# when cupy/cupy#946 is merged
assert x == cuda.Device(0) or x == cuda.DummyDevice
@attr.gpu
@unittest.skipUnless(
six.PY3, 'Python2.7 has a bug in catch_warnings, so this test is '
'skipped for Python2.7')
def test_get_device_warning(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
cuda.get_device(cuda.cupy.array([1]))
assert len(w) == 1
assert w[0].category is DeprecationWarning
assert ('get_device is deprecated. Please use get_device_from_id'
' or get_device_from_array instead.' in str(w[0].message))
@attr.gpu
def test_get_device_from_id(self):
assert cuda.get_device_from_id(0) == cuda.Device(0)
@attr.gpu
def test_get_device_from_array(self):
arr = cuda.cupy.array([0])
assert cuda.get_device_from_array(arr) == cuda.Device(0)
@attr.gpu
def test_get_device_for_int(self):
with testing.assert_warns(DeprecationWarning):
device = cuda.get_device(0)
assert device == cuda.Device(0)
@attr.gpu
@unittest.skipUnless(_builtins_available,
'builtins module is not available')
def test_get_device_from_id_for_builtin_int(self):
# builtins.int is from future package and it is different
# from builtin int/long on Python 2.
assert cuda.get_device_from_id(builtins.int(0)) == cuda.Device(0)
@attr.gpu
@unittest.skipUnless(_builtins_available,
'builtins module is not available')
def test_get_device_for_builtin_int(self):
# builtins.int is from future package and it is different
# from builtin int/long on Python 2.
with testing.assert_warns(DeprecationWarning):
device = cuda.get_device(builtins.int(0))
assert device == cuda.Device(0)
@attr.gpu
def test_get_device_for_device(self):
device = cuda.get_device_from_id(0)
with testing.assert_warns(DeprecationWarning):
assert cuda.get_device(device) is device
def test_to_gpu_unavailable(self):
x = numpy.array([1])
if not cuda.available:
with self.assertRaises(RuntimeError):
cuda.to_gpu(x)
def test_get_array_module_for_numpy_array(self):
xp = cuda.get_array_module(numpy.array([]))
self.assertIs(xp, numpy)
assert xp is not cuda.cupy
def test_get_array_module_for_numpy_variable(self):
xp = cuda.get_array_module(chainer.Variable(numpy.array([])))
assert xp is numpy
assert xp is not cuda.cupy
@attr.gpu
def test_get_array_module_for_cupy_array(self):
xp = cuda.get_array_module(cuda.cupy.array([]))
assert xp is cuda.cupy
assert xp is not numpy
@attr.gpu
def test_get_array_module_for_cupy_variable(self):
xp = cuda.get_array_module(chainer.Variable(cuda.cupy.array([])))
assert xp is cuda.cupy
assert xp is not numpy
def test_cupy_is_not_none(self):
assert cuda.cupy is not None
@testing.parameterize(
{'c_contiguous': True},
{'c_contiguous': False},
)
class TestToCPU(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (2, 3))
def test_numpy_array(self):
y = cuda.to_cpu(self.x)
assert self.x is y # Do not copy
@attr.gpu
def test_cupy_array(self):
x = cuda.to_gpu(self.x)
if not self.c_contiguous:
x = cuda.cupy.asfortranarray(x)
y = cuda.to_cpu(x)
assert isinstance(y, numpy.ndarray)
numpy.testing.assert_array_equal(self.x, y)
@attr.multi_gpu(2)
def test_cupy_array2(self):
with cuda.Device(0):
x = cuda.to_gpu(self.x)
if not self.c_contiguous:
x = cuda.cupy.asfortranarray(x)
with cuda.Device(1):
y = cuda.to_cpu(x)
assert isinstance(y, numpy.ndarray)
numpy.testing.assert_array_equal(self.x, y)
@attr.gpu
def test_numpy_array_async(self):
y = cuda.to_cpu(self.x, stream=cuda.Stream())
assert isinstance(y, numpy.ndarray)
assert self.x is y # Do not copy
@attr.gpu
def test_cupy_array_async1(self):
x = cuda.to_gpu(self.x)
if not self.c_contiguous:
x = cuda.cupy.asfortranarray(x)
y = cuda.to_cpu(x, stream=cuda.Stream.null)
assert isinstance(y, numpy.ndarray)
cuda.cupy.testing.assert_array_equal(self.x, y)
@attr.multi_gpu(2)
def test_cupy_array_async2(self):
x = cuda.to_gpu(self.x, device=1)
with x.device:
if not self.c_contiguous:
x = cuda.cupy.asfortranarray(x)
y = cuda.to_cpu(x, stream=cuda.Stream.null)
assert isinstance(y, numpy.ndarray)
cuda.cupy.testing.assert_array_equal(self.x, y)
def test_single_none(self):
assert cuda.to_cpu(None) is None
def _check_list_tuple(self, typ):
assert typ in (list, tuple)
a = numpy.random.uniform(-1, 1, (0,))
b = numpy.random.uniform(-1, 1, (2, 3))
c = cuda.cupy.random.uniform(-1, 1, (0,))
d = cuda.cupy.random.uniform(-1, 1, (2, 2))
xs = typ([a, b, c, d, None, a, b, None, c, d])
xs_cpu = cuda.to_cpu(xs)
assert isinstance(xs_cpu, typ)
assert len(xs) == len(xs_cpu)
for i in (0, 1, 2, 3, 5, 6, 8, 9):
assert isinstance(xs_cpu[i], numpy.ndarray)
cuda.cupy.testing.assert_array_equal(xs[i], xs_cpu[i])
assert xs_cpu[0] is a
assert xs_cpu[1] is b
assert xs_cpu[2] is xs_cpu[8]
assert xs_cpu[3] is xs_cpu[9]
assert xs_cpu[4] is None
assert xs_cpu[5] is a
assert xs_cpu[6] is b
assert xs_cpu[7] is None
@attr.gpu
def test_list(self):
self._check_list_tuple(list)
@attr.gpu
def test_tuple(self):
self._check_list_tuple(tuple)
def test_variable(self):
x = chainer.Variable(self.x)
with self.assertRaises(TypeError):
cuda.to_cpu(x)
@testing.parameterize(*testing.product({
'dtype': [
numpy.bool_, numpy.uint8, numpy.int8, numpy.uint16,
numpy.int16, numpy.uint32, numpy.int32, numpy.uint64,
numpy.int64, numpy.float16, numpy.float32, numpy.float64,
numpy.complex_],
}))
class TestToCPUScalar(unittest.TestCase):
def test_numpy_scalar(self):
dtype = self.dtype
if dtype is numpy.bool_:
x = dtype(True)
elif issubclass(dtype, numpy.complex_):
x = dtype(3.2 - 2.4j)
elif issubclass(dtype, numpy.integer):
x = dtype(3)
elif issubclass(dtype, numpy.floating):
x = dtype(3.2)
else:
assert False
y = cuda.to_cpu(x)
assert isinstance(y, numpy.ndarray)
assert y.shape == ()
assert y.dtype == dtype
assert y == x
@attr.cudnn
class TestWorkspace(unittest.TestCase):
def setUp(self):
self.space = cuda.get_max_workspace_size()
def tearDown(self):
cuda.set_max_workspace_size(self.space)
def test_size(self):
size = 1024
cuda.set_max_workspace_size(size)
assert size == cuda.get_max_workspace_size()
@testing.parameterize(*(testing.product({
'c_contiguous': [True],
'device_dtype': [int, numpy.uint8, numpy.int8, numpy.uint16,
numpy.int16, numpy.uint32, numpy.int32, numpy.uint64,
numpy.int64]
}) + testing.product({
'c_contiguous': [False],
'device_dtype': [int]
}))
)
class TestToGPU(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (2, 3))
if not self.c_contiguous:
self.x = self.x.T
@attr.gpu
def test_numpy_array(self):
y = cuda.to_gpu(self.x)
assert isinstance(y, cuda.ndarray)
cuda.cupy.testing.assert_array_equal(self.x, y)
@attr.gpu
def test_cupy_array1(self):
x = cuda.to_gpu(self.x)
y = cuda.to_gpu(x)
assert isinstance(y, cuda.ndarray)
assert x is y # Do not copy
@attr.multi_gpu(2)
def test_cupy_array2(self):
x = cuda.to_gpu(self.x, device=self.device_dtype(0))
with x.device:
if not self.c_contiguous:
x = cuda.cupy.asfortranarray(x)
y = cuda.to_gpu(x, device=self.device_dtype(1))
assert isinstance(y, cuda.ndarray)
assert int(y.device) == 1
@attr.gpu
def test_numpy_array_async(self):
with testing.assert_warns(DeprecationWarning):
y = cuda.to_gpu(self.x, stream=cuda.Stream.null)
assert isinstance(y, cuda.ndarray)
cuda.cupy.testing.assert_array_equal(self.x, y)
@attr.multi_gpu(2)
def test_numpy_array_async2(self):
with testing.assert_warns(DeprecationWarning):
y = cuda.to_gpu(self.x, device=self.device_dtype(1),
stream=cuda.Stream.null)
assert isinstance(y, cuda.ndarray)
cuda.cupy.testing.assert_array_equal(self.x, y)
assert int(y.device) == 1
@attr.multi_gpu(2)
def test_numpy_array_async3(self):
with cuda.Device(1):
with testing.assert_warns(DeprecationWarning):
y = cuda.to_gpu(self.x, stream=cuda.Stream.null)
assert isinstance(y, cuda.ndarray)
cuda.cupy.testing.assert_array_equal(self.x, y)
assert int(y.device) == 1
@attr.gpu
def test_cupy_array_async1(self):
x = cuda.to_gpu(self.x)
if not self.c_contiguous:
x = cuda.cupy.asfortranarray(x)
with testing.assert_warns(DeprecationWarning):
y = cuda.to_gpu(x, stream=cuda.Stream())
assert isinstance(y, cuda.ndarray)
assert x is y # Do not copy
cuda.cupy.testing.assert_array_equal(x, y)
@attr.multi_gpu(2)
def test_cupy_array_async2(self):
x = cuda.to_gpu(self.x, device=self.device_dtype(0))
with x.device:
if not self.c_contiguous:
x = cuda.cupy.asfortranarray(x)
with testing.assert_warns(DeprecationWarning):
y = cuda.to_gpu(x, device=self.device_dtype(1),
stream=cuda.Stream.null)
assert isinstance(y, cuda.ndarray)
assert x is not y # Do copy
cuda.cupy.testing.assert_array_equal(x, y)
@attr.multi_gpu(2)
def test_cupy_array_async3(self):
with cuda.Device(0):
x = cuda.to_gpu(self.x)
if not self.c_contiguous:
x = cuda.cupy.asfortranarray(x)
with cuda.Device(1):
with testing.assert_warns(DeprecationWarning):
y = cuda.to_gpu(x, stream=cuda.Stream.null)
assert isinstance(y, cuda.ndarray)
assert x is not y # Do copy
cuda.cupy.testing.assert_array_equal(x, y)
@attr.gpu
def test_single_none(self):
assert cuda.to_gpu(None) is None
def _check_list_tuple(self, typ):
assert typ in (list, tuple)
a = numpy.random.uniform(-1, 1, (0,))
b = numpy.random.uniform(-1, 1, (2, 3))
c = cuda.cupy.random.uniform(-1, 1, (0,))
d = cuda.cupy.random.uniform(-1, 1, (2, 2))
xs = typ([a, b, c, d, None, a, b, None, c, d])
xs_gpu = cuda.to_gpu(xs)
assert isinstance(xs_gpu, typ)
assert len(xs) == len(xs_gpu)
for i in (0, 1, 2, 3, 5, 6, 8, 9):
assert isinstance(xs_gpu[i], cuda.cupy.ndarray)
cuda.cupy.testing.assert_array_equal(xs[i], xs_gpu[i])
assert xs_gpu[0] is xs_gpu[5]
assert xs_gpu[1] is xs_gpu[6]
assert xs_gpu[2] is c
assert xs_gpu[3] is d
assert xs_gpu[4] is None
assert xs_gpu[7] is None
assert xs_gpu[8] is c
assert xs_gpu[9] is d
@attr.gpu
def test_list(self):
self._check_list_tuple(list)
@attr.gpu
def test_tuple(self):
self._check_list_tuple(tuple)
@attr.gpu
def test_variable_gpu(self):
x = chainer.Variable(self.x)
with self.assertRaises(TypeError):
cuda.to_gpu(x)
@testing.parameterize(*testing.product({
'dtype': [
numpy.bool_, numpy.uint8, numpy.int8, numpy.uint16,
numpy.int16, numpy.uint32, numpy.int32, numpy.uint64,
numpy.int64, numpy.float16, numpy.float32, numpy.float64,
numpy.complex_],
}))
class TestToGPUScalar(unittest.TestCase):
@attr.gpu
def test_numpy_scalar(self):
dtype = self.dtype
if dtype is numpy.bool_:
x = dtype(True)
elif issubclass(dtype, numpy.complex_):
x = dtype(3.2 - 2.4j)
elif issubclass(dtype, numpy.integer):
x = dtype(3)
elif issubclass(dtype, numpy.floating):
x = dtype(3.2)
else:
assert False
y = cuda.to_gpu(x)
assert isinstance(y, cuda.ndarray)
assert y.shape == ()
assert y.dtype == dtype
assert y == x
testing.run_module(__name__, __file__)
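# Minimal round-trip sketch (illustrative; the to_gpu step requires a
# CUDA-capable device, mirroring the @attr.gpu tests above):
#   x = numpy.arange(6).reshape(2, 3)
#   y = cuda.to_gpu(x)   # numpy.ndarray -> cupy.ndarray on the current device
#   z = cuda.to_cpu(y)   # back to numpy; numpy inputs are returned uncopied
#   assert (x == z).all()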
|
|
# -*- coding: utf-8 -*-
""" Utilities """
super_entity = s3mgr.model.super_entity
super_link = s3mgr.model.super_link
super_key = s3mgr.model.super_key
s3_action_buttons = s3base.S3CRUD.action_buttons
# -----------------------------------------------------------------------------
def s3_register_validation():
""" JavaScript client-side validation """
# Client-side validation (needed to check for passwords being same)
if request.cookies.has_key("registered"):
password_position = "last"
else:
password_position = "first"
if deployment_settings.get_auth_registration_mobile_phone_mandatory():
mobile = """
mobile: {
required: true
},
"""
else:
mobile = ""
if deployment_settings.get_auth_registration_organisation_mandatory():
org1 = """
organisation_id: {
required: true
},
"""
org2 = "".join(( """,
organisation_id: '""", str(T("Enter your organization")), """',
""" ))
else:
org1 = ""
org2 = ""
domains = ""
if deployment_settings.get_auth_registration_organisation_hidden() and \
request.controller != "admin":
table = auth.settings.table_user
table.organisation_id
table = db.auth_organisation
query = (table.organisation_id != None) & \
(table.domain != None)
whitelists = db(query).select(table.organisation_id,
table.domain)
if whitelists:
domains = """$( '#auth_user_organisation_id__row' ).hide();
S3.whitelists = {
"""
count = 0
for whitelist in whitelists:
count += 1
domains += "'%s': %s" % (whitelist.domain,
whitelist.organisation_id)
if count < len(whitelists):
domains += ",\n"
else:
domains += "\n"
domains += """};
$( '#regform #auth_user_email' ).blur( function() {
var email = $( '#regform #auth_user_email' ).val();
var domain = email.split('@')[1];
if (undefined != S3.whitelists[domain]) {
$( '#auth_user_organisation_id' ).val(S3.whitelists[domain]);
} else {
$( '#auth_user_organisation_id__row' ).show();
}
});
"""
# validate signup form on keyup and submit
# @ToDo: //remote: 'emailsurl'
script = "".join(( domains, """
$('#regform').validate({
errorClass: 'req',
rules: {
first_name: {
required: true
},""", mobile, """
email: {
required: true,
email: true
},""", org1, """
password: {
required: true
},
password_two: {
required: true,
equalTo: '.password:""", password_position, """'
}
},
messages: {
firstname: '""", str(T("Enter your firstname")), """',
password: {
required: '""", str(T("Provide a password")), """'
},
password_two: {
required: '""", str(T("Repeat your password")), """',
equalTo: '""", str(T("Enter the same password as above")), """'
},
email: {
required: '""", str(T("Please enter a valid email address")), """',
minlength: '""", str(T("Please enter a valid email address")), """'
}""", org2, """
},
errorPlacement: function(error, element) {
error.appendTo( element.parent().next() );
},
submitHandler: function(form) {
form.submit();
}
});""" ))
response.s3.jquery_ready.append( script )
# -----------------------------------------------------------------------------
def s3_get_utc_offset():
""" Get the current UTC offset for the client """
offset = None
if auth.is_logged_in():
        # 1st choice is the personal preference (useful for GETs if the user wishes to see times in their local timezone)
offset = session.auth.user.utc_offset
if offset:
offset = offset.strip()
if not offset:
# 2nd choice is what the client provides in the hidden field (for form POSTs)
offset = request.post_vars.get("_utc_offset", None)
if offset:
offset = int(offset)
utcstr = offset < 0 and "UTC +" or "UTC -"
hours = abs(int(offset/60))
minutes = abs(int(offset % 60))
offset = "%s%02d%02d" % (utcstr, hours, minutes)
# Make this the preferred value during this session
if auth.is_logged_in():
session.auth.user.utc_offset = offset
if not offset:
# 3rd choice is the server default (what most clients should see the timezone as)
offset = deployment_settings.L10n.utc_offset
return offset
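# Worked example (illustrative): a client-posted "_utc_offset" of -120 minutes
# (i.e. two hours east of UTC) gives utcstr = "UTC +", hours = 2, minutes = 0,
# and hence the stored string "UTC +0200".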
# Store last value in session
session.s3.utc_offset = s3_get_utc_offset()
# -----------------------------------------------------------------------------
# Phone number requires
# (defined in s3validators.py)
s3_single_phone_requires = IS_MATCH(single_phone_number_pattern)
s3_phone_requires = IS_MATCH(multi_phone_number_pattern,
error_message=T("Invalid phone number!"))
# -----------------------------------------------------------------------------
# Shorteners
# Names - e.g. when used in Dropdowns
# - unused currently?
repr_select = lambda l: len(l.name) > 48 and "%s..." % l.name[:44] or l.name
# Comments Fields
def comments_represent(text, showlink=True):
if len(text) < 80:
return text
elif not showlink:
return "%s..." % text[:76]
else:
import uuid
unique = uuid.uuid4()
represent = DIV(
DIV(text,
_id=unique,
_class="hidden popup",
_onmouseout="$('#%s').hide();" % unique
),
A("%s..." % text[:76],
_onmouseover="$('#%s').removeClass('hidden').show();" % unique,
),
)
return represent
# -----------------------------------------------------------------------------
# Make URLs clickable
s3_url_represent = lambda url: (url and [A(url, _href=url, _target="blank")] or [""])[0]
# -----------------------------------------------------------------------------
# Date/Time representation functions
s3_date_represent = S3DateTime.date_represent
s3_time_represent = S3DateTime.time_represent
s3_datetime_represent = S3DateTime.datetime_represent
s3_utc_represent = lambda dt: s3_datetime_represent(dt, utc=True)
s3_date_represent_utc = lambda date: s3_date_represent(date, utc=True)
# -----------------------------------------------------------------------------
def s3_filename(filename):
"""
        Convert a string into a filename that is valid on any OS
http://stackoverflow.com/questions/295135/turn-a-string-into-a-valid-filename-in-python/698714#698714
"""
import string
import unicodedata
validFilenameChars = "-_.() %s%s" % (string.ascii_letters, string.digits)
filename = unicode(filename)
cleanedFilename = unicodedata.normalize("NFKD",
filename).encode("ASCII", "ignore")
return "".join(c for c in cleanedFilename if c in validFilenameChars)
# -----------------------------------------------------------------------------
def s3_component_form(r, **attr):
""" Custom Method to create a PDF for a component form """
exporter = s3base.S3PDF()
return exporter(r, **attr)
# -----------------------------------------------------------------------------
def s3_include_debug():
"""
Generates html to include:
the js scripts listed in ../static/scripts/tools/sahana.js.cfg
the css listed in ../static/scripts/tools/sahana.css.cfg
"""
# Disable printing
class dummyStream:
""" dummyStream behaves like a stream but does nothing. """
def __init__(self): pass
def write(self,data): pass
def read(self,data): pass
def flush(self): pass
def close(self): pass
save_stdout = sys.stdout
    # redirect all print output
sys.stdout = dummyStream()
scripts_dir_path = "applications/%s/static/scripts" % request.application
# Get list of script files
sys.path.append( "%s/tools" % scripts_dir_path)
import mergejsmf
configDictCore = {
"web2py": scripts_dir_path,
"T2": scripts_dir_path,
"S3": scripts_dir_path
}
configFilename = "%s/tools/sahana.js.cfg" % scripts_dir_path
(fs, files) = mergejsmf.getFiles(configDictCore, configFilename)
    # Re-enable printing
sys.stdout = save_stdout
include = ""
for file in files:
include = '%s\n<script src="/%s/static/scripts/%s" type="text/javascript"></script>' \
% ( include,
request.application,
file)
include = "%s\n <!-- CSS Syles -->" % include
f = open("%s/tools/sahana.css.cfg" % scripts_dir_path, "r")
files = f.readlines()
for file in files[:-1]:
include = '%s\n<link href="/%s/static/styles/%s" rel="stylesheet" type="text/css" />' \
% ( include,
request.application,
file[:-1]
)
f.close()
return XML(include)
# -----------------------------------------------------------------------------
def s3_represent_multiref(table, opt, represent=None, separator=", "):
""" Produce a representation for a list:reference field. """
if represent is None:
if "name" in table.fields:
represent = lambda r: r and r.name or UNKNOWN_OPT
if isinstance(opt, (int, long, str)):
query = (table.id == opt)
else:
query = (table.id.belongs(opt))
if "deleted" in table.fields:
query = query & (table.deleted == False)
records = db(query).select()
if records:
try:
first = represent(records[0])
rep_function = represent
except TypeError:
first = represent % records[0]
rep_function = lambda r: represent % r
# NB join only operates on strings, and some callers provide A().
results = [first]
for record in records[1:]:
results.append(separator)
results.append(rep_function(record))
# Wrap in XML to allow showing anchors on read-only pages, else
# Web2py will escape the angle brackets, etc. The single-record
# location represent produces A() (unless told not to), and we
# want to show links if we can.
return XML(DIV(*results))
else:
return UNKNOWN_OPT
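# Usage sketch (table name hypothetical): for a list:reference field holding
# [3, 7] this produces XML(DIV("Org A", ", ", "Org B")), so A() anchors from
# a custom represent survive un-escaped on read-only pages.
def _s3_represent_multiref_example():
    return s3_represent_multiref(db.org_organisation, [3, 7])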
# -----------------------------------------------------------------------------
def s3_table_links(reference):
"""
Return a dict of tables & their fields which have references to the
specified table
@deprecated: to be replaced by db[tablename]._referenced_by
- used by controllers/gis.py & pr.py
"""
tables = {}
for table in db.tables:
count = 0
for field in db[table].fields:
if str(db[table][field].type) == "reference %s" % reference:
if count == 0:
tables[table] = {}
tables[table][count] = field
count += 1
return tables
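# Return-value sketch (table/field names hypothetical): if pr_person and
# gis_location each hold a reference to org_organisation, then
#   s3_table_links("org_organisation")
# yields {"pr_person": {0: "organisation_id"},
#         "gis_location": {0: "organisation_id"}}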
# -----------------------------------------------------------------------------
def s3_rheader_tabs(r, tabs=[], paging=False):
"""
Constructs a DIV of component links for an S3RESTRequest
@param tabs: the tabs as list of tuples (title, component_name, vars),
where vars is optional
@param paging: add paging buttons previous/next to the tabs
@todo: move into S3CRUD
"""
rheader_tabs = []
tablist = []
previous = next = None
# Check for r.method tab
mtab = r.component is None and \
[t[1] for t in tabs if t[1] == r.method] and True or False
for i in xrange(len(tabs)):
record_id = r.id
title, component = tabs[i][:2]
vars_in_request = True
if len(tabs[i]) > 2:
_vars = Storage(tabs[i][2])
for k,v in _vars.iteritems():
if r.get_vars.get(k) != v:
vars_in_request = False
break
if "viewing" in r.get_vars:
_vars.viewing = r.get_vars.viewing
else:
_vars = r.get_vars
here = False
if component and component.find("/") > 0:
function, component = component.split("/", 1)
if not component:
component = None
else:
if "viewing" in _vars:
tablename, record_id = _vars.viewing.split(".", 1)
function = tablename.split("_", 1)[1]
else:
function = r.function
record_id = r.id
if function == r.name or \
(function == r.function and "viewing" in _vars):
here = r.method == component or not mtab
if i == len(tabs)-1:
tab = Storage(title=title, _class = "tab_last")
else:
tab = Storage(title=title, _class = "tab_other")
if i > 0 and tablist[i-1]._class == "tab_here":
next = tab
if component:
if r.component and r.component.alias == component and vars_in_request or \
r.custom_action and r.method == component:
tab.update(_class = "tab_here")
previous = i and tablist[i-1] or None
if record_id:
args = [record_id, component]
else:
args = [component]
vars = Storage(_vars)
if "viewing" in vars:
del vars["viewing"]
tab.update(_href=URL(function, args=args, vars=vars))
else:
if not r.component and len(tabs[i]) <= 2 and here:
tab.update(_class = "tab_here")
previous = i and tablist[i-1] or None
vars = Storage(_vars)
args = []
if function != r.name:
if "viewing" not in vars and r.id:
vars.update(viewing="%s.%s" % (r.tablename, r.id))
elif not tabs[i][1]:
if "viewing" in vars:
del vars["viewing"]
args = [record_id]
else:
if "viewing" not in vars and record_id:
args = [record_id]
tab.update(_href=URL(function, args=args, vars=vars))
tablist.append(tab)
rheader_tabs.append(SPAN(A(tab.title, _href=tab._href), _class=tab._class))
if rheader_tabs:
if paging:
if next:
rheader_tabs.insert(0, SPAN(A(">", _href=next._href), _class="tab_next_active"))
else:
rheader_tabs.insert(0, SPAN(">", _class="tab_next_inactive"))
if previous:
rheader_tabs.insert(0, SPAN(A("<", _href=previous._href), _class="tab_prev_active"))
else:
rheader_tabs.insert(0, SPAN("<", _class="tab_prev_inactive"))
rheader_tabs = DIV(rheader_tabs, _class="tabs")
else:
rheader_tabs = ""
return rheader_tabs
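# Call sketch (tab titles and components hypothetical): a typical rheader
# builds its navigation as
#   s3_rheader_tabs(r, tabs=[(T("Basic Details"), None),
#                            (T("Addresses"), "address"),
#                            (T("Images"), "image", {"viewing": "pr_person.%s" % r.id})])
# where the optional third element is a dict of URL vars the tab must match.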
# -----------------------------------------------------------------------------
def s3_rheader_resource(r):
"""
Identify the tablename and record ID for the rheader
@param r: the current S3Request
"""
_vars = r.get_vars
if "viewing" in _vars:
tablename, record_id = _vars.viewing.rsplit(".", 1)
record = db[tablename][record_id]
else:
tablename = r.tablename
record = r.record
return (tablename, record)
# -----------------------------------------------------------------------------
def sort_dict_by_values(adict):
"""
Sort a dict by value and return an OrderedDict.
- used by models/05_irs.py
"""
return OrderedDict(sorted(adict.items(), key = lambda item: item[1]))
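# Example: sort_dict_by_values({"b": 2, "c": 3, "a": 1}) returns
# OrderedDict([("a", 1), ("b", 2), ("c", 3)]), i.e. ordered by value.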
# -----------------------------------------------------------------------------
# CRUD functions
# -----------------------------------------------------------------------------
def s3_barchart(r, **attr):
"""
Provide simple barcharts for resource attributes
SVG representation uses the SaVaGe library
Need to request a specific value to graph in request.vars
used as REST method handler for S3Resources
@todo: replace by a S3MethodHandler
"""
# Get all the variables and format them if needed
valKey = r.vars.get("value")
nameKey = r.vars.get("name")
if not nameKey and r.table.get("name"):
# Try defaulting to the most-commonly used:
nameKey = "name"
# The "value" parameter is required; "name" is optional but useful.
# Check that "value" can be found in the table, and likewise "name"
# (if it was provided).
if not r.table.get(valKey):
raise HTTP(400, s3mgr.xml.json_message(success=False, status_code="400", message="Need a Value for the Y axis"))
elif nameKey and not r.table.get(nameKey):
raise HTTP(400, s3mgr.xml.json_message(success=False, status_code="400", message=nameKey + " attribute not found in this resource."))
start = request.vars.get("start")
if start:
start = int(start)
limit = r.vars.get("limit")
if limit:
limit = int(limit)
settings = r.vars.get("settings")
if settings:
settings = json.loads(settings)
else:
settings = {}
if r.representation.lower() == "svg":
r.response.headers["Content-Type"] = "image/svg+xml"
from savage import graph
bar = graph.BarGraph(settings=settings)
title = deployment_settings.modules.get(module).name_nice
bar.setTitle(title)
if nameKey:
xlabel = r.table.get(nameKey).label
if xlabel:
bar.setXLabel(str(xlabel))
else:
bar.setXLabel(nameKey)
ylabel = r.table.get(valKey).label
if ylabel:
bar.setYLabel(str(ylabel))
else:
bar.setYLabel(valKey)
try:
records = r.resource.load(start, limit)
for entry in r.resource:
val = entry[valKey]
# Can't graph None type
if val is not None:
if nameKey:
name = entry[nameKey]
else:
name = None
bar.addBar(name, val)
return bar.save()
# If the field that was provided was not numeric, we have problems
except ValueError:
raise HTTP(400, "Bad Request")
else:
raise HTTP(501, body=BADFORMAT)
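# Request sketch (application/module/field names hypothetical): the handler
# is reached through the REST interface registered below, e.g.
#   /eden/gis/location/barchart.svg?value=population&name=name&limit=20
# which renders an SVG bar per record of "population", labelled by "name".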
# -----------------------------------------------------------------------------
def s3_copy(r, **attr):
"""
Copy a record
used as REST method handler for S3Resources
@todo: move into S3CRUDHandler
"""
redirect(URL(args="create", vars={"from_record":r.id}))
# -----------------------------------------------------------------------------
def s3_import_prep(import_data):
"""
Example for an import pre-processor
@param import_data: a tuple of (resource, tree)
"""
resource, tree = import_data
#print "Import to %s" % resource.tablename
#print s3mgr.xml.tostring(tree, pretty_print=True)
# Use this to skip the import:
#resource.skip_import = True
# Import pre-process
# This can also be a Storage of {tablename = function}*
s3mgr.import_prep = s3_import_prep
# -----------------------------------------------------------------------------
def s3_rest_controller(prefix=None, resourcename=None, **attr):
"""
Helper function to apply the S3Resource REST interface
@param prefix: the application prefix
@param resourcename: the resource name (without prefix)
@param attr: additional keyword parameters
Any keyword parameters will be copied into the output dict (provided
that the output is a dict). If a keyword parameter is callable, then
it will be invoked, and its return value will be added to the output
dict instead. The callable receives the S3Request as its first and
only parameter.
CRUD can be configured per table using:
s3mgr.configure(tablename, **attr)
*** Redirection:
create_next URL to redirect to after a record has been created
update_next URL to redirect to after a record has been updated
delete_next URL to redirect to after a record has been deleted
*** Form configuration:
list_fields list of names of fields to include into list views
subheadings Sub-headings (see separate documentation)
listadd Enable/Disable add-form in list views
*** CRUD configuration:
editable Allow/Deny record updates in this table
deletable Allow/Deny record deletions in this table
insertable Allow/Deny record insertions into this table
copyable Allow/Deny record copying within this table
*** Callbacks:
create_onvalidation Function/Lambda for additional record validation on create
create_onaccept Function/Lambda after successful record insertion
update_onvalidation Function/Lambda for additional record validation on update
update_onaccept Function/Lambda after successful record update
onvalidation Fallback for both create_onvalidation and update_onvalidation
onaccept Fallback for both create_onaccept and update_onaccept
ondelete Function/Lambda after record deletion
"""
# Parse the request
r = s3mgr.parse_request(prefix, resourcename)
# Set method handlers
r.set_handler("copy", s3_copy)
r.set_handler("barchart", s3_barchart)
r.set_handler("analyze", s3base.S3Cube())
r.set_handler("import", s3base.S3PDF(),
http = ["GET", "POST"],
representation="pdf")
r.set_handler("import", s3base.S3Importer(), transform=True)
# Execute the request
output = r(**attr)
if isinstance(output, dict) and (not r.method or r.method in ("analyze", "search")):
if response.s3.actions is None:
# Add default action buttons
prefix, name, table, tablename = r.target()
authorised = s3_has_permission("update", tablename)
# If the component has components itself, then use the
# component's native controller for CRU(D) => make sure
# you have one, or override by native=False
if r.component and s3mgr.model.has_components(table):
native = output.get("native", True)
else:
native = False
# Get table config
model = s3mgr.model
listadd = model.get_config(tablename, "listadd", True)
editable = model.get_config(tablename, "editable", True) and \
not auth.permission.ownership_required(table, "update")
deletable = model.get_config(tablename, "deletable", True)
copyable = model.get_config(tablename, "copyable", False)
# URL to open the resource
open_url = r.resource.crud._linkto(r,
authorised=authorised,
update=editable,
native=native)("[id]")
# Add action buttons for Open/Delete/Copy as appropriate
s3_action_buttons(r,
deletable=deletable,
copyable=copyable,
editable=editable,
read_url=open_url,
update_url=open_url)
# Override Add-button, link to native controller and put
# the primary key into vars for automatic linking
if native and not listadd and \
s3_has_permission("create", tablename):
label = s3base.S3CRUD.crud_string(tablename,
"label_create_button")
hook = r.resource.components[name]
fkey = "%s.%s" % (name, hook.fkey)
vars = request.vars.copy()
vars.update({fkey: r.id})
url = URL(prefix, name, args=["create"], vars=vars)
add_btn = A(label, _href=url, _class="action-btn")
output.update(add_btn=add_btn)
elif r.method != "import":
response.s3.actions = None
return output
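# Controller sketch (module and table names hypothetical): a typical
# controllers/<module>.py function wires a table into the REST interface:
#   def organisation():
#       s3mgr.configure("org_organisation",
#                       list_fields=["id", "name"],
#                       deletable=False)
#       return s3_rest_controller("org", "organisation")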
# END =========================================================================
|
|
from stubydoo import double
from tn.plonebehavior.template import interfaces
from tn.plonebehavior.template import NullTemplateConfiguration
from tn.plonebehavior.template.compilation_strategy import StyledPageCompilationStrategy
from tn.plonebehavior.template.compilation_strategy import CompilationStrategy
from zope.app.testing import placelesssetup
import lxml.html
import stubydoo
import unittest
import zope.component
import zope.interface
class TestCompilationStrategy(unittest.TestCase):
def setUp(self):
placelesssetup.setUp(self)
self.document = stubydoo.double()
self.document.body = u'<p>Test!</p>'
self.config = stubydoo.double()
@zope.component.adapter(None)
@zope.interface.implementer(interfaces.IHTMLBody)
def body_attribute(doc):
return stubydoo.double(__unicode__=lambda self:doc.body)
zope.component.provideAdapter(body_attribute)
self.compiler = CompilationStrategy(self.document, self.config)
def tearDown(self):
placelesssetup.tearDown()
def test_element_selection(self):
self.config.html = u"<html><body><div></div></body></html>"
self.config.xpath = u'/html/body/div'
result = self.compiler.compile()
self.assertEquals(result,
u'<html><body><div><p>Test!</p></div></body></html>')
def test_element_replacement(self):
self.config.html = u"<html><body><div>This should go away</div></body></html>"
self.config.xpath = u'/html/body/div'
result = self.compiler.compile()
self.assertEquals(result,
u'<html><body><div><p>Test!</p></div></body></html>')
def test_element_children_replacement(self):
self.config.html = u"<html><body><div><a>a</a><br /></div></body></html>"
self.config.xpath = u'/html/body/div'
result = self.compiler.compile()
self.assertEquals(result,
u'<html><body><div><p>Test!</p></div></body></html>')
def test_element_tail_replacement(self):
self.config.html = u"<html><body><div><a>a</a>Tail</div></body></html>"
self.config.xpath = u'/html/body/div'
result = self.compiler.compile()
self.assertEquals(result,
u'<html><body><div><p>Test!</p></div></body></html>')
def test_attribute_selection_returns_unmodified_tree(self):
self.config.html = u'<html><body><div dir="ltr"></div></body></html>'
self.config.xpath = u'/html/body/div/@dir'
result = self.compiler.compile()
self.assertEquals(result, self.config.html)
def test_arbitrary_xpath_returns_unmodified_tree(self):
self.config.html = u'<html><body><div dir="ltr"></div></body></html>'
self.config.xpath = u'1+1'
result = self.compiler.compile()
self.assertEquals(result, self.config.html)
def test_none_xpath_returns_unmodified_tree(self):
self.config.html = u'<html><body><div dir="ltr"></div></body></html>'
self.config.xpath = None
result = self.compiler.compile()
self.assertEquals(result, self.config.html)
def test_blank_xpath_returns_unmodified_tree(self):
self.config.html = u'<html><body><div dir="ltr"></div></body></html>'
self.config.xpath = u''
result = self.compiler.compile()
self.assertEquals(result, self.config.html)
class TestCompilationStrategyWithNullConfiguration(unittest.TestCase):
def setUp(self):
placelesssetup.setUp(self)
def tearDown(self):
placelesssetup.tearDown()
def test_compilation_with_default_xpath_and_css(self):
context = double()
configuration = double(
xpath=NullTemplateConfiguration.xpath,
css=NullTemplateConfiguration.css,
html=u'<html><body></body></html>'
)
@zope.component.adapter(None)
@zope.interface.implementer(interfaces.IHTMLBody)
def body_adapter(context):
return double(__unicode__=lambda self:u'<p>Hello</p>')
zope.component.provideAdapter(body_adapter)
expected_body = u'<body><p>Hello</p></body>'
strategy = CompilationStrategy(context, configuration)
result = strategy.compile()
resulting_body = lxml.html.document_fromstring(result).\
xpath(u'//body')[0]
self.assertEquals(
expected_body,
lxml.html.tostring(resulting_body)
)
class BaseTestStyledPageCompilationStrategy(unittest.TestCase):
def setUp(self):
placelesssetup.setUp(self)
self.config = stubydoo.double()
# This kind of document uses a rich text field, which has an output
# attribute.
self.document = stubydoo.double(body=stubydoo.double())
self.document.body = u'<p>Test!</p>'
@zope.component.adapter(None)
@zope.interface.implementer(interfaces.IHTMLBody)
def html_body(doc):
return doc.body
zope.component.provideAdapter(html_body)
self.compiler = StyledPageCompilationStrategy(self.document, self.config)
# This 'stubbing' relies on the fact that the function is always looked up
# on the module at call time, so no direct reference to it is kept.
from tn.plonestyledpage import styled_page
self.old_getUniqueId = styled_page.getUniqueId
styled_page.getUniqueId = lambda page: u'foo'
self.old_getEscapedStyleBlock = styled_page.getEscapedStyleBlock
styled_page.getEscapedStyleBlock = lambda page: u'<style>%s</style>' % page.styles
self.document.styles = u'p{color:red}'
def tearDown(self):
placelesssetup.tearDown()
from tn.plonestyledpage import styled_page
styled_page.getUniqueId = self.old_getUniqueId
styled_page.getEscapedStyleBlock = self.old_getEscapedStyleBlock
class TestStyledPageCompilationStrategy(BaseTestStyledPageCompilationStrategy):
def test_element_selection(self):
self.config.html = u"<html><head></head><body></body></html>"
self.config.xpath = u'/html/body'
result = self.compiler.compile()
self.assertTrue(u'<body><div id="foo"><p>Test!</p></div></body>' in result)
def test_element_replacement(self):
self.config.html = u"<html><head></head><body>This should go away</body></html>"
self.config.xpath = u'/html/body'
result = self.compiler.compile()
self.assertTrue(u'<body><div id="foo"><p>Test!</p></div></body>' in result)
def test_element_children_replacement(self):
self.config.html = u"<html><head></head><body><a>a</a><br /></body></html>"
self.config.xpath = u'/html/body'
result = self.compiler.compile()
self.assertTrue(u'<body><div id="foo"><p>Test!</p></div></body>' in result)
def test_element_tail_replacement(self):
self.config.html = u"<html><head></head><body><a>a</a>Tail</body></html>"
self.config.xpath = u'/html/body'
result = self.compiler.compile()
self.assertTrue(u'<body><div id="foo"><p>Test!</p></div></body>' in result)
def test_styles_are_added_into_head(self):
self.config.html = u"<html><head></head><body></body></html>"
self.config.xpath = u'/html/body'
result = self.compiler.compile()
self.assertTrue(u'<head><style>p{color:red}</style></head>' in result)
def test_original_head_content_is_kept_before_styles(self):
self.config.html = u'<html><head><title>Bar</title><style>p{color:green}</style></head><body></body></html>'
self.config.xpath = u'/html/body'
result = self.compiler.compile()
self.assertTrue(u'<head><title>Bar</title><style>p{color:green}</style><style>p{color:red}</style></head>' in result)
def test_head_is_created_if_not_present(self):
self.config.html = u"<html><body></body></html>"
self.config.xpath = u'/html/body'
result = self.compiler.compile()
self.assertTrue(u'<head><style>p{color:red}</style></head>' in result)
def test_attribute_selection_returns_unmodified_body(self):
self.config.html = u'<html><head></head><body dir="ltr"></body></html>'
self.config.xpath = u'/html/body/@dir'
result = self.compiler.compile()
self.assertTrue(u'</head><body dir="ltr"></body></html>' in result)
def test_attribute_selection_still_adds_styles(self):
self.config.html = u'<html><head></head><body dir="ltr"></body></html>'
self.config.xpath = u'/html/body/@dir'
result = self.compiler.compile()
self.assertTrue(u'<head><style>p{color:red}</style></head>' in result)
def test_arbitrary_xpath_returns_unmodified_body(self):
self.config.html = u'<html><head></head><body dir="ltr"></body></html>'
self.config.xpath = u'1+1'
result = self.compiler.compile()
self.assertTrue(u'</head><body dir="ltr"></body></html>' in result)
def test_arbitrary_xpath_still_adds_styles(self):
self.config.html = u'<html><head></head><body><div dir="ltr"></div></body></html>'
self.config.xpath = u'1+1'
result = self.compiler.compile()
self.assertTrue(u'<head><style>p{color:red}</style></head>' in result)
def test_none_xpath_returns_unmodified_body(self):
self.config.html = u'<html><head></head><body dir="ltr"></body></html>'
self.config.xpath = None
result = self.compiler.compile()
self.assertTrue(u'</head><body dir="ltr"></body></html>' in result)
def test_none_xpath_still_adds_styles(self):
self.config.html = u'<html><head></head><body><div dir="ltr"></div></body></html>'
self.config.xpath = None
result = self.compiler.compile()
self.assertTrue(u'<head><style>p{color:red}</style></head>' in result)
def test_blank_xpath_returns_unmodified_body(self):
self.config.html = u'<html><head></head><body dir="ltr"></body></html>'
self.config.xpath = u''
result = self.compiler.compile()
self.assertTrue(u'</head><body dir="ltr"></body></html>' in result)
def test_blank_xpath_still_adds_styles(self):
self.config.html = u'<html><head></head><body><div dir="ltr"></div></body></html>'
self.config.xpath = u''
result = self.compiler.compile()
self.assertTrue(u'<head><style>p{color:red}</style></head>' in result)
class TestStyledPageCompilationStrategyWithNullConfiguration(
BaseTestStyledPageCompilationStrategy
):
def setUp(self):
super(TestStyledPageCompilationStrategyWithNullConfiguration,
self).setUp()
self.config = double(
xpath=NullTemplateConfiguration.xpath,
css=NullTemplateConfiguration.css,
html=u'<html><body></body></html>'
)
self.compiler = StyledPageCompilationStrategy(self.document, self.config)
def test_compilation_with_default_xpath_and_css(self):
expected_body = u'<body><div id="foo">%s</div></body>' % self.document.body
result = self.compiler.compile()
resulting_body = lxml.html.document_fromstring(result).\
xpath(u'//body')[0]
self.assertEquals(
expected_body,
lxml.html.tostring(resulting_body)
)
|
|
import datetime
from functools import partial
import inspect
import pydoc
import numpy as np
import pandas as pd
import break4w._defaults as b4wdefaults
class Question:
u"""A base object class for handling Data dictionary entries
"""
true_values = b4wdefaults.true_values
false_values = b4wdefaults.false_values
ebi_null = b4wdefaults.ebi_null
defaults = b4wdefaults.defaults
var_str_format = {str: '%s', int: '%i', float: '%1.5f', bool: '%s'}
def __init__(self, name, description, dtype, clean_name=None,
question=None, format=None,
free_response=False, mimarks=False, ontology=None,
missing=None, blanks=None, colormap=None, original_name=None,
source_columns=None, derivative_columns=None, notes=None,
**other_properties):
u"""A base object for describing single question outputs
The Question Object is somewhat limited in its functionality. For most
questions in the dataset, it is better to use a child object with the
appropriate question type (e.g. Categorical, Bool, Continuous, Dates).
Parameters
----------
name : str
The name of a column in a microbiome mapping file where metadata
describing a clinical or environmental factor is stored.
description : str
A brief description of the biological relevance of the information
in the column. This can also be used to clarify acronyms or
definitions.
dtype : object
The datatype in which the responses should be represented (e.g.
`float`, `int`, `str`).
clean_name : str, optional
A nicer version of the way the column should be named. This can be
used for display in figures. If nothing is provided, the column
name will be converted to a title by replacing underscores with
spaces and converting to title case.
mimarks : bool, optional
If the question was a mimarks standard field
ontology : str, optional
The type of ontology, if any, used to answer the question. An
ontology provides a consistent, structured vocabulary. A list
of ontologies can be found at https://www.ebi.ac.uk/ols/ontologies
missing : str, list, optional
Acceptable missing values. Missing values will be used to validate
all values in the column. Specified missing values can also be
ignored during analysis if correctly specified.
blanks: str, list, optional
Value to represent experimental blanks, if relevant.
colormap: str, iterable, optional
The colors to use when plotting the data. This can be a matplotlib
colormap object, a string naming a matplotlib-compatible
colormap (e.g. `'RdBu'`), or an iterable of matplotlib-compatible
color values.
original_name: str, optional
The name of the column in a previous iteration of the metadata
(often the version of the metadata provided by the collaborator).
source_columns: list, optional
Other columns in the mapping file used to create this column.
derivative_columns: list, optional
Any columns whose data is derived from the data in this column.
notes: str, optional
Any additional notes about the column, such as information
about the data source, any manual corrections, etc.
Basically any free text information someone should know about
the column.
Raises
------
TypeError
The name is not a string
TypeError
The description is not a string
TypeError
The dtype is not a class
TypeError
The `clean_name` is not a string.
"""
# Checks the core arguments
if not isinstance(name, str):
raise TypeError('name must be a string.')
if not isinstance(description, str):
raise TypeError('description must be a string')
elif len(description) > 80:
raise ValueError('The description must be less than 80 '
'characters')
if not inspect.isclass(dtype):
raise TypeError('dtype must be a class')
if not isinstance(clean_name, str) and clean_name is not None:
raise TypeError('If supplied, clean_name must be a string')
# Handles the main information about the data
self.name = name
self.description = description
self.dtype = dtype
self.type = 'Question'
if clean_name is None:
self.clean_name = name.replace('_', ' ').title()
else:
self.clean_name = clean_name
# Sets up the optional metadata properties
self.free_response = free_response
self.mimarks = mimarks
self.ontology = ontology
if missing is None:
self.missing = self.ebi_null
elif isinstance(missing, str):
self.missing = set([missing])
else:
self.missing = set(missing)
self.blanks = blanks
self.colormap = _check_cmap(colormap)
self.original_name = original_name
if source_columns is None:
self.source_columns = []
else:
self.source_columns = source_columns
if derivative_columns is None:
self.derivative_columns = []
else:
self.derivative_columns = derivative_columns
self.notes = notes
for k, v in other_properties.items():
setattr(self, k, v)
self.log = []
def __str__(self):
u"""Prints a nice summary of the object"""
s_ = """
------------------------------------------------------------------------------------
{name} (Question {dtype_str})
{description}
------------------------------------------------------------------------------------
"""
return s_.format(**{
'name': self.name,
'dtype_str': self._iterable_to_str(self.dtype),
'description': self.description,
})
def _update_log(self, command, transform_type, transformation):
u"""A helper function to update the in-object documentation object
Every time a Question acts on data, a record should be made of
the transformation. This function standardizes the format of that
recording by tracking the time, location, and command.
Examples of how the function is used can be found in all functions
which operate on a `map_` variable.
Parameters
----------
command : str
A short textual description of the command performed. This
may be the function name in text format.
transform_type: str
A more general description of the type of action that was
performed. Ideally, this comes from a preset list of possible
actions, and the descriptions are consistent.
transformation: str
Explains exactly how values were changed.
"""
self.log.append({
'timestamp': datetime.datetime.now(),
'column': self.name,
'command': command,
'transform_type': transform_type,
'transformation': transformation,
})
def write_provenance(self):
"""Writes the question provenance to a string
Returns
-------
DataFrame
A pandas dataframe describing the time, column, action taken,
action type, and changes in the data.
"""
self._update_log('Write Log', 'recording', '')
return pd.DataFrame(self.log)[['timestamp', 'command', 'column',
'transform_type', 'transformation']]
def _read_provenance(self, fp_):
"""
Reads the existing question provenance
to be added!
"""
raise NotImplementedError
def _check_ontology(self):
"""
Checks the ontology associated with the question
To be added!
"""
raise NotImplementedError
@staticmethod
def _iterable_to_str(val_, code_delim='=', var_delim=' | ', var_str='%s',
code_str='%s', null_value='None'):
"""
Converts a list or dict into a delimited string for reading
"""
def _to_str(x):
if pd.isnull(x):
return null_value
else:
return var_str % x
if (isinstance(val_, (list, set, tuple, np.ndarray, dict)) and
len(val_) == 0):
return null_value
if isinstance(val_, (list, set, tuple, np.ndarray)):
return var_delim.join([_to_str(v) for v in val_])
elif isinstance(val_, dict):
return var_delim.join([
('%s%s%s' % (var_str, code_delim, code_str)) % (k, v)
for k, v in val_.items()
])
elif val_ is None:
return null_value
else:
return str(val_).replace("<class '", '').replace("'>", "")
@staticmethod
def _iterable_from_str(val_, code_delim='=', var_delim=' | ',
var_type=str, code_type=str, null_value=np.nan, return_type=set):
"""
Converts a delimited string into a list or dict
"""
def check_null(x):
if (x in {null_value, 'None', None}):
return None
else:
return var_type(x)
if val_ in {null_value, 'None', None}:
return None
elif code_delim in val_:
def get_k(v):
return var_type(v.split(code_delim)[0])
def get_v(v):
return code_type(v.split(code_delim)[1])
return {get_k(x): get_v(x) for x in val_.split(var_delim)}
elif var_delim in val_:
return return_type([check_null(x) for x in val_.split(var_delim)])
else:
return return_type([check_null(val_)])
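# Round-trip sketch of the two helpers above (values hypothetical):
#   Question._iterable_to_str(['a', 'b'])        -> 'a | b'
#   Question._iterable_to_str({1: 'x', 2: 'y'})  -> '1=x | 2=y'
#   Question._iterable_from_str('1=x | 2=y')     -> {'1': 'x', '2': 'y'}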
def _to_series(self, code_delim='=', var_delim=' | ',
var_str=None, code_str='%s', null_value='None'):
"""Formats data as a series of text values"""
tent_dict = self.__dict__.items()
def _check_dict(k, v):
if k in {'log'}:
return False
elif ((v is None) or
(isinstance(v, (list, set, dict)) and (len(v) == 0))):
return False
elif ((k in self.defaults) and (self.defaults[k] == v)):
return False
else:
return True
if var_str is None:
var_str = self.var_str_format.get(self.dtype, '%s')
f_ = partial(self._iterable_to_str, code_delim=code_delim,
var_delim=var_delim, var_str=var_str, code_str=code_str,
null_value=null_value)
return pd.Series({k: f_(v) for k, v in tent_dict
if _check_dict(k, v)})
def _to_usgs(self):
"""Converts question object to usgs xml format
see: https://www.usgs.gov/products/data-and-tools/data-management/data-dictionaries
"""
pass
@classmethod
def _read_series(cls, var_, var_delim=' | ', code_delim='=', null_value='None'):
"""
Builds a question object off a series
Parameters
----------
var_: Series
The series containing the parameters
var_delim: str, optional
The separator between values in the "order" column.
code_delim: str, optional
The delimiter between a numerically coded categorical variable and
the value it maps to.
Returns
-------
Question
"""
# Drops the 'type' entry, if present
if 'type' in var_:
var_.drop('type', inplace=True)
# Extracts the datatype
dtype_ = pydoc.locate(var_['dtype'])
var_['dtype'] = dtype_
i_param = {'code_delim': code_delim,
'var_delim': var_delim,
'null_value': null_value,
'var_type': dtype_}
def _handle_col(k, v):
if pd.isnull(v) or v == str(null_value):
return None
if k == 'colormap':
return _check_cmap(v)
elif (k == 'ref_value') and (dtype_ is bool):
return pydoc.locate(v.title())
elif (k == 'ref_value'):
return dtype_(v)
elif (k in {'order', 'limits'}) and (dtype_ is bool):
s_ = cls._iterable_from_str(
v, return_type=list, code_delim=code_delim,
var_delim=var_delim, null_value=null_value)
return [pydoc.locate(v_.title()) for v_ in s_]
elif k in {'order', 'limits'}:
return cls._iterable_from_str(v, return_type=list, **i_param)
elif k in b4wdefaults.properties_num:
return float(v)
elif k in b4wdefaults.properties_bin:
return pydoc.locate(v.title())
elif k in b4wdefaults.properties_set:
return cls._iterable_from_str(v, **i_param)
else:
return v
dict_ = {k: _handle_col(k, v) for k, v in var_.iteritems()
if (not (pd.isnull(v) or v == str(null_value)) or
k not in {'type', 'dtype'})}
if ('order' in dict_) and isinstance(dict_['order'], dict):
part_ = dict_['order']
dict_['order'] = [dtype_(k) for k in part_.keys()]
dict_['var_labels'] = part_
return cls(**dict_)
@staticmethod
def _identify_remap_function(dtype, placeholders=None,
true_values=true_values, false_values=false_values):
"""
Selects an appropriate function to convert data from str to dtype
Parameters
----------
dtype : object
The datatype in which the responses should be represented (e.g.
`float`, `int`, `str`).
placeholders : set, optional
Acceptable values to be ignored representing either placeholder
values such as text for missing values, blanks, or ambiguous
measurements.
true_values : set, optional
Acceptable values for true values for boolean data
false_values : set, optional
Acceptable values for false values for boolean data
Returns
-------
Function
A function to convert the strings to the correct data type. The
function will return "error" if the value cannot be cast
appropriately.
"""
if placeholders is None:
placeholders = []
if dtype == bool:
# Converts any non-placeholder string values to lowercase
def clean_up_strings(x):
if isinstance(x, str) and (x not in placeholders):
return x.lower()
else:
return x
def remap_(x):
if (x in placeholders) or (pd.isnull(x)):
return x
x = clean_up_strings(x)
if x in true_values:
return True
elif x in false_values:
return False
else:
return 'error'
else:
# Defines a function to clean up all other datatypes
def remap_(x):
if (x in placeholders) or pd.isnull(x):
return x
else:
try:
return dtype(x)
except (ValueError, TypeError):
return 'error'
return remap_
def _check_cmap(cmap, num_colors=None, range=None):
    """Placeholder for colormap validation; currently passes the value through."""
    return cmap
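# A minimal construction sketch (the column name and description are
# hypothetical, not from a real data dictionary):
def _question_example():
    q = Question(name='age_years',
                 description='Age of the participant in years',
                 dtype=float)
    # _identify_remap_function builds a caster that passes placeholders through
    remap = q._identify_remap_function(float, placeholders={'not provided'})
    return q.clean_name, remap('12.5'), remap('not provided')
    # -> ('Age Years', 12.5, 'not provided')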
|
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.web.template}
"""
from cStringIO import StringIO
from zope.interface.verify import verifyObject
from twisted.internet.defer import succeed, gatherResults
from twisted.python.filepath import FilePath
from twisted.trial.unittest import TestCase
from twisted.trial.util import suppress as SUPPRESS
from twisted.web.template import (
Element, TagLoader, renderer, tags, XMLFile, XMLString)
from twisted.web.iweb import ITemplateLoader
from twisted.web.error import (FlattenerError, MissingTemplateLoader,
MissingRenderMethod)
from twisted.web.template import renderElement
from twisted.web._element import UnexposedMethodError
from twisted.web.test._util import FlattenTestCase
from twisted.web.test.test_web import DummyRequest
from twisted.web.server import NOT_DONE_YET
_xmlFileSuppress = SUPPRESS(category=DeprecationWarning,
message="Passing filenames or file objects to XMLFile is "
"deprecated since Twisted 12.1. Pass a FilePath instead.")
class TagFactoryTests(TestCase):
"""
Tests for L{_TagFactory} through the publicly-exposed L{tags} object.
"""
def test_lookupTag(self):
"""
HTML tags can be retrieved through C{tags}.
"""
tag = tags.a
self.assertEqual(tag.tagName, "a")
def test_lookupHTML5Tag(self):
"""
Twisted supports the latest and greatest HTML tags from the HTML5
specification.
"""
tag = tags.video
self.assertEqual(tag.tagName, "video")
def test_lookupTransparentTag(self):
"""
To support transparent inclusion in templates, there is a special tag,
the transparent tag, which has no name of its own but is accessed
through the "transparent" attribute.
"""
tag = tags.transparent
self.assertEqual(tag.tagName, "")
def test_lookupInvalidTag(self):
"""
Invalid tags which are not part of HTML cause AttributeErrors when
accessed through C{tags}.
"""
self.assertRaises(AttributeError, getattr, tags, "invalid")
def test_lookupXMP(self):
"""
As a special case, the <xmp> tag is simply not available through
C{tags} or any other part of the templating machinery.
"""
self.assertRaises(AttributeError, getattr, tags, "xmp")
class ElementTests(TestCase):
"""
Tests for the awesome new L{Element} class.
"""
def test_missingTemplateLoader(self):
"""
L{Element.render} raises L{MissingTemplateLoader} if the C{loader}
attribute is C{None}.
"""
element = Element()
err = self.assertRaises(MissingTemplateLoader, element.render, None)
self.assertIdentical(err.element, element)
def test_missingTemplateLoaderRepr(self):
"""
A L{MissingTemplateLoader} instance can be repr()'d without error.
"""
class PrettyReprElement(Element):
def __repr__(self):
return 'Pretty Repr Element'
self.assertIn('Pretty Repr Element',
repr(MissingTemplateLoader(PrettyReprElement())))
def test_missingRendererMethod(self):
"""
When called with a name that is not associated with a render method,
L{Element.lookupRenderMethod} raises L{MissingRenderMethod}.
"""
element = Element()
err = self.assertRaises(
MissingRenderMethod, element.lookupRenderMethod, "foo")
self.assertIdentical(err.element, element)
self.assertEqual(err.renderName, "foo")
def test_missingRenderMethodRepr(self):
"""
A L{MissingRenderMethod} instance can be repr()'d without error.
"""
class PrettyReprElement(Element):
def __repr__(self):
return 'Pretty Repr Element'
s = repr(MissingRenderMethod(PrettyReprElement(),
'expectedMethod'))
self.assertIn('Pretty Repr Element', s)
self.assertIn('expectedMethod', s)
def test_definedRenderer(self):
"""
When called with the name of a defined render method,
L{Element.lookupRenderMethod} returns that render method.
"""
class ElementWithRenderMethod(Element):
@renderer
def foo(self, request, tag):
return "bar"
foo = ElementWithRenderMethod().lookupRenderMethod("foo")
self.assertEqual(foo(None, None), "bar")
def test_render(self):
"""
L{Element.render} loads a document from the C{loader} attribute and
returns it.
"""
class TemplateLoader(object):
def load(self):
return "result"
class StubElement(Element):
loader = TemplateLoader()
element = StubElement()
self.assertEqual(element.render(None), "result")
def test_misuseRenderer(self):
"""
If the L{renderer} decorator is called without any arguments, it will
raise a comprehensible exception.
"""
te = self.assertRaises(TypeError, renderer)
self.assertEqual(str(te),
"expose() takes at least 1 argument (0 given)")
def test_renderGetDirectlyError(self):
"""
Called directly, without a default, L{renderer.get} raises
L{UnexposedMethodError} when it cannot find a renderer.
"""
self.assertRaises(UnexposedMethodError, renderer.get, None,
"notARenderer")
class XMLFileReprTests(TestCase):
"""
Tests for L{twisted.web.template.XMLFile}'s C{__repr__}.
"""
def test_filePath(self):
"""
An L{XMLFile} with a L{FilePath} returns a useful repr().
"""
path = FilePath("/tmp/fake.xml")
self.assertEqual('<XMLFile of %r>' % (path,), repr(XMLFile(path)))
def test_filename(self):
"""
An L{XMLFile} with a filename returns a useful repr().
"""
fname = "/tmp/fake.xml"
self.assertEqual('<XMLFile of %r>' % (fname,), repr(XMLFile(fname)))
test_filename.suppress = [_xmlFileSuppress]
def test_file(self):
"""
An L{XMLFile} with a file object returns a useful repr().
"""
fobj = StringIO("not xml")
self.assertEqual('<XMLFile of %r>' % (fobj,), repr(XMLFile(fobj)))
test_file.suppress = [_xmlFileSuppress]
class XMLLoaderTestsMixin(object):
"""
@ivar templateString: Simple template to use to exercise the loaders.
@ivar deprecatedUse: C{True} if this use of L{XMLFile} is deprecated and
should emit a C{DeprecationWarning}.
"""
loaderFactory = None
templateString = '<p>Hello, world.</p>'
def test_load(self):
"""
Verify that the loader returns a tag with the correct children.
"""
loader = self.loaderFactory()
tag, = loader.load()
warnings = self.flushWarnings(offendingFunctions=[self.loaderFactory])
if self.deprecatedUse:
self.assertEqual(len(warnings), 1)
self.assertEqual(warnings[0]['category'], DeprecationWarning)
self.assertEqual(
warnings[0]['message'],
"Passing filenames or file objects to XMLFile is "
"deprecated since Twisted 12.1. Pass a FilePath instead.")
else:
self.assertEqual(len(warnings), 0)
self.assertEqual(tag.tagName, 'p')
self.assertEqual(tag.children, [u'Hello, world.'])
def test_loadTwice(self):
"""
If C{load()} is called twice on the same loader, the result should be the
same.
"""
loader = self.loaderFactory()
tags1 = loader.load()
tags2 = loader.load()
self.assertEqual(tags1, tags2)
test_loadTwice.suppress = [_xmlFileSuppress]
class XMLStringLoaderTests(TestCase, XMLLoaderTestsMixin):
"""
Tests for L{twisted.web.template.XMLString}
"""
deprecatedUse = False
def loaderFactory(self):
"""
@return: an L{XMLString} constructed with C{self.templateString}.
"""
return XMLString(self.templateString)
class XMLFileWithFilePathTests(TestCase, XMLLoaderTestsMixin):
"""
Tests for L{twisted.web.template.XMLFile}'s L{FilePath} support.
"""
deprecatedUse = False
def loaderFactory(self):
"""
@return: an L{XMLString} constructed with a L{FilePath} pointing to a
file that contains C{self.templateString}.
"""
fp = FilePath(self.mktemp())
fp.setContent(self.templateString)
return XMLFile(fp)
class XMLFileWithFileTests(TestCase, XMLLoaderTestsMixin):
"""
Tests for L{twisted.web.template.XMLFile}'s deprecated file object support.
"""
deprecatedUse = True
def loaderFactory(self):
"""
@return: an L{XMLString} constructed with a file object that contains
C{self.templateString}.
"""
return XMLFile(StringIO(self.templateString))
class XMLFileWithFilenameTests(TestCase, XMLLoaderTestsMixin):
"""
Tests for L{twisted.web.template.XMLFile}'s deprecated filename support.
"""
deprecatedUse = True
def loaderFactory(self):
"""
@return: an L{XMLString} constructed with a filename that points to a
file containing C{self.templateString}.
"""
fp = FilePath(self.mktemp())
fp.setContent(self.templateString)
return XMLFile(fp.path)
class FlattenIntegrationTests(FlattenTestCase):
"""
Tests for integration between L{Element} and
L{twisted.web._flatten.flatten}.
"""
def test_roundTrip(self):
"""
Given a series of parsable XML strings, verify that
L{twisted.web._flatten.flatten} will flatten the L{Element} back to the
input when sent on a round trip.
"""
fragments = [
"<p>Hello, world.</p>",
"<p><!-- hello, world --></p>",
"<p><![CDATA[Hello, world.]]></p>",
'<test1 xmlns:test2="urn:test2">'
'<test2:test3></test2:test3></test1>',
'<test1 xmlns="urn:test2"><test3></test3></test1>',
'<p>\xe2\x98\x83</p>',
]
deferreds = [
self.assertFlattensTo(Element(loader=XMLString(xml)), xml)
for xml in fragments]
return gatherResults(deferreds)
def test_entityConversion(self):
"""
When flattening an HTML entity, it should flatten out to the utf-8
representation if possible.
"""
element = Element(loader=XMLString('<p>☃</p>'))
return self.assertFlattensTo(element, '<p>\xe2\x98\x83</p>')
def test_missingTemplateLoader(self):
"""
Rendering a Element without a loader attribute raises the appropriate
exception.
"""
return self.assertFlatteningRaises(Element(), MissingTemplateLoader)
def test_missingRenderMethod(self):
"""
Flattening an L{Element} with a C{loader} which has a tag with a render
directive fails with L{FlattenerError} if there is no available render
method to satisfy that directive.
"""
element = Element(loader=XMLString("""
<p xmlns:t="http://twistedmatrix.com/ns/twisted.web.template/0.1"
t:render="unknownMethod" />
"""))
return self.assertFlatteningRaises(element, MissingRenderMethod)
def test_transparentRendering(self):
"""
A C{transparent} element should be eliminated from the DOM and rendered as
only its children.
"""
element = Element(loader=XMLString(
'<t:transparent '
'xmlns:t="http://twistedmatrix.com/ns/twisted.web.template/0.1">'
'Hello, world.'
'</t:transparent>'
))
return self.assertFlattensTo(element, "Hello, world.")
def test_attrRendering(self):
"""
An Element with an attr tag renders the value of its attr tag as an
attribute of its containing tag.
"""
element = Element(loader=XMLString(
'<a xmlns:t="http://twistedmatrix.com/ns/twisted.web.template/0.1">'
'<t:attr name="href">http://example.com</t:attr>'
'Hello, world.'
'</a>'
))
return self.assertFlattensTo(element,
'<a href="http://example.com">Hello, world.</a>')
def test_errorToplevelAttr(self):
"""
A template with a toplevel C{attr} tag will not load; it will raise
L{AssertionError} if you try.
"""
self.assertRaises(
AssertionError,
XMLString,
"""<t:attr
xmlns:t='http://twistedmatrix.com/ns/twisted.web.template/0.1'
name='something'
>hello</t:attr>
""")
def test_errorUnnamedAttr(self):
"""
A template with an C{attr} tag with no C{name} attribute will not load;
it will raise L{AssertionError} if you try.
"""
self.assertRaises(
AssertionError,
XMLString,
"""<html><t:attr
xmlns:t='http://twistedmatrix.com/ns/twisted.web.template/0.1'
>hello</t:attr></html>""")
def test_lenientPrefixBehavior(self):
"""
If the parser sees a prefix it doesn't recognize on an attribute, it
will pass it on through to serialization.
"""
theInput = (
'<hello:world hello:sample="testing" '
'xmlns:hello="http://made-up.example.com/ns/not-real">'
'This is a made-up tag.</hello:world>')
element = Element(loader=XMLString(theInput))
self.assertFlattensTo(element, theInput)
def test_deferredRendering(self):
"""
An Element with a render method which returns a Deferred will render
correctly.
"""
class RenderfulElement(Element):
@renderer
def renderMethod(self, request, tag):
return succeed("Hello, world.")
element = RenderfulElement(loader=XMLString("""
<p xmlns:t="http://twistedmatrix.com/ns/twisted.web.template/0.1"
t:render="renderMethod">
Goodbye, world.
</p>
"""))
return self.assertFlattensTo(element, "Hello, world.")
def test_loaderClassAttribute(self):
"""
If there is a non-None loader attribute on the class of an Element
instance but none on the instance itself, the class attribute is used.
"""
class SubElement(Element):
loader = XMLString("<p>Hello, world.</p>")
return self.assertFlattensTo(SubElement(), "<p>Hello, world.</p>")
def test_directiveRendering(self):
"""
An Element with a valid render directive has that directive invoked and
the result added to the output.
"""
renders = []
class RenderfulElement(Element):
@renderer
def renderMethod(self, request, tag):
renders.append((self, request))
return tag("Hello, world.")
element = RenderfulElement(loader=XMLString("""
<p xmlns:t="http://twistedmatrix.com/ns/twisted.web.template/0.1"
t:render="renderMethod" />
"""))
return self.assertFlattensTo(element, "<p>Hello, world.</p>")
def test_directiveRenderingOmittingTag(self):
"""
An Element with a render method which omits the containing tag
successfully removes that tag from the output.
"""
class RenderfulElement(Element):
@renderer
def renderMethod(self, request, tag):
return "Hello, world."
element = RenderfulElement(loader=XMLString("""
<p xmlns:t="http://twistedmatrix.com/ns/twisted.web.template/0.1"
t:render="renderMethod">
Goodbye, world.
</p>
"""))
return self.assertFlattensTo(element, "Hello, world.")
def test_elementContainingStaticElement(self):
"""
An Element which is returned by the render method of another Element is
rendered properly.
"""
class RenderfulElement(Element):
@renderer
def renderMethod(self, request, tag):
return tag(Element(
loader=XMLString("<em>Hello, world.</em>")))
element = RenderfulElement(loader=XMLString("""
<p xmlns:t="http://twistedmatrix.com/ns/twisted.web.template/0.1"
t:render="renderMethod" />
"""))
return self.assertFlattensTo(element, "<p><em>Hello, world.</em></p>")
def test_elementUsingSlots(self):
"""
An Element whose template contains C{t:slot} tags has those slots
filled via C{fillSlots}, with a slot's C{default} used when it is left
unfilled.
"""
class RenderfulElement(Element):
@renderer
def renderMethod(self, request, tag):
return tag.fillSlots(test2='world.')
element = RenderfulElement(loader=XMLString(
'<p xmlns:t="http://twistedmatrix.com/ns/twisted.web.template/0.1"'
' t:render="renderMethod">'
'<t:slot name="test1" default="Hello, " />'
'<t:slot name="test2" />'
'</p>'
))
return self.assertFlattensTo(element, "<p>Hello, world.</p>")
def test_elementContainingDynamicElement(self):
"""
Directives in the document factory of a Element returned from a render
method of another Element are satisfied from the correct object: the
"inner" Element.
"""
class OuterElement(Element):
@renderer
def outerMethod(self, request, tag):
return tag(InnerElement(loader=XMLString("""
<t:ignored
xmlns:t="http://twistedmatrix.com/ns/twisted.web.template/0.1"
t:render="innerMethod" />
""")))
class InnerElement(Element):
@renderer
def innerMethod(self, request, tag):
return "Hello, world."
element = OuterElement(loader=XMLString("""
<p xmlns:t="http://twistedmatrix.com/ns/twisted.web.template/0.1"
t:render="outerMethod" />
"""))
return self.assertFlattensTo(element, "<p>Hello, world.</p>")
def test_sameLoaderTwice(self):
"""
Rendering the output of a loader, or even the same element, should
return different output each time.
"""
sharedLoader = XMLString(
'<p xmlns:t="http://twistedmatrix.com/ns/twisted.web.template/0.1">'
'<t:transparent t:render="classCounter" /> '
'<t:transparent t:render="instanceCounter" />'
'</p>')
class DestructiveElement(Element):
count = 0
instanceCount = 0
loader = sharedLoader
@renderer
def classCounter(self, request, tag):
DestructiveElement.count += 1
return tag(str(DestructiveElement.count))
@renderer
def instanceCounter(self, request, tag):
self.instanceCount += 1
return tag(str(self.instanceCount))
e1 = DestructiveElement()
e2 = DestructiveElement()
self.assertFlattensImmediately(e1, "<p>1 1</p>")
self.assertFlattensImmediately(e1, "<p>2 2</p>")
self.assertFlattensImmediately(e2, "<p>3 1</p>")
class TagLoaderTests(FlattenTestCase):
"""
Tests for L{TagLoader}.
"""
def setUp(self):
self.loader = TagLoader(tags.i('test'))
def test_interface(self):
"""
An instance of L{TagLoader} provides L{ITemplateLoader}.
"""
self.assertTrue(verifyObject(ITemplateLoader, self.loader))
def test_loadsList(self):
"""
L{TagLoader.load} returns a list, per L{ITemplateLoader}.
"""
self.assertIsInstance(self.loader.load(), list)
def test_flatten(self):
"""
L{TagLoader} can be used in an L{Element}, and flattens as the tag used
to construct the L{TagLoader} would flatten.
"""
e = Element(self.loader)
self.assertFlattensImmediately(e, '<i>test</i>')
class TestElement(Element):
"""
An L{Element} that can be rendered successfully.
"""
loader = XMLString(
'<p xmlns:t="http://twistedmatrix.com/ns/twisted.web.template/0.1">'
'Hello, world.'
'</p>')
class TestFailureElement(Element):
"""
An L{Element} that can be used in place of L{FailureElement} to verify
that L{renderElement} can render failures properly.
"""
loader = XMLString(
'<p xmlns:t="http://twistedmatrix.com/ns/twisted.web.template/0.1">'
'I failed.'
'</p>')
def __init__(self, failure, loader=None):
self.failure = failure
class FailingElement(Element):
"""
An element that raises an exception when rendered.
"""
def render(self, request):
a = 42
b = 0
return a // b
class FakeSite(object):
"""
A minimal L{Site} object that we can use to test displayTracebacks
"""
displayTracebacks = False
class RenderElementTests(TestCase):
"""
Test L{renderElement}
"""
def setUp(self):
"""
Set up a common L{DummyRequest} and L{FakeSite}.
"""
self.request = DummyRequest([""])
self.request.site = FakeSite()
def test_simpleRender(self):
"""
L{renderElement} returns NOT_DONE_YET and eventually
writes the rendered L{Element} to the request before finishing the
request.
"""
element = TestElement()
d = self.request.notifyFinish()
def check(_):
self.assertEqual(
"".join(self.request.written),
"<!DOCTYPE html>\n"
"<p>Hello, world.</p>")
self.assertTrue(self.request.finished)
d.addCallback(check)
self.assertIdentical(NOT_DONE_YET, renderElement(self.request, element))
return d
def test_simpleFailure(self):
"""
L{renderElement} handles failures by writing a minimal
error message to the request and finishing it.
"""
element = FailingElement()
d = self.request.notifyFinish()
def check(_):
flushed = self.flushLoggedErrors(FlattenerError)
self.assertEqual(len(flushed), 1)
self.assertEqual(
"".join(self.request.written),
('<!DOCTYPE html>\n'
'<div style="font-size:800%;'
'background-color:#FFF;'
'color:#F00'
'">An error occurred while rendering the response.</div>'))
self.assertTrue(self.request.finished)
d.addCallback(check)
self.assertIdentical(NOT_DONE_YET, renderElement(self.request, element))
return d
def test_simpleFailureWithTraceback(self):
"""
L{renderElement} will render a traceback when rendering of
the element fails and our site is configured to display tracebacks.
"""
self.request.site.displayTracebacks = True
element = FailingElement()
d = self.request.notifyFinish()
def check(_):
flushed = self.flushLoggedErrors(FlattenerError)
self.assertEqual(len(flushed), 1)
self.assertEqual(
"".join(self.request.written),
"<!DOCTYPE html>\n<p>I failed.</p>")
self.assertTrue(self.request.finished)
d.addCallback(check)
renderElement(self.request, element, _failElement=TestFailureElement)
return d
def test_nonDefaultDoctype(self):
"""
L{renderElement} will write the doctype string specified by the
doctype keyword argument.
"""
element = TestElement()
d = self.request.notifyFinish()
def check(_):
self.assertEqual(
"".join(self.request.written),
('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"'
' "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">\n'
'<p>Hello, world.</p>'))
d.addCallback(check)
renderElement(
self.request,
element,
doctype=(
'<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"'
' "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">'))
return d
def test_noneDoctype(self):
"""
L{renderElement} will not write out a doctype if the doctype keyword
argument is C{None}.
"""
element = TestElement()
d = self.request.notifyFinish()
def check(_):
self.assertEqual(
"".join(self.request.written),
'<p>Hello, world.</p>')
d.addCallback(check)
renderElement(self.request, element, doctype=None)
return d
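# A standalone sketch of the API exercised above (not part of the test
# suite): one Element with a single renderer, suitable for flattening.
class _GreetingElement(Element):
    loader = XMLString(
        '<p xmlns:t="http://twistedmatrix.com/ns/twisted.web.template/0.1" '
        't:render="who" />')

    @renderer
    def who(self, request, tag):
        return tag("Hello, world.")
# flattenString(None, _GreetingElement()) returns a Deferred that fires with
# '<p>Hello, world.</p>'; flattenString is importable from
# twisted.web.template alongside the names already imported above.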
|
|
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import numpy as np
from rfho.models import LinearModel, vectorize_model
from rfho.utils import cross_entropy_loss, stepwise_pu, unconditional_pu, PrintUtils, norm
from rfho.datasets import load_iris, ExampleVisiting
from rfho.hyper_gradients import ReverseHG, ForwardHG
from rfho.optimizers import *
import tensorflow as tf  # explicit import: tf is referenced directly below
import unittest
class TestDohDirectDoh(unittest.TestCase):
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
def test_single_hp(self):
tf.reset_default_graph()
T = 100
lr = .01
hyper_iterations = 10
hyper_learning_rate = .001
iris = load_iris([.4, .4])
x = tf.placeholder(tf.float32, name='x')
y = tf.placeholder(tf.float32, name='y')
model = LinearModel(x, 4, 3)
w, model_out = vectorize_model(model.var_list, model.inp[-1])
error = tf.reduce_mean(cross_entropy_loss(model_out, y))
correct_prediction = tf.equal(tf.argmax(model_out, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
eta = tf.Variable(lr, name='eta')
dynamics_dict = GradientDescentOptimizer.create(w, lr=eta, loss=error)
doh = ReverseHG(dynamics_dict, hyper_dict={error: eta})
grad = tf.gradients(error, w.tensor)[0]
hyper_dict = {error: (eta, -grad)}
direct_doh = ForwardHG(dynamics_dict, hyper_dict=hyper_dict)
# noinspection PyUnusedLocal
def all_training_supplier(step=None): return {x: iris.train.data, y: iris.train.target}
training_supplier = all_training_supplier
# noinspection PyUnusedLocal
def validation_supplier(step=None): return {x: iris.validation.data, y: iris.validation.target}
# noinspection PyUnusedLocal
def test_supplier(step=None): return {x: iris.test.data, y: iris.test.target}
psu = PrintUtils(
stepwise_pu(lambda ses, step: print('test accuracy', ses.run(accuracy, feed_dict=test_supplier())), T - 1),
)
psu2 = None
history_test_accuracy = []
history_eta = []
# noinspection PyUnusedLocal
def save_accuracies(ses, step):
history_test_accuracy.append(ses.run(accuracy, feed_dict=test_supplier()))
history_eta.append(ses.run(eta))
after_forward_su = PrintUtils(unconditional_pu(save_accuracies), unconditional_pu(
lambda ses, step: print('training error', error.eval(feed_dict=all_training_supplier()))))
delta_hyper = tf.placeholder(tf.float32)
hyper_upd_ops = [hyp.assign(tf.minimum(tf.maximum(hyp - delta_hyper, tf.zeros_like(hyp)), tf.ones_like(hyp)))
for hyp in doh.hyper_list] # check the sign of gradient
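# The loops below check that forward-mode (ForwardHG) and reverse-mode
# (ReverseHG) hypergradients of the error w.r.t. eta agree to within 1e-5
# on every hyper-iteration: first on full-batch training, then (second
# session) on a stochastic ExampleVisiting schedule.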
diffs = []
with tf.Session(config=TestDohDirectDoh.config).as_default() as ss:
tf.variables_initializer([eta]).run()
for _ in range(hyper_iterations):
direct_doh.initialize()
for _k in range(T):
direct_doh.step_forward(train_feed_dict_supplier=training_supplier, summary_utils=psu)
direct_res = direct_doh.hyper_gradient_vars(validation_suppliers=training_supplier)
res = doh.run_all(T, train_feed_dict_supplier=training_supplier, after_forward_su=after_forward_su,
val_feed_dict_suppliers=training_supplier, forward_su=psu, backward_su=psu2)
collected_hyper_gradients = list(ReverseHG.std_collect_hyper_gradients(res).values())
[ss.run(hyper_upd_ops[j],
feed_dict={delta_hyper: hyper_learning_rate * collected_hyper_gradients[j]})
for j in range(len(doh.hyper_list))]
self.assertLess(np.linalg.norm(np.array(direct_res[eta]) - np.array(collected_hyper_gradients)),
1.e-5)
diffs.append(np.array(direct_res[eta]) - np.array(collected_hyper_gradients))
ev_data = ExampleVisiting(iris, 10, 10)
T = ev_data.T
training_supplier = ev_data.create_feed_dict_supplier(x, y)
with tf.Session(config=TestDohDirectDoh.config).as_default() as ss:
tf.variables_initializer([eta]).run()
for _ in range(hyper_iterations):
ev_data.generate_visiting_scheme()
direct_doh.initialize()
for _k in range(T):
direct_doh.step_forward(train_feed_dict_supplier=training_supplier, summary_utils=psu)
direct_res = direct_doh.hyper_gradient_vars(validation_suppliers=all_training_supplier)
res = doh.run_all(T, train_feed_dict_supplier=training_supplier, after_forward_su=after_forward_su,
val_feed_dict_suppliers=all_training_supplier, forward_su=psu, backward_su=psu2)
collected_hyper_gradients = list(ReverseHG.std_collect_hyper_gradients(res).values())
[ss.run(hyper_upd_ops[j],
feed_dict={delta_hyper: hyper_learning_rate * collected_hyper_gradients[j]})
for j in range(len(doh.hyper_list))]
self.assertLess(np.linalg.norm(np.array(direct_res[eta]) - np.array(collected_hyper_gradients)),
1.e-5)
# def _test_multiple_hp(self, momentum=False): # FIXME update this test
# tf.reset_default_graph()
#
# T = 100
# lr = .01
#
# hyper_iterations = 10
#
# hyper_learning_rate = .001
#
# mu = None
# if momentum:
# mu = tf.Variable(.7, name='mu')
#
# iris = load_iris([.4, .4])
#
# x = tf.placeholder(tf.float32, name='x')
# y = tf.placeholder(tf.float32, name='y')
# model = LinearModel(x, 4, 3)
# w, model_out, mat_W, b = vectorize_model(model.var_list, model.inp[-1], model.Ws[0], model.bs[0],
# augment=momentum)
#
# error = tf.reduce_mean(cross_entropy_loss(model_out, y))
#
# gamma = tf.Variable([0., 0.], name='gamma')
# regularizer = gamma[0]*tf.reduce_sum(mat_W**2) + gamma[1]*tf.reduce_sum(b**2)
#
# training_error = error + regularizer
#
# correct_prediction = tf.equal(tf.argmax(model_out, 1), tf.argmax(y, 1))
# accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
#
# eta = tf.Variable(lr, name='eta')
# if momentum:
# dynamics_dict = MomentumOptimizer.create(w, lr=eta, mu=mu, loss=training_error)
# else:
# dynamics_dict = GradientDescentOptimizer.create(w, lr=eta, loss=training_error)
#
# if momentum:
# doh = ReverseHG(dynamics_dict, hyper_dict={training_error: [eta, mu], error: [gamma]})
# else:
# doh = ReverseHG(dynamics_dict, hyper_dict={training_error: [eta], error: [gamma]})
#
# true_w = w.var_list(VlMode.TENSOR)[0]
# grad = tf.gradients(training_error, true_w)[0]
#
# _grad_reg = tf.gradients(regularizer, gamma)[0]
# grad_reg = tf.stack([
# tf.gradients(_grad_reg[0], true_w)[0], tf.gradients(_grad_reg[1], true_w)[0]
# ], axis=1)
#
# if momentum:
# w_b, m = w.var_list(VlMode.TENSOR)
# # noinspection PyUnresolvedReferences
# grad = ZMergedMatrix([
# - tf.transpose([mu * m + grad]),
# tf.zeros([m.get_shape().as_list()[0], 1])
# ])
# grad_reg = ZMergedMatrix([
# -eta*grad_reg,
# grad_reg
# ])
# grad_mu = ZMergedMatrix([
# - (eta * m),
# m
# ])
#
# else:
# grad_mu = None
# grad_reg *= eta
#
# if momentum:
# hyper_dict = {training_error: [(eta, grad), (mu, grad_mu)],
# error: (gamma, grad_reg)}
# direct_doh = ForwardHG(dynamics_dict, hyper_dict=hyper_dict)
# else:
# hyper_dict = {training_error: (eta, -grad),
# error: (gamma, -grad_reg)}
# direct_doh = ForwardHG(dynamics_dict, hyper_dict=hyper_dict)
#
# # noinspection PyUnusedLocal
# def all_training_supplier(step=None): return {x: iris.train.data, y: iris.train.target}
#
# training_supplier = all_training_supplier
#
# # noinspection PyUnusedLocal
# def validation_supplier(step=None): return {x: iris.validation.data, y: iris.validation.target}
#
# # noinspection PyUnusedLocal
# def test_supplier(step=None): return {x: iris.test.data, y: iris.test.target}
#
# psu = PrintUtils(
# stepwise_pu(lambda ses, step: print('test accuracy', ses.run(accuracy, feed_dict=test_supplier())), T - 1),
# )
# norm_p = norm(tf.concat(list(doh.p_dict.values()), 0))
# psu2 = PrintUtils(stepwise_pu(
# lambda ses, step: print('norm of costate', ses.run(norm_p)), T - 1))
#
# history_test_accuracy = []
# history_eta = []
#
# # noinspection PyUnusedLocal
# def save_accuracies(ses, step):
# history_test_accuracy.append(ses.run(accuracy, feed_dict=test_supplier()))
# history_eta.append(ses.run(eta))
#
# after_forward_su = PrintUtils(unconditional_pu(save_accuracies), unconditional_pu(
# lambda ses, step: print('training error', error.eval(feed_dict=all_training_supplier()))))
#
# delta_hyper = tf.placeholder(tf.float32)
#
# hyper_upd_ops = {hyp: hyp.assign(tf.maximum(hyp - delta_hyper, tf.zeros_like(hyp)))
# for hyp in doh.hyper_list} # check the sign of gradient
#
# with tf.Session(config=TestDohDirectDoh.config).as_default() as ss:
# tf.variables_initializer(doh.hyper_list).run()
#
# for _ in range(hyper_iterations):
#
# direct_doh.initialize()
# for _k in range(T):
# direct_doh.step_forward(train_feed_dict_supplier=training_supplier, summary_utils=psu)
#
# validation_suppliers = {training_error: training_supplier, error: validation_supplier}
#
# if momentum:
# direct_res = direct_doh.hyper_gradient_vars(validation_suppliers=validation_suppliers)
# else:
# direct_res = direct_doh.hyper_gradient_vars(validation_suppliers=validation_suppliers)
#
# res = doh.run_all(T, train_feed_dict_supplier=training_supplier, after_forward_su=after_forward_su,
# val_feed_dict_suppliers={error: validation_supplier, training_error: training_supplier},
# forward_su=psu, backward_su=psu2)
#
# collected_hyper_gradients = ReverseHG.std_collect_hyper_gradients(res)
#
# [ss.run(hyper_upd_ops[hyp],
# feed_dict={delta_hyper: hyper_learning_rate * collected_hyper_gradients[hyp]})
# for hyp in doh.hyper_list]
#
# for hyp in doh.hyper_list:
# self.assertLess(np.linalg.norm(
# np.array(direct_res[hyp]) - np.array(collected_hyper_gradients[hyp])), 1.e-5)
#
# def test_multiple_hypers(self):
# self._test_multiple_hp(momentum=False)
# self._test_multiple_hp(momentum=True)
def setUp(self):
tf.reset_default_graph()
if __name__ == '__main__':
# TestDohDirectDoh()._test_multiple_hp(True)
unittest.main()
|
|
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This is a python sync server used for testing Chrome Sync.
By default, it listens on an ephemeral port and xmpp_port and sends the port
numbers back to the originating process over a pipe. The originating process can
specify an explicit port and xmpp_port if necessary.
"""
import asyncore
import BaseHTTPServer
import errno
import os
import select
import socket
import sys
import urlparse
import chromiumsync
import echo_message
import testserver_base
import xmppserver
class SyncHTTPServer(testserver_base.ClientRestrictingServerMixIn,
testserver_base.BrokenPipeHandlerMixIn,
testserver_base.StoppableHTTPServer):
"""An HTTP server that handles sync commands."""
def __init__(self, server_address, xmpp_port, request_handler_class):
testserver_base.StoppableHTTPServer.__init__(self,
server_address,
request_handler_class)
self._sync_handler = chromiumsync.TestServer()
self._xmpp_socket_map = {}
self._xmpp_server = xmppserver.XmppServer(
self._xmpp_socket_map, ('localhost', xmpp_port))
self.xmpp_port = self._xmpp_server.getsockname()[1]
self.authenticated = True
def GetXmppServer(self):
return self._xmpp_server
def HandleCommand(self, query, raw_request):
return self._sync_handler.HandleCommand(query, raw_request)
def HandleRequestNoBlock(self):
"""Handles a single request.
Copied from SocketServer._handle_request_noblock().
"""
try:
request, client_address = self.get_request()
except socket.error:
return
if self.verify_request(request, client_address):
try:
self.process_request(request, client_address)
except Exception:
self.handle_error(request, client_address)
self.close_request(request)
def SetAuthenticated(self, auth_valid):
self.authenticated = auth_valid
def GetAuthenticated(self):
return self.authenticated
def serve_forever(self):
"""This is a merge of asyncore.loop() and SocketServer.serve_forever().
"""
def HandleXmppSocket(fd, socket_map, handler):
"""Runs the handler for the xmpp connection for fd.
Adapted from asyncore.read() et al.
"""
xmpp_connection = socket_map.get(fd)
# This could happen if a previous handler call caused fd to get
# removed from socket_map.
if xmpp_connection is None:
return
try:
handler(xmpp_connection)
except (asyncore.ExitNow, KeyboardInterrupt, SystemExit):
raise
except:
xmpp_connection.handle_error()
while True:
read_fds = [ self.fileno() ]
write_fds = []
exceptional_fds = []
for fd, xmpp_connection in self._xmpp_socket_map.items():
is_r = xmpp_connection.readable()
is_w = xmpp_connection.writable()
if is_r:
read_fds.append(fd)
if is_w:
write_fds.append(fd)
if is_r or is_w:
exceptional_fds.append(fd)
try:
read_fds, write_fds, exceptional_fds = (
select.select(read_fds, write_fds, exceptional_fds))
except select.error, err:
if err.args[0] != errno.EINTR:
raise
else:
continue
for fd in read_fds:
if fd == self.fileno():
self.HandleRequestNoBlock()
continue
HandleXmppSocket(fd, self._xmpp_socket_map,
asyncore.dispatcher.handle_read_event)
for fd in write_fds:
HandleXmppSocket(fd, self._xmpp_socket_map,
asyncore.dispatcher.handle_write_event)
for fd in exceptional_fds:
HandleXmppSocket(fd, self._xmpp_socket_map,
asyncore.dispatcher.handle_expt_event)
class SyncPageHandler(testserver_base.BasePageHandler):
"""Handler for the main HTTP sync server."""
def __init__(self, request, client_address, sync_http_server):
get_handlers = [self.ChromiumSyncTimeHandler,
self.ChromiumSyncMigrationOpHandler,
self.ChromiumSyncCredHandler,
self.ChromiumSyncXmppCredHandler,
self.ChromiumSyncDisableNotificationsOpHandler,
self.ChromiumSyncEnableNotificationsOpHandler,
self.ChromiumSyncSendNotificationOpHandler,
self.ChromiumSyncBirthdayErrorOpHandler,
self.ChromiumSyncTransientErrorOpHandler,
self.ChromiumSyncErrorOpHandler,
self.ChromiumSyncSyncTabFaviconsOpHandler,
self.ChromiumSyncCreateSyncedBookmarksOpHandler,
self.ChromiumSyncEnableKeystoreEncryptionOpHandler,
self.ChromiumSyncRotateKeystoreKeysOpHandler,
self.ChromiumSyncEnableManagedUserAcknowledgementHandler,
self.ChromiumSyncEnablePreCommitGetUpdateAvoidanceHandler,
self.GaiaOAuth2TokenHandler,
self.GaiaSetOAuth2TokenResponseHandler,
self.TriggerSyncedNotificationHandler,
self.SyncedNotificationsPageHandler,
self.TriggerSyncedNotificationAppInfoHandler,
self.SyncedNotificationsAppInfoPageHandler,
self.CustomizeClientCommandHandler]
post_handlers = [self.ChromiumSyncCommandHandler,
self.ChromiumSyncTimeHandler,
self.GaiaOAuth2TokenHandler,
self.GaiaSetOAuth2TokenResponseHandler]
testserver_base.BasePageHandler.__init__(self, request, client_address,
sync_http_server, [], get_handlers,
[], post_handlers, [])
def ChromiumSyncTimeHandler(self):
"""Handle Chromium sync .../time requests.
The syncer sometimes checks server reachability by examining /time.
"""
test_name = "/chromiumsync/time"
if not self._ShouldHandleRequest(test_name):
return False
# Chrome hates it if we send a response before reading the request.
if self.headers.getheader('content-length'):
length = int(self.headers.getheader('content-length'))
_raw_request = self.rfile.read(length)
self.send_response(200)
self.send_header('Content-Type', 'text/plain')
self.end_headers()
self.wfile.write('0123456789')
return True
def ChromiumSyncCommandHandler(self):
"""Handle a chromiumsync command arriving via http.
This covers all sync protocol commands: authentication, getupdates, and
commit.
"""
test_name = "/chromiumsync/command"
if not self._ShouldHandleRequest(test_name):
return False
length = int(self.headers.getheader('content-length'))
raw_request = self.rfile.read(length)
http_response = 200
raw_reply = None
if not self.server.GetAuthenticated():
http_response = 401
challenge = 'GoogleLogin realm="http://%s", service="chromiumsync"' % (
self.server.server_address[0])
else:
http_response, raw_reply = self.server.HandleCommand(
self.path, raw_request)
### Now send the response to the client. ###
self.send_response(http_response)
if http_response == 401:
self.send_header('www-Authenticate', challenge)
self.end_headers()
self.wfile.write(raw_reply)
return True
def ChromiumSyncMigrationOpHandler(self):
test_name = "/chromiumsync/migrate"
if not self._ShouldHandleRequest(test_name):
return False
http_response, raw_reply = self.server._sync_handler.HandleMigrate(
self.path)
self.send_response(http_response)
self.send_header('Content-Type', 'text/html')
self.send_header('Content-Length', len(raw_reply))
self.end_headers()
self.wfile.write(raw_reply)
return True
def ChromiumSyncCredHandler(self):
test_name = "/chromiumsync/cred"
if not self._ShouldHandleRequest(test_name):
return False
try:
query = urlparse.urlparse(self.path)[4]
cred_valid = urlparse.parse_qs(query)['valid']
if cred_valid[0] == 'True':
self.server.SetAuthenticated(True)
else:
self.server.SetAuthenticated(False)
except Exception:
self.server.SetAuthenticated(False)
http_response = 200
raw_reply = 'Authenticated: %s ' % self.server.GetAuthenticated()
self.send_response(http_response)
self.send_header('Content-Type', 'text/html')
self.send_header('Content-Length', len(raw_reply))
self.end_headers()
self.wfile.write(raw_reply)
return True
def ChromiumSyncXmppCredHandler(self):
test_name = "/chromiumsync/xmppcred"
if not self._ShouldHandleRequest(test_name):
return False
xmpp_server = self.server.GetXmppServer()
try:
query = urlparse.urlparse(self.path)[4]
cred_valid = urlparse.parse_qs(query)['valid']
if cred_valid[0] == 'True':
xmpp_server.SetAuthenticated(True)
else:
xmpp_server.SetAuthenticated(False)
    except Exception:
xmpp_server.SetAuthenticated(False)
http_response = 200
raw_reply = 'XMPP Authenticated: %s ' % xmpp_server.GetAuthenticated()
self.send_response(http_response)
self.send_header('Content-Type', 'text/html')
self.send_header('Content-Length', len(raw_reply))
self.end_headers()
self.wfile.write(raw_reply)
return True
def ChromiumSyncDisableNotificationsOpHandler(self):
test_name = "/chromiumsync/disablenotifications"
if not self._ShouldHandleRequest(test_name):
return False
self.server.GetXmppServer().DisableNotifications()
result = 200
raw_reply = ('<html><title>Notifications disabled</title>'
'<H1>Notifications disabled</H1></html>')
self.send_response(result)
self.send_header('Content-Type', 'text/html')
self.send_header('Content-Length', len(raw_reply))
self.end_headers()
self.wfile.write(raw_reply)
return True
def ChromiumSyncEnableNotificationsOpHandler(self):
test_name = "/chromiumsync/enablenotifications"
if not self._ShouldHandleRequest(test_name):
return False
self.server.GetXmppServer().EnableNotifications()
result = 200
raw_reply = ('<html><title>Notifications enabled</title>'
'<H1>Notifications enabled</H1></html>')
self.send_response(result)
self.send_header('Content-Type', 'text/html')
self.send_header('Content-Length', len(raw_reply))
self.end_headers()
self.wfile.write(raw_reply)
return True
def ChromiumSyncSendNotificationOpHandler(self):
test_name = "/chromiumsync/sendnotification"
if not self._ShouldHandleRequest(test_name):
return False
query = urlparse.urlparse(self.path)[4]
query_params = urlparse.parse_qs(query)
channel = ''
data = ''
if 'channel' in query_params:
channel = query_params['channel'][0]
if 'data' in query_params:
data = query_params['data'][0]
self.server.GetXmppServer().SendNotification(channel, data)
result = 200
raw_reply = ('<html><title>Notification sent</title>'
'<H1>Notification sent with channel "%s" '
'and data "%s"</H1></html>'
% (channel, data))
self.send_response(result)
self.send_header('Content-Type', 'text/html')
self.send_header('Content-Length', len(raw_reply))
self.end_headers()
self.wfile.write(raw_reply)
return True
def ChromiumSyncBirthdayErrorOpHandler(self):
test_name = "/chromiumsync/birthdayerror"
if not self._ShouldHandleRequest(test_name):
return False
result, raw_reply = self.server._sync_handler.HandleCreateBirthdayError()
self.send_response(result)
self.send_header('Content-Type', 'text/html')
self.send_header('Content-Length', len(raw_reply))
self.end_headers()
self.wfile.write(raw_reply)
return True
def ChromiumSyncTransientErrorOpHandler(self):
test_name = "/chromiumsync/transienterror"
if not self._ShouldHandleRequest(test_name):
return False
result, raw_reply = self.server._sync_handler.HandleSetTransientError()
self.send_response(result)
self.send_header('Content-Type', 'text/html')
self.send_header('Content-Length', len(raw_reply))
self.end_headers()
self.wfile.write(raw_reply)
return True
def ChromiumSyncErrorOpHandler(self):
test_name = "/chromiumsync/error"
if not self._ShouldHandleRequest(test_name):
return False
result, raw_reply = self.server._sync_handler.HandleSetInducedError(
self.path)
self.send_response(result)
self.send_header('Content-Type', 'text/html')
self.send_header('Content-Length', len(raw_reply))
self.end_headers()
self.wfile.write(raw_reply)
return True
def ChromiumSyncSyncTabFaviconsOpHandler(self):
test_name = "/chromiumsync/synctabfavicons"
if not self._ShouldHandleRequest(test_name):
return False
result, raw_reply = self.server._sync_handler.HandleSetSyncTabFavicons()
self.send_response(result)
self.send_header('Content-Type', 'text/html')
self.send_header('Content-Length', len(raw_reply))
self.end_headers()
self.wfile.write(raw_reply)
return True
def ChromiumSyncCreateSyncedBookmarksOpHandler(self):
test_name = "/chromiumsync/createsyncedbookmarks"
if not self._ShouldHandleRequest(test_name):
return False
result, raw_reply = self.server._sync_handler.HandleCreateSyncedBookmarks()
self.send_response(result)
self.send_header('Content-Type', 'text/html')
self.send_header('Content-Length', len(raw_reply))
self.end_headers()
self.wfile.write(raw_reply)
return True
def ChromiumSyncEnableKeystoreEncryptionOpHandler(self):
test_name = "/chromiumsync/enablekeystoreencryption"
if not self._ShouldHandleRequest(test_name):
return False
result, raw_reply = (
self.server._sync_handler.HandleEnableKeystoreEncryption())
self.send_response(result)
self.send_header('Content-Type', 'text/html')
self.send_header('Content-Length', len(raw_reply))
self.end_headers()
self.wfile.write(raw_reply)
return True
def ChromiumSyncRotateKeystoreKeysOpHandler(self):
test_name = "/chromiumsync/rotatekeystorekeys"
if not self._ShouldHandleRequest(test_name):
return False
result, raw_reply = (
self.server._sync_handler.HandleRotateKeystoreKeys())
self.send_response(result)
self.send_header('Content-Type', 'text/html')
self.send_header('Content-Length', len(raw_reply))
self.end_headers()
self.wfile.write(raw_reply)
return True
def ChromiumSyncEnableManagedUserAcknowledgementHandler(self):
test_name = "/chromiumsync/enablemanageduseracknowledgement"
if not self._ShouldHandleRequest(test_name):
return False
result, raw_reply = (
self.server._sync_handler.HandleEnableManagedUserAcknowledgement())
self.send_response(result)
self.send_header('Content-Type', 'text/html')
self.send_header('Content-Length', len(raw_reply))
self.end_headers()
self.wfile.write(raw_reply)
return True
def ChromiumSyncEnablePreCommitGetUpdateAvoidanceHandler(self):
test_name = "/chromiumsync/enableprecommitgetupdateavoidance"
if not self._ShouldHandleRequest(test_name):
return False
result, raw_reply = (
self.server._sync_handler.HandleEnablePreCommitGetUpdateAvoidance())
self.send_response(result)
self.send_header('Content-Type', 'text/html')
self.send_header('Content-Length', len(raw_reply))
self.end_headers()
self.wfile.write(raw_reply)
return True
def GaiaOAuth2TokenHandler(self):
test_name = "/o/oauth2/token"
if not self._ShouldHandleRequest(test_name):
return False
if self.headers.getheader('content-length'):
length = int(self.headers.getheader('content-length'))
_raw_request = self.rfile.read(length)
result, raw_reply = (
self.server._sync_handler.HandleGetOauth2Token())
self.send_response(result)
self.send_header('Content-Type', 'application/json')
self.send_header('Content-Length', len(raw_reply))
self.end_headers()
self.wfile.write(raw_reply)
return True
def GaiaSetOAuth2TokenResponseHandler(self):
test_name = "/setfakeoauth2token"
if not self._ShouldHandleRequest(test_name):
return False
# The index of 'query' is 4.
# See http://docs.python.org/2/library/urlparse.html
query = urlparse.urlparse(self.path)[4]
query_params = urlparse.parse_qs(query)
response_code = 0
request_token = ''
access_token = ''
expires_in = 0
token_type = ''
if 'response_code' in query_params:
response_code = query_params['response_code'][0]
if 'request_token' in query_params:
request_token = query_params['request_token'][0]
if 'access_token' in query_params:
access_token = query_params['access_token'][0]
if 'expires_in' in query_params:
expires_in = query_params['expires_in'][0]
if 'token_type' in query_params:
token_type = query_params['token_type'][0]
result, raw_reply = (
self.server._sync_handler.HandleSetOauth2Token(
response_code, request_token, access_token, expires_in, token_type))
self.send_response(result)
self.send_header('Content-Type', 'text/html')
self.send_header('Content-Length', len(raw_reply))
self.end_headers()
self.wfile.write(raw_reply)
return True
def TriggerSyncedNotificationHandler(self):
test_name = "/triggersyncednotification"
if not self._ShouldHandleRequest(test_name):
return False
query = urlparse.urlparse(self.path)[4]
query_params = urlparse.parse_qs(query)
serialized_notification = ''
if 'serialized_notification' in query_params:
serialized_notification = query_params['serialized_notification'][0]
try:
notification_string = self.server._sync_handler.account \
.AddSyncedNotification(serialized_notification)
reply = "A synced notification was triggered:\n\n"
reply += "<code>{}</code>.".format(notification_string)
response_code = 200
except chromiumsync.ClientNotConnectedError:
reply = ('The client is not connected to the server, so the notification'
' could not be created.')
response_code = 400
self.send_response(response_code)
self.send_header('Content-Type', 'text/html')
self.send_header('Content-Length', len(reply))
self.end_headers()
self.wfile.write(reply)
return True
def TriggerSyncedNotificationAppInfoHandler(self):
test_name = "/triggersyncednotificationappinfo"
if not self._ShouldHandleRequest(test_name):
return False
query = urlparse.urlparse(self.path)[4]
query_params = urlparse.parse_qs(query)
app_info = ''
if 'synced_notification_app_info' in query_params:
app_info = query_params['synced_notification_app_info'][0]
try:
app_info_string = self.server._sync_handler.account \
.AddSyncedNotificationAppInfo(app_info)
reply = "A synced notification app info was sent:\n\n"
reply += "<code>{}</code>.".format(app_info_string)
response_code = 200
except chromiumsync.ClientNotConnectedError:
reply = ('The client is not connected to the server, so the app info'
' could not be created.')
response_code = 400
self.send_response(response_code)
self.send_header('Content-Type', 'text/html')
self.send_header('Content-Length', len(reply))
self.end_headers()
self.wfile.write(reply)
return True
def CustomizeClientCommandHandler(self):
test_name = "/customizeclientcommand"
if not self._ShouldHandleRequest(test_name):
return False
query = urlparse.urlparse(self.path)[4]
query_params = urlparse.parse_qs(query)
if 'sessions_commit_delay_seconds' in query_params:
sessions_commit_delay = query_params['sessions_commit_delay_seconds'][0]
try:
command_string = self.server._sync_handler.CustomizeClientCommand(
int(sessions_commit_delay))
response_code = 200
reply = "The ClientCommand was customized:\n\n"
reply += "<code>{}</code>.".format(command_string)
except ValueError:
response_code = 400
reply = "sessions_commit_delay_seconds was not an int"
else:
response_code = 400
reply = "sessions_commit_delay_seconds is required"
self.send_response(response_code)
self.send_header('Content-Type', 'text/html')
self.send_header('Content-Length', len(reply))
self.end_headers()
self.wfile.write(reply)
return True
def SyncedNotificationsPageHandler(self):
test_name = "/syncednotifications"
if not self._ShouldHandleRequest(test_name):
return False
html = open('sync/tools/testserver/synced_notifications.html', 'r').read()
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.send_header('Content-Length', len(html))
self.end_headers()
self.wfile.write(html)
return True
def SyncedNotificationsAppInfoPageHandler(self):
test_name = "/syncednotificationsappinfo"
if not self._ShouldHandleRequest(test_name):
return False
html = \
open('sync/tools/testserver/synced_notification_app_info.html', 'r').\
read()
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.send_header('Content-Length', len(html))
self.end_headers()
self.wfile.write(html)
return True
class SyncServerRunner(testserver_base.TestServerRunner):
"""TestServerRunner for the net test servers."""
def __init__(self):
super(SyncServerRunner, self).__init__()
def create_server(self, server_data):
port = self.options.port
host = self.options.host
xmpp_port = self.options.xmpp_port
server = SyncHTTPServer((host, port), xmpp_port, SyncPageHandler)
print ('Sync HTTP server started at %s:%d/chromiumsync...' %
(host, server.server_port))
print ('Fake OAuth2 Token server started at %s:%d/o/oauth2/token...' %
(host, server.server_port))
print ('Sync XMPP server started at %s:%d...' %
(host, server.xmpp_port))
server_data['port'] = server.server_port
server_data['xmpp_port'] = server.xmpp_port
return server
def run_server(self):
testserver_base.TestServerRunner.run_server(self)
def add_options(self):
testserver_base.TestServerRunner.add_options(self)
self.option_parser.add_option('--xmpp-port', default='0', type='int',
help='Port used by the XMPP server. If '
'unspecified, the XMPP server will listen on '
'an ephemeral port.')
# Override the default logfile name used in testserver.py.
self.option_parser.set_defaults(log_file='sync_testserver.log')
if __name__ == '__main__':
sys.exit(SyncServerRunner().main())
|
|
# Copyright (C) 2013 by Yanbo Ye (yeyanbo289@gmail.com)
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Classes and methods for tree construction"""
__docformat__ = "restructuredtext en"
import itertools
import copy
from Bio.Phylo import BaseTree
from Bio.Align import MultipleSeqAlignment
from Bio.SubsMat import MatrixInfo
from Bio import _py3k
def _is_numeric(x):
return _py3k._is_int_or_long(x) or isinstance(x, (float, complex))
class _Matrix(object):
"""Base class for distance matrix or scoring matrix
Accepts a list of names and a lower triangular matrix.::
matrix = [[0],
[1, 0],
[2, 3, 0],
[4, 5, 6, 0]]
represents the symmetric matrix of
[0,1,2,4]
[1,0,3,5]
[2,3,0,6]
[4,5,6,0]
:Parameters:
names : list
names of elements, used for indexing
matrix : list
nested list of numerical lists in lower triangular format
Example
-------
>>> from Bio.Phylo.TreeConstruction import _Matrix
>>> names = ['Alpha', 'Beta', 'Gamma', 'Delta']
>>> matrix = [[0], [1, 0], [2, 3, 0], [4, 5, 6, 0]]
>>> m = _Matrix(names, matrix)
>>> m
_Matrix(names=['Alpha', 'Beta', 'Gamma', 'Delta'], matrix=[[0], [1, 0], [2, 3, 0], [4, 5, 6, 0]])
You can use two indices to get or assign an element in the matrix.
>>> m[1,2]
3
>>> m['Beta','Gamma']
3
>>> m['Beta','Gamma'] = 4
>>> m['Beta','Gamma']
4
    Furthermore, you can use one index to get or assign a list of elements related to that index.
>>> m[0]
[0, 1, 2, 4]
>>> m['Alpha']
[0, 1, 2, 4]
>>> m['Alpha'] = [0, 7, 8, 9]
>>> m[0]
[0, 7, 8, 9]
>>> m[0,1]
7
    Also, you can delete or insert a column & row of elements by index.
>>> m
_Matrix(names=['Alpha', 'Beta', 'Gamma', 'Delta'], matrix=[[0], [7, 0], [8, 4, 0], [9, 5, 6, 0]])
>>> del m['Alpha']
>>> m
_Matrix(names=['Beta', 'Gamma', 'Delta'], matrix=[[0], [4, 0], [5, 6, 0]])
>>> m.insert('Alpha', [0, 7, 8, 9] , 0)
>>> m
_Matrix(names=['Alpha', 'Beta', 'Gamma', 'Delta'], matrix=[[0], [7, 0], [8, 4, 0], [9, 5, 6, 0]])
"""
def __init__(self, names, matrix=None):
"""Initialize matrix by a list of names and a list of
lower triangular matrix data"""
# check names
if isinstance(names, list) and all(isinstance(s, str) for s in names):
if len(set(names)) == len(names):
self.names = names
else:
raise ValueError("Duplicate names found")
else:
raise TypeError("'names' should be a list of strings")
# check matrix
if matrix is None:
# create a new one with 0 if matrix is not assigned
matrix = [[0] * i for i in range(1, len(self) + 1)]
self.matrix = matrix
else:
# check if all elements are numbers
if (isinstance(matrix, list)
and all(isinstance(l, list) for l in matrix)
and all(_is_numeric(n) for n in [item for sublist in matrix
for item in sublist])):
# check if the same length with names
if len(matrix) == len(names):
# check if is lower triangle format
if [len(m) for m in matrix] == list(range(1, len(self) + 1)):
self.matrix = matrix
else:
raise ValueError(
"'matrix' should be in lower triangle format")
else:
raise ValueError(
"'names' and 'matrix' should be the same size")
else:
raise TypeError("'matrix' should be a list of numerical lists")
def __getitem__(self, item):
"""Access value(s) by the index(s) or name(s).
For a _Matrix object 'dm'::
dm[i] get a value list from the given 'i' to others;
dm[i, j] get the value between 'i' and 'j';
dm['name'] map name to index first
dm['name1', 'name2'] map name to index first
"""
# Handle single indexing
if isinstance(item, (int, str)):
index = None
if isinstance(item, int):
index = item
elif isinstance(item, str):
if item in self.names:
index = self.names.index(item)
else:
raise ValueError("Item not found.")
else:
raise TypeError("Invalid index type.")
# check index
if index > len(self) - 1:
raise IndexError("Index out of range.")
return [self.matrix[index][i] for i in range(0, index)] + [self.matrix[i][index] for i in range(index, len(self))]
# Handle double indexing
elif len(item) == 2:
row_index = None
col_index = None
if all(isinstance(i, int) for i in item):
row_index, col_index = item
elif all(isinstance(i, str) for i in item):
row_name, col_name = item
if row_name in self.names and col_name in self.names:
row_index = self.names.index(row_name)
col_index = self.names.index(col_name)
else:
raise ValueError("Item not found.")
else:
raise TypeError("Invalid index type.")
# check index
if row_index > len(self) - 1 or col_index > len(self) - 1:
raise IndexError("Index out of range.")
if row_index > col_index:
return self.matrix[row_index][col_index]
else:
return self.matrix[col_index][row_index]
else:
raise TypeError("Invalid index type.")
def __setitem__(self, item, value):
"""Set value by the index(s) or name(s).
Similar to __getitem__::
dm[1] = [1, 0, 3, 4] set values from '1' to others;
dm[i, j] = 2 set the value from 'i' to 'j'
"""
# Handle single indexing
if isinstance(item, (int, str)):
index = None
if isinstance(item, int):
index = item
elif isinstance(item, str):
if item in self.names:
index = self.names.index(item)
else:
raise ValueError("Item not found.")
else:
raise TypeError("Invalid index type.")
# check index
if index > len(self) - 1:
raise IndexError("Index out of range.")
# check and assign value
if isinstance(value, list) and all(_is_numeric(n) for n in value):
if len(value) == len(self):
for i in range(0, index):
self.matrix[index][i] = value[i]
for i in range(index, len(self)):
self.matrix[i][index] = value[i]
else:
raise ValueError("Value not the same size.")
else:
raise TypeError("Invalid value type.")
# Handle double indexing
elif len(item) == 2:
row_index = None
col_index = None
if all(isinstance(i, int) for i in item):
row_index, col_index = item
elif all(isinstance(i, str) for i in item):
row_name, col_name = item
if row_name in self.names and col_name in self.names:
row_index = self.names.index(row_name)
col_index = self.names.index(col_name)
else:
raise ValueError("Item not found.")
else:
raise TypeError("Invalid index type.")
# check index
if row_index > len(self) - 1 or col_index > len(self) - 1:
raise IndexError("Index out of range.")
# check and assign value
if _is_numeric(value):
if row_index > col_index:
self.matrix[row_index][col_index] = value
else:
self.matrix[col_index][row_index] = value
else:
raise TypeError("Invalid value type.")
else:
raise TypeError("Invalid index type.")
def __delitem__(self, item):
"""Delete related distances by the index or name"""
index = None
if isinstance(item, int):
index = item
elif isinstance(item, str):
index = self.names.index(item)
else:
raise TypeError("Invalid index type.")
# remove distances related to index
for i in range(index + 1, len(self)):
del self.matrix[i][index]
del self.matrix[index]
# remove name
del self.names[index]
def insert(self, name, value, index=None):
"""Insert distances given the name and value.
:Parameters:
name : str
name of a row/col to be inserted
value : list
a row/col of values to be inserted
"""
if isinstance(name, str):
# insert at the given index or at the end
if index is None:
index = len(self)
if not isinstance(index, int):
raise TypeError("Invalid index type.")
# insert name
self.names.insert(index, name)
# insert elements of 0, to be assigned
self.matrix.insert(index, [0] * index)
for i in range(index, len(self)):
self.matrix[i].insert(index, 0)
# assign value
self[index] = value
else:
raise TypeError("Invalid name type.")
def __len__(self):
"""Matrix length"""
return len(self.names)
def __repr__(self):
return self.__class__.__name__ \
+ "(names=%s, matrix=%s)" \
% tuple(map(repr, (self.names, self.matrix)))
def __str__(self):
"""Get a lower triangular matrix string"""
matrix_string = '\n'.join(
[self.names[i] + "\t" + "\t".join([str(n) for n in self.matrix[i]])
for i in range(0, len(self))])
matrix_string = matrix_string + "\n\t" + "\t".join(self.names)
return matrix_string
class _DistanceMatrix(_Matrix):
"""Distance matrix class that can be used for distance based tree algorithms.
All diagonal elements will be zero no matter what the users provide.
"""
def __init__(self, names, matrix=None):
_Matrix.__init__(self, names, matrix)
self._set_zero_diagonal()
def __setitem__(self, item, value):
_Matrix.__setitem__(self, item, value)
self._set_zero_diagonal()
def _set_zero_diagonal(self):
"""set all diagonal elements to zero"""
for i in range(0, len(self)):
self.matrix[i][i] = 0
class DistanceCalculator(object):
"""Class to calculate the distance matrix from a DNA or Protein
Multiple Sequence Alignment(MSA) and the given name of the
substitution model.
Currently only scoring matrices are used.
:Parameters:
model : str
Name of the model matrix to be used to calculate distance.
The attribute `dna_matrices` contains the available model
names for DNA sequences and `protein_matrices` for protein
sequences.
Example
-------
>>> from Bio.Phylo.TreeConstruction import DistanceCalculator
>>> from Bio import AlignIO
>>> aln = AlignIO.read(open('Tests/TreeConstruction/msa.phy'), 'phylip')
>>> print aln
SingleLetterAlphabet() alignment with 5 rows and 13 columns
AACGTGGCCACAT Alpha
AAGGTCGCCACAC Beta
GAGATTTCCGCCT Delta
GAGATCTCCGCCC Epsilon
CAGTTCGCCACAA Gamma
DNA calculator with 'identity' model::
>>> calculator = DistanceCalculator('identity')
>>> dm = calculator.get_distance(aln)
>>> print dm
Alpha 0
Beta 0.230769230769 0
Gamma 0.384615384615 0.230769230769 0
Delta 0.538461538462 0.538461538462 0.538461538462 0
Epsilon 0.615384615385 0.384615384615 0.461538461538 0.153846153846 0
Alpha Beta Gamma Delta Epsilon
Protein calculator with 'blosum62' model::
>>> calculator = DistanceCalculator('blosum62')
>>> dm = calculator.get_distance(aln)
>>> print dm
Alpha 0
Beta 0.369047619048 0
Gamma 0.493975903614 0.25 0
Delta 0.585365853659 0.547619047619 0.566265060241 0
Epsilon 0.7 0.355555555556 0.488888888889 0.222222222222 0
Alpha Beta Gamma Delta Epsilon
"""
dna_alphabet = ['A', 'T', 'C', 'G']
# BLAST nucleic acid scoring matrix
blastn = [[5],
[-4, 5],
[-4, -4, 5],
[-4, -4, -4, 5]]
# transition/transversion scoring matrix
trans = [[6],
[-5, 6],
[-5, -1, 6],
[-1, -5, -5, 6]]
protein_alphabet = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L',
'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'X', 'Y',
'Z']
# matrices available
dna_matrices = {'blastn': blastn, 'trans': trans}
protein_models = MatrixInfo.available_matrices
protein_matrices = dict((name, getattr(MatrixInfo, name))
for name in protein_models)
dna_models = list(dna_matrices.keys())
models = ['identity'] + dna_models + protein_models
def __init__(self, model='identity'):
"""Initialize with a distance model"""
if model == 'identity':
self.scoring_matrix = None
elif model in self.dna_models:
self.scoring_matrix = _Matrix(self.dna_alphabet,
self.dna_matrices[model])
elif model in self.protein_models:
self.scoring_matrix = self._build_protein_matrix(
self.protein_matrices[model])
else:
raise ValueError("Model not supported. Available models: "
+ ", ".join(self.models))
def _pairwise(self, seq1, seq2):
"""Calculate pairwise distance from two sequences.
        Returns a value between 0 (identical sequences) and 1 (completely
        different, or seq1 is an empty string).
"""
score = 0
max_score = 0
if self.scoring_matrix:
max_score1 = 0
max_score2 = 0
skip_letters = ['-', '*']
for i in range(0, len(seq1)):
l1 = seq1[i]
l2 = seq2[i]
if l1 in skip_letters or l2 in skip_letters:
continue
if l1 not in self.scoring_matrix.names:
raise ValueError("Bad alphabet '%s' in sequence '%s' at position '%s'"
% (l1, seq1.id, i))
if l2 not in self.scoring_matrix.names:
raise ValueError("Bad alphabet '%s' in sequence '%s' at position '%s'"
% (l2, seq2.id, i))
max_score1 += self.scoring_matrix[l1, l1]
max_score2 += self.scoring_matrix[l2, l2]
score += self.scoring_matrix[l1, l2]
# Take the higher score if the matrix is asymmetrical
max_score = max(max_score1, max_score2)
else:
# Score by character identity, not skipping any special letters
for i in range(0, len(seq1)):
l1 = seq1[i]
l2 = seq2[i]
if l1 == l2:
score += 1
max_score = len(seq1)
if max_score == 0:
return 1 # max possible scaled distance
return 1 - (score * 1.0 / max_score)
def get_distance(self, msa):
"""Return a _DistanceMatrix for MSA object
:Parameters:
msa : MultipleSeqAlignment
DNA or Protein multiple sequence alignment.
"""
if not isinstance(msa, MultipleSeqAlignment):
raise TypeError("Must provide a MultipleSeqAlignment object.")
names = [s.id for s in msa]
dm = _DistanceMatrix(names)
for seq1, seq2 in itertools.combinations(msa, 2):
dm[seq1.id, seq2.id] = self._pairwise(seq1, seq2)
return dm
def _build_protein_matrix(self, subsmat):
"""Convert matrix from SubsMat format to _Matrix object"""
protein_matrix = _Matrix(self.protein_alphabet)
for k, v in subsmat.items():
aa1, aa2 = k
protein_matrix[aa1, aa2] = v
return protein_matrix
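# Worked example (sketch) for DistanceCalculator._pairwise above: with the
# default 'identity' model, two aligned sequences of length 4 that differ at
# exactly one position give score = 3 and max_score = 4, so the scaled
# distance is 1 - 3.0 / 4 = 0.25.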
class TreeConstructor(object):
"""Base class for all tree constructor."""
def build_tree(self, msa):
"""Caller to built the tree from a MultipleSeqAlignment object.
This should be implemented in subclass"""
raise NotImplementedError("Method not implemented!")
class DistanceTreeConstructor(TreeConstructor):
"""Distance based tree constructor.
:Parameters:
method : str
Distance tree construction method, 'nj'(default) or 'upgma'.
distance_calculator : DistanceCalculator
The distance matrix calculator for multiple sequence alignment.
It must be provided if `build_tree` will be called.
Example
--------
>>> from TreeConstruction import DistanceTreeConstructor
>>> constructor = DistanceTreeConstructor()
UPGMA Tree:
>>> upgmatree = constructor.upgma(dm)
>>> print upgmatree
Tree(rooted=True)
Clade(name='Inner4')
Clade(branch_length=0.171955155115, name='Inner1')
Clade(branch_length=0.111111111111, name='Epsilon')
Clade(branch_length=0.111111111111, name='Delta')
Clade(branch_length=0.0673103855608, name='Inner3')
Clade(branch_length=0.0907558806655, name='Inner2')
Clade(branch_length=0.125, name='Gamma')
Clade(branch_length=0.125, name='Beta')
Clade(branch_length=0.215755880666, name='Alpha')
NJ Tree:
>>> njtree = constructor.nj(dm)
>>> print njtree
Tree(rooted=False)
Clade(name='Inner3')
Clade(branch_length=0.0142054862889, name='Inner2')
Clade(branch_length=0.239265540676, name='Inner1')
Clade(branch_length=0.0853101915988, name='Epsilon')
Clade(branch_length=0.136912030623, name='Delta')
Clade(branch_length=0.292306275042, name='Alpha')
Clade(branch_length=0.0747705106139, name='Beta')
Clade(branch_length=0.175229489386, name='Gamma')
"""
methods = ['nj', 'upgma']
def __init__(self, distance_calculator=None, method="nj"):
if (distance_calculator is None
or isinstance(distance_calculator, DistanceCalculator)):
self.distance_calculator = distance_calculator
else:
raise TypeError("Must provide a DistanceCalculator object.")
if isinstance(method, str) and method in self.methods:
self.method = method
else:
raise TypeError("Bad method: " + method +
". Available methods: " + ", ".join(self.methods))
def build_tree(self, msa):
if self.distance_calculator:
dm = self.distance_calculator.get_distance(msa)
tree = None
if self.method == 'upgma':
tree = self.upgma(dm)
else:
tree = self.nj(dm)
return tree
else:
raise TypeError("Must provide a DistanceCalculator object.")
def upgma(self, distance_matrix):
"""Construct and return an UPGMA tree.
Constructs and returns an Unweighted Pair Group Method
with Arithmetic mean (UPGMA) tree.
:Parameters:
distance_matrix : _DistanceMatrix
The distance matrix for tree construction.
"""
if not isinstance(distance_matrix, _DistanceMatrix):
raise TypeError("Must provide a _DistanceMatrix object.")
# make a copy of the distance matrix to be used
dm = copy.deepcopy(distance_matrix)
# init terminal clades
clades = [BaseTree.Clade(None, name) for name in dm.names]
# init minimum index
min_i = 0
min_j = 0
inner_count = 0
while len(dm) > 1:
min_dist = dm[1, 0]
# find minimum index
for i in range(1, len(dm)):
for j in range(0, i):
if min_dist >= dm[i, j]:
min_dist = dm[i, j]
min_i = i
min_j = j
# create clade
clade1 = clades[min_i]
clade2 = clades[min_j]
inner_count += 1
inner_clade = BaseTree.Clade(None, "Inner" + str(inner_count))
inner_clade.clades.append(clade1)
inner_clade.clades.append(clade2)
# assign branch length
if clade1.is_terminal():
clade1.branch_length = min_dist * 1.0 / 2
else:
clade1.branch_length = min_dist * \
1.0 / 2 - self._height_of(clade1)
if clade2.is_terminal():
clade2.branch_length = min_dist * 1.0 / 2
else:
clade2.branch_length = min_dist * \
1.0 / 2 - self._height_of(clade2)
# update node list
clades[min_j] = inner_clade
del clades[min_i]
# rebuild distance matrix,
# set the distances of new node at the index of min_j
for k in range(0, len(dm)):
if k != min_i and k != min_j:
dm[min_j, k] = (dm[min_i, k] + dm[min_j, k]) * 1.0 / 2
dm.names[min_j] = "Inner" + str(inner_count)
del dm[min_i]
inner_clade.branch_length = 0
return BaseTree.Tree(inner_clade)
def nj(self, distance_matrix):
"""Construct and return an Neighbor Joining tree.
:Parameters:
distance_matrix : _DistanceMatrix
The distance matrix for tree construction.
"""
if not isinstance(distance_matrix, _DistanceMatrix):
raise TypeError("Must provide a _DistanceMatrix object.")
# make a copy of the distance matrix to be used
dm = copy.deepcopy(distance_matrix)
# init terminal clades
clades = [BaseTree.Clade(None, name) for name in dm.names]
# init node distance
node_dist = [0] * len(dm)
# init minimum index
min_i = 0
min_j = 0
inner_count = 0
while len(dm) > 2:
# calculate nodeDist
for i in range(0, len(dm)):
node_dist[i] = 0
for j in range(0, len(dm)):
node_dist[i] += dm[i, j]
node_dist[i] = node_dist[i] / (len(dm) - 2)
# find minimum distance pair
min_dist = dm[1, 0] - node_dist[1] - node_dist[0]
min_i = 0
min_j = 1
for i in range(1, len(dm)):
for j in range(0, i):
temp = dm[i, j] - node_dist[i] - node_dist[j]
if min_dist > temp:
min_dist = temp
min_i = i
min_j = j
# create clade
clade1 = clades[min_i]
clade2 = clades[min_j]
inner_count += 1
inner_clade = BaseTree.Clade(None, "Inner" + str(inner_count))
inner_clade.clades.append(clade1)
inner_clade.clades.append(clade2)
# assign branch length
clade1.branch_length = (dm[min_i, min_j] + node_dist[min_i]
- node_dist[min_j]) / 2.0
clade2.branch_length = dm[min_i, min_j] - clade1.branch_length
# update node list
clades[min_j] = inner_clade
del clades[min_i]
# rebuild distance matrix,
# set the distances of new node at the index of min_j
for k in range(0, len(dm)):
if k != min_i and k != min_j:
dm[min_j, k] = (dm[min_i, k] + dm[min_j, k]
- dm[min_i, min_j]) / 2.0
dm.names[min_j] = "Inner" + str(inner_count)
del dm[min_i]
# set the last clade as one of the child of the inner_clade
root = None
if clades[0] == inner_clade:
clades[0].branch_length = 0
clades[1].branch_length = dm[1, 0]
clades[0].clades.append(clades[1])
root = clades[0]
else:
clades[0].branch_length = dm[1, 0]
clades[1].branch_length = 0
clades[1].clades.append(clades[0])
root = clades[1]
return BaseTree.Tree(root, rooted=False)
def _height_of(self, clade):
"""calculate clade height -- the longest path to any terminal."""
height = 0
if clade.is_terminal():
height = clade.branch_length
else:
height = height + max([self._height_of(c) for c in clade.clades])
return height
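# Illustrative helper (not part of the original API): a minimal sketch of how
# DistanceCalculator and DistanceTreeConstructor fit together. The alignment
# path is an assumption that matches the doctest examples above.
def _example_build_nj_tree(path='Tests/TreeConstruction/msa.phy'):
    """Build a Neighbor Joining tree from a PHYLIP alignment."""
    from Bio import AlignIO
    aln = AlignIO.read(open(path), 'phylip')
    calculator = DistanceCalculator('identity')
    constructor = DistanceTreeConstructor(calculator, 'nj')
    return constructor.build_tree(aln)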
# #################### Tree Scoring and Searching Classes #####################
class Scorer(object):
"""Base class for all tree scoring methods"""
def get_score(self, tree, alignment):
"""Caller to get the score of a tree for the given alignment.
        This should be implemented in a subclass."""
raise NotImplementedError("Method not implemented!")
class TreeSearcher(object):
"""Base class for all tree searching methods"""
def search(self, starting_tree, alignment):
"""Caller to search the best tree with a starting tree.
        This should be implemented in a subclass."""
raise NotImplementedError("Method not implemented!")
class NNITreeSearcher(TreeSearcher):
"""Tree searching with Nearest Neighbor Interchanges (NNI) algorithm.
:Parameters:
scorer : ParsimonyScorer
parsimony scorer to calculate the parsimony score of
different trees during NNI algorithm.
"""
def __init__(self, scorer):
if isinstance(scorer, Scorer):
self.scorer = scorer
else:
raise TypeError("Must provide a Scorer object.")
def search(self, starting_tree, alignment):
"""Implement the TreeSearcher.search method.
:Parameters:
starting_tree : Tree
starting tree of NNI method.
alignment : MultipleSeqAlignment
multiple sequence alignment used to calculate parsimony
score of different NNI trees.
"""
return self._nni(starting_tree, alignment)
def _nni(self, starting_tree, alignment):
"""Search for the best parsimony tree using the NNI algorithm."""
best_tree = starting_tree
while True:
best_score = self.scorer.get_score(best_tree, alignment)
temp = best_score
for t in self._get_neighbors(best_tree):
score = self.scorer.get_score(t, alignment)
if score < best_score:
best_score = score
best_tree = t
# stop if no smaller score exist
if best_score >= temp:
break
return best_tree
def _get_neighbors(self, tree):
"""Get all neighbor trees of the given tree.
Currently only for binary rooted trees.
"""
# make child to parent dict
parents = {}
for clade in tree.find_clades():
if clade != tree.root:
node_path = tree.get_path(clade)
# cannot get the parent if the parent is root. Bug?
if len(node_path) == 1:
parents[clade] = tree.root
else:
parents[clade] = node_path[-2]
neighbors = []
root_childs = []
for clade in tree.get_nonterminals(order="level"):
if clade == tree.root:
left = clade.clades[0]
right = clade.clades[1]
root_childs.append(left)
root_childs.append(right)
if not left.is_terminal() and not right.is_terminal():
# make changes around the left_left clade
# left_left = left.clades[0]
left_right = left.clades[1]
right_left = right.clades[0]
right_right = right.clades[1]
                    # neighbor 1 (left_left + right_right)
del left.clades[1]
del right.clades[1]
left.clades.append(right_right)
right.clades.append(left_right)
temp_tree = copy.deepcopy(tree)
neighbors.append(temp_tree)
# neighbor 2 (left_left + right_left)
del left.clades[1]
del right.clades[0]
left.clades.append(right_left)
right.clades.append(right_right)
temp_tree = copy.deepcopy(tree)
neighbors.append(temp_tree)
# change back (left_left + left_right)
del left.clades[1]
del right.clades[0]
left.clades.append(left_right)
right.clades.insert(0, right_left)
elif clade in root_childs:
# skip root child
continue
else:
# method for other clades
# make changes around the parent clade
left = clade.clades[0]
right = clade.clades[1]
parent = parents[clade]
if clade == parent.clades[0]:
sister = parent.clades[1]
# neighbor 1 (parent + right)
del parent.clades[1]
del clade.clades[1]
parent.clades.append(right)
clade.clades.append(sister)
temp_tree = copy.deepcopy(tree)
neighbors.append(temp_tree)
# neighbor 2 (parent + left)
del parent.clades[1]
del clade.clades[0]
parent.clades.append(left)
clade.clades.append(right)
temp_tree = copy.deepcopy(tree)
neighbors.append(temp_tree)
# change back (parent + sister)
del parent.clades[1]
del clade.clades[0]
parent.clades.append(sister)
clade.clades.insert(0, left)
else:
sister = parent.clades[0]
# neighbor 1 (parent + right)
del parent.clades[0]
del clade.clades[1]
parent.clades.insert(0, right)
clade.clades.append(sister)
temp_tree = copy.deepcopy(tree)
neighbors.append(temp_tree)
# neighbor 2 (parent + left)
del parent.clades[0]
del clade.clades[0]
parent.clades.insert(0, left)
clade.clades.append(right)
temp_tree = copy.deepcopy(tree)
neighbors.append(temp_tree)
# change back (parent + sister)
del parent.clades[0]
del clade.clades[0]
parent.clades.insert(0, sister)
clade.clades.insert(0, left)
return neighbors
# ######################## Parsimony Classes ##########################
class ParsimonyScorer(Scorer):
"""Parsimony scorer with a scoring matrix.
    This is a combination of the Fitch algorithm and the Sankoff algorithm.
See ParsimonyTreeConstructor for usage.
:Parameters:
matrix : _Matrix
scoring matrix used in parsimony score calculation.
"""
def __init__(self, matrix=None):
if not matrix or isinstance(matrix, _Matrix):
self.matrix = matrix
else:
raise TypeError("Must provide a _Matrix object.")
def get_score(self, tree, alignment):
"""Calculate and return the parsimony score given a tree and
the MSA using the Fitch algorithm without the penalty matrix
the Sankoff algorithm with the matrix"""
# make sure the tree is rooted and bifurcating
if not tree.is_bifurcating():
raise ValueError("The tree provided should be bifurcating.")
if not tree.rooted:
tree.root_at_midpoint()
# sort tree terminals and alignment
terms = tree.get_terminals()
terms.sort(key=lambda term: term.name)
alignment.sort()
if not all([t.name == a.id for t, a in zip(terms, alignment)]):
raise ValueError(
"Taxon names of the input tree should be the same with the alignment.")
# term_align = dict(zip(terms, alignment))
score = 0
for i in range(len(alignment[0])):
# parsimony score for column_i
score_i = 0
# get column
column_i = alignment[:, i]
# skip non-informative column
if column_i == len(column_i) * column_i[0]:
continue
# start calculating score_i using the tree and column_i
# Fitch algorithm without the penalty matrix
if not self.matrix:
# init by mapping terminal clades and states in column_i
clade_states = dict(zip(terms, [set([c]) for c in column_i]))
for clade in tree.get_nonterminals(order="postorder"):
clade_childs = clade.clades
left_state = clade_states[clade_childs[0]]
right_state = clade_states[clade_childs[1]]
state = left_state & right_state
if not state:
state = left_state | right_state
score_i = score_i + 1
clade_states[clade] = state
# Sankoff algorithm with the penalty matrix
else:
inf = float('inf')
# init score arrays for terminal clades
alphabet = self.matrix.names
length = len(alphabet)
clade_scores = {}
for j in range(len(column_i)):
array = [inf] * length
index = alphabet.index(column_i[j])
array[index] = 0
clade_scores[terms[j]] = array
# bottom up calculation
for clade in tree.get_nonterminals(order="postorder"):
clade_childs = clade.clades
left_score = clade_scores[clade_childs[0]]
right_score = clade_scores[clade_childs[1]]
array = []
for m in range(length):
min_l = inf
min_r = inf
for n in range(length):
sl = self.matrix[
alphabet[m], alphabet[n]] + left_score[n]
sr = self.matrix[
alphabet[m], alphabet[n]] + right_score[n]
if min_l > sl:
min_l = sl
if min_r > sr:
min_r = sr
array.append(min_l + min_r)
clade_scores[clade] = array
# minimum from root score
score_i = min(array)
# TODO: resolve internal states
score = score + score_i
return score
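# Worked example (sketch) of the Fitch step in ParsimonyScorer.get_score:
# for a cherry whose two leaves carry states {'A'} and {'G'}, the
# intersection is empty, so the union {'A', 'G'} is propagated to the parent
# and the column score increases by 1; leaves carrying {'A'} and {'A'} would
# keep the intersection {'A'} at no extra cost.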
class ParsimonyTreeConstructor(TreeConstructor):
"""Parsimony tree constructor.
:Parameters:
searcher : TreeSearcher
tree searcher to search the best parsimony tree.
starting_tree : Tree
starting tree provided to the searcher.
Example
--------
>>> from Bio import AlignIO
>>> from TreeConstruction import *
>>> aln = AlignIO.read(open('Tests/TreeConstruction/msa.phy'), 'phylip')
>>> print aln
SingleLetterAlphabet() alignment with 5 rows and 13 columns
AACGTGGCCACAT Alpha
AAGGTCGCCACAC Beta
GAGATTTCCGCCT Delta
GAGATCTCCGCCC Epsilon
CAGTTCGCCACAA Gamma
>>> starting_tree = Phylo.read('Tests/TreeConstruction/nj.tre', 'newick')
    >>> print starting_tree
Tree(weight=1.0, rooted=False)
Clade(branch_length=0.0, name='Inner3')
Clade(branch_length=0.01421, name='Inner2')
Clade(branch_length=0.23927, name='Inner1')
Clade(branch_length=0.08531, name='Epsilon')
Clade(branch_length=0.13691, name='Delta')
Clade(branch_length=0.29231, name='Alpha')
Clade(branch_length=0.07477, name='Beta')
Clade(branch_length=0.17523, name='Gamma')
>>> from TreeConstruction import *
>>> scorer = ParsimonyScorer()
>>> searcher = NNITreeSearcher(scorer)
>>> constructor = ParsimonyTreeConstructor(searcher, starting_tree)
>>> pars_tree = constructor.build_tree(aln)
>>> print pars_tree
Tree(weight=1.0, rooted=True)
Clade(branch_length=0.0)
Clade(branch_length=0.197335, name='Inner1')
Clade(branch_length=0.13691, name='Delta')
Clade(branch_length=0.08531, name='Epsilon')
Clade(branch_length=0.041935, name='Inner2')
Clade(branch_length=0.01421, name='Inner3')
Clade(branch_length=0.17523, name='Gamma')
Clade(branch_length=0.07477, name='Beta')
Clade(branch_length=0.29231, name='Alpha')
"""
def __init__(self, searcher, starting_tree=None):
self.searcher = searcher
self.starting_tree = starting_tree
def build_tree(self, alignment):
"""Build the tree.
:Parameters:
alignment : MultipleSeqAlignment
multiple sequence alignment to calculate parsimony tree.
"""
        # If starting_tree is None, create a UPGMA tree with the
        # 'identity' distance model.
if self.starting_tree is None:
dtc = DistanceTreeConstructor(DistanceCalculator("identity"),
"upgma")
self.starting_tree = dtc.build_tree(alignment)
return self.searcher.search(self.starting_tree, alignment)
|
|
"""
Title: Writing a training loop from scratch
Author: [fchollet](https://twitter.com/fchollet)
Date created: 2019/03/01
Last modified: 2020/04/15
Description: Complete guide to writing low-level training & evaluation loops.
"""
"""
## Setup
"""
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import numpy as np
"""
## Introduction
Keras provides default training and evaluation loops, `fit()` and `evaluate()`.
Their usage is covered in the guide
[Training & evaluation with the built-in methods](/guides/training_with_built_in_methods/).
If you want to customize the learning algorithm of your model while still leveraging
the convenience of `fit()`
(for instance, to train a GAN using `fit()`), you can subclass the `Model` class and
implement your own `train_step()` method, which
is called repeatedly during `fit()`. This is covered in the guide
[Customizing what happens in `fit()`](/guides/customizing_what_happens_in_fit/); a
minimal sketch of that route appears just below.
Now, if you want very low-level control over training & evaluation, you should write
your own training & evaluation loops from scratch. This is what this guide is about.
"""
"""
## Using the `GradientTape`: a first end-to-end example
Calling a model inside a `GradientTape` scope enables you to retrieve the gradients of
the trainable weights of the model with respect to a loss value. Using an optimizer
instance, you can use these gradients to update these variables (which you can
retrieve using `model.trainable_weights`).
Let's consider a simple MNIST model:
"""
inputs = keras.Input(shape=(784,), name="digits")
x1 = layers.Dense(64, activation="relu")(inputs)
x2 = layers.Dense(64, activation="relu")(x1)
outputs = layers.Dense(10, name="predictions")(x2)
model = keras.Model(inputs=inputs, outputs=outputs)
"""
Let's train it using mini-batch gradient descent with a custom training loop.
First, we're going to need an optimizer, a loss function, and a dataset:
"""
# Instantiate an optimizer.
optimizer = keras.optimizers.SGD(learning_rate=1e-3)
# Instantiate a loss function.
loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
# Load the MNIST data as flattened NumPy arrays.
batch_size = 64
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
x_train = np.reshape(x_train, (-1, 784))
x_test = np.reshape(x_test, (-1, 784))
# Reserve 10,000 samples for validation.
x_val = x_train[-10000:]
y_val = y_train[-10000:]
x_train = x_train[:-10000]
y_train = y_train[:-10000]
# Prepare the training dataset.
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(batch_size)
# Prepare the validation dataset.
val_dataset = tf.data.Dataset.from_tensor_slices((x_val, y_val))
val_dataset = val_dataset.batch(batch_size)
"""
Here's our training loop:
- We open a `for` loop that iterates over epochs
- For each epoch, we open a `for` loop that iterates over the dataset, in batches
- For each batch, we open a `GradientTape()` scope
- Inside this scope, we call the model (forward pass) and compute the loss
- Outside the scope, we retrieve the gradients of the weights
of the model with regard to the loss
- Finally, we use the optimizer to update the weights of the model based on the
gradients
"""
epochs = 2
for epoch in range(epochs):
print("\nStart of epoch %d" % (epoch,))
# Iterate over the batches of the dataset.
for step, (x_batch_train, y_batch_train) in enumerate(train_dataset):
# Open a GradientTape to record the operations run
# during the forward pass, which enables auto-differentiation.
with tf.GradientTape() as tape:
# Run the forward pass of the layer.
# The operations that the layer applies
# to its inputs are going to be recorded
# on the GradientTape.
logits = model(x_batch_train, training=True) # Logits for this minibatch
# Compute the loss value for this minibatch.
loss_value = loss_fn(y_batch_train, logits)
# Use the gradient tape to automatically retrieve
# the gradients of the trainable variables with respect to the loss.
grads = tape.gradient(loss_value, model.trainable_weights)
# Run one step of gradient descent by updating
# the value of the variables to minimize the loss.
optimizer.apply_gradients(zip(grads, model.trainable_weights))
# Log every 200 batches.
if step % 200 == 0:
print(
"Training loss (for one batch) at step %d: %.4f"
% (step, float(loss_value))
)
print("Seen so far: %s samples" % ((step + 1) * batch_size))
"""
## Low-level handling of metrics
Let's add metrics monitoring to this basic loop.
You can readily reuse the built-in metrics (or custom ones you wrote) in such training
loops written from scratch. Here's the flow:
- Instantiate the metric at the start of the loop
- Call `metric.update_state()` after each batch
- Call `metric.result()` when you need to display the current value of the metric
- Call `metric.reset_states()` when you need to clear the state of the metric
(typically at the end of an epoch)
Let's use this knowledge to compute `SparseCategoricalAccuracy` on training and
validation data at the end of each epoch:
"""
# Get model
inputs = keras.Input(shape=(784,), name="digits")
x = layers.Dense(64, activation="relu", name="dense_1")(inputs)
x = layers.Dense(64, activation="relu", name="dense_2")(x)
outputs = layers.Dense(10, name="predictions")(x)
model = keras.Model(inputs=inputs, outputs=outputs)
# Instantiate an optimizer to train the model.
optimizer = keras.optimizers.SGD(learning_rate=1e-3)
# Instantiate a loss function.
loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
# Prepare the metrics.
train_acc_metric = keras.metrics.SparseCategoricalAccuracy()
val_acc_metric = keras.metrics.SparseCategoricalAccuracy()
"""
Here's our training & evaluation loop:
"""
import time
epochs = 2
for epoch in range(epochs):
print("\nStart of epoch %d" % (epoch,))
start_time = time.time()
# Iterate over the batches of the dataset.
for step, (x_batch_train, y_batch_train) in enumerate(train_dataset):
with tf.GradientTape() as tape:
logits = model(x_batch_train, training=True)
loss_value = loss_fn(y_batch_train, logits)
grads = tape.gradient(loss_value, model.trainable_weights)
optimizer.apply_gradients(zip(grads, model.trainable_weights))
# Update training metric.
train_acc_metric.update_state(y_batch_train, logits)
# Log every 200 batches.
if step % 200 == 0:
print(
"Training loss (for one batch) at step %d: %.4f"
% (step, float(loss_value))
)
print("Seen so far: %d samples" % ((step + 1) * batch_size))
# Display metrics at the end of each epoch.
train_acc = train_acc_metric.result()
print("Training acc over epoch: %.4f" % (float(train_acc),))
# Reset training metrics at the end of each epoch
train_acc_metric.reset_states()
# Run a validation loop at the end of each epoch.
for x_batch_val, y_batch_val in val_dataset:
val_logits = model(x_batch_val, training=False)
# Update val metrics
val_acc_metric.update_state(y_batch_val, val_logits)
val_acc = val_acc_metric.result()
val_acc_metric.reset_states()
print("Validation acc: %.4f" % (float(val_acc),))
print("Time taken: %.2fs" % (time.time() - start_time))
"""
## Speeding-up your training step with `tf.function`
The default runtime in TensorFlow 2 is
[eager execution](https://www.tensorflow.org/guide/eager).
As such, our training loop above executes eagerly.
This is great for debugging, but graph compilation has a definite performance
advantage. Describing your computation as a static graph enables the framework
to apply global performance optimizations. This is impossible when
the framework is constrained to greedily execute one operation after another,
with no knowledge of what comes next.
You can compile into a static graph any function that takes tensors as input.
Just add a `@tf.function` decorator on it, like this:
"""
@tf.function
def train_step(x, y):
with tf.GradientTape() as tape:
logits = model(x, training=True)
loss_value = loss_fn(y, logits)
grads = tape.gradient(loss_value, model.trainable_weights)
optimizer.apply_gradients(zip(grads, model.trainable_weights))
train_acc_metric.update_state(y, logits)
return loss_value
"""
Let's do the same with the evaluation step:
"""
@tf.function
def test_step(x, y):
val_logits = model(x, training=False)
val_acc_metric.update_state(y, val_logits)
"""
Now, let's re-run our training loop with this compiled training step:
"""
import time
epochs = 2
for epoch in range(epochs):
print("\nStart of epoch %d" % (epoch,))
start_time = time.time()
# Iterate over the batches of the dataset.
for step, (x_batch_train, y_batch_train) in enumerate(train_dataset):
loss_value = train_step(x_batch_train, y_batch_train)
# Log every 200 batches.
if step % 200 == 0:
print(
"Training loss (for one batch) at step %d: %.4f"
% (step, float(loss_value))
)
print("Seen so far: %d samples" % ((step + 1) * batch_size))
# Display metrics at the end of each epoch.
train_acc = train_acc_metric.result()
print("Training acc over epoch: %.4f" % (float(train_acc),))
# Reset training metrics at the end of each epoch
train_acc_metric.reset_states()
# Run a validation loop at the end of each epoch.
for x_batch_val, y_batch_val in val_dataset:
test_step(x_batch_val, y_batch_val)
val_acc = val_acc_metric.result()
val_acc_metric.reset_states()
print("Validation acc: %.4f" % (float(val_acc),))
print("Time taken: %.2fs" % (time.time() - start_time))
"""
Much faster, isn't it?
"""
"""
## Low-level handling of losses tracked by the model
Layers & models recursively track any losses created during the forward pass
by layers that call `self.add_loss(value)`. The resulting list of scalar loss
values are available via the property `model.losses`
at the end of the forward pass.
If you want to be using these loss components, you should sum them
and add them to the main loss in your training step.
Consider this layer, that creates an activity regularization loss:
"""
class ActivityRegularizationLayer(layers.Layer):
def call(self, inputs):
self.add_loss(1e-2 * tf.reduce_sum(inputs))
return inputs
"""
Let's build a really simple model that uses it:
"""
inputs = keras.Input(shape=(784,), name="digits")
x = layers.Dense(64, activation="relu")(inputs)
# Insert activity regularization as a layer
x = ActivityRegularizationLayer()(x)
x = layers.Dense(64, activation="relu")(x)
outputs = layers.Dense(10, name="predictions")(x)
model = keras.Model(inputs=inputs, outputs=outputs)
"""
Here's what our training step should look like now:
"""
@tf.function
def train_step(x, y):
with tf.GradientTape() as tape:
logits = model(x, training=True)
loss_value = loss_fn(y, logits)
# Add any extra losses created during the forward pass.
loss_value += sum(model.losses)
grads = tape.gradient(loss_value, model.trainable_weights)
optimizer.apply_gradients(zip(grads, model.trainable_weights))
train_acc_metric.update_state(y, logits)
return loss_value
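"""
As a quick sanity check (a minimal sketch reusing the `train_dataset` prepared
earlier in this guide), you can run a single batch through this step and confirm
that the regularization term is folded into the loss:
"""
x_batch, y_batch = next(iter(train_dataset))
loss_with_reg = train_step(x_batch, y_batch)
print("Loss including activity regularization: %.4f" % float(loss_with_reg))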
"""
## Summary
Now you know everything there is to know about using built-in training loops and
writing your own from scratch.
To conclude, here's a simple end-to-end example that ties together everything
you've learned in this guide: a DCGAN trained on MNIST digits.
"""
"""
## End-to-end example: a GAN training loop from scratch
You may be familiar with Generative Adversarial Networks (GANs). GANs can generate new
images that look almost real, by learning the latent distribution of a training
dataset of images (the "latent space" of the images).
A GAN is made of two parts: a "generator" model that maps points in the latent
space to points in image space, a "discriminator" model, a classifier
that can tell the difference between real images (from the training dataset)
and fake images (the output of the generator network).
A GAN training loop looks like this:
1) Train the discriminator.
- Sample a batch of random points in the latent space.
- Turn the points into fake images via the "generator" model.
- Get a batch of real images and combine them with the generated images.
- Train the "discriminator" model to classify generated vs. real images.
2) Train the generator.
- Sample random points in the latent space.
- Turn the points into fake images via the "generator" network.
- Train the "generator" model to "fool" the discriminator into classifying the fake
images as real (note that the discriminator's weights are not updated in this step).
For a much more detailed overview of how GANs work, see
[Deep Learning with Python](https://www.manning.com/books/deep-learning-with-python).
Let's implement this training loop. First, create the discriminator, meant to classify
fake vs. real digits:
"""
discriminator = keras.Sequential(
[
keras.Input(shape=(28, 28, 1)),
layers.Conv2D(64, (3, 3), strides=(2, 2), padding="same"),
layers.LeakyReLU(alpha=0.2),
layers.Conv2D(128, (3, 3), strides=(2, 2), padding="same"),
layers.LeakyReLU(alpha=0.2),
layers.GlobalMaxPooling2D(),
layers.Dense(1),
],
name="discriminator",
)
discriminator.summary()
"""
Then let's create a generator network,
that turns latent vectors into outputs of shape `(28, 28, 1)` (representing
MNIST digits):
"""
latent_dim = 128
generator = keras.Sequential(
[
keras.Input(shape=(latent_dim,)),
# We want to generate 128 coefficients to reshape into a 7x7x128 map
layers.Dense(7 * 7 * 128),
layers.LeakyReLU(alpha=0.2),
layers.Reshape((7, 7, 128)),
layers.Conv2DTranspose(128, (4, 4), strides=(2, 2), padding="same"),
layers.LeakyReLU(alpha=0.2),
layers.Conv2DTranspose(128, (4, 4), strides=(2, 2), padding="same"),
layers.LeakyReLU(alpha=0.2),
layers.Conv2D(1, (7, 7), padding="same", activation="sigmoid"),
],
name="generator",
)
"""
Here's the key bit: the training loop. As you can see, it is quite straightforward. The
training step function only takes 17 lines.
"""
# Instantiate one optimizer for the discriminator and another for the generator.
d_optimizer = keras.optimizers.Adam(learning_rate=0.0003)
g_optimizer = keras.optimizers.Adam(learning_rate=0.0004)
# Instantiate a loss function.
loss_fn = keras.losses.BinaryCrossentropy(from_logits=True)
@tf.function
def train_step(real_images):
# Sample random points in the latent space
random_latent_vectors = tf.random.normal(shape=(batch_size, latent_dim))
# Decode them to fake images
generated_images = generator(random_latent_vectors)
# Combine them with real images
combined_images = tf.concat([generated_images, real_images], axis=0)
# Assemble labels discriminating real from fake images
labels = tf.concat(
[tf.ones((batch_size, 1)), tf.zeros((real_images.shape[0], 1))], axis=0
)
# Add random noise to the labels - important trick!
labels += 0.05 * tf.random.uniform(labels.shape)
# Train the discriminator
with tf.GradientTape() as tape:
predictions = discriminator(combined_images)
d_loss = loss_fn(labels, predictions)
grads = tape.gradient(d_loss, discriminator.trainable_weights)
d_optimizer.apply_gradients(zip(grads, discriminator.trainable_weights))
# Sample random points in the latent space
random_latent_vectors = tf.random.normal(shape=(batch_size, latent_dim))
# Assemble labels that say "all real images"
misleading_labels = tf.zeros((batch_size, 1))
# Train the generator (note that we should *not* update the weights
# of the discriminator)!
with tf.GradientTape() as tape:
predictions = discriminator(generator(random_latent_vectors))
g_loss = loss_fn(misleading_labels, predictions)
grads = tape.gradient(g_loss, generator.trainable_weights)
g_optimizer.apply_gradients(zip(grads, generator.trainable_weights))
return d_loss, g_loss, generated_images
"""
Let's train our GAN, by repeatedly calling `train_step` on batches of images.
Since our discriminator and generator are convnets, you're going to want to
run this code on a GPU.
"""
import os
# Prepare the dataset. We use both the training & test MNIST digits.
batch_size = 64
(x_train, _), (x_test, _) = keras.datasets.mnist.load_data()
all_digits = np.concatenate([x_train, x_test])
all_digits = all_digits.astype("float32") / 255.0
all_digits = np.reshape(all_digits, (-1, 28, 28, 1))
dataset = tf.data.Dataset.from_tensor_slices(all_digits)
dataset = dataset.shuffle(buffer_size=1024).batch(batch_size)
epochs = 1 # In practice you need at least 20 epochs to generate nice digits.
save_dir = "./"
for epoch in range(epochs):
print("\nStart epoch", epoch)
for step, real_images in enumerate(dataset):
# Train the discriminator & generator on one batch of real images.
d_loss, g_loss, generated_images = train_step(real_images)
# Logging.
if step % 200 == 0:
# Print metrics
print("discriminator loss at step %d: %.2f" % (step, d_loss))
print("adversarial loss at step %d: %.2f" % (step, g_loss))
# Save one generated image
img = keras.utils.array_to_img(generated_images[0] * 255.0, scale=False)
img.save(os.path.join(save_dir, "generated_img" + str(step) + ".png"))
# To limit execution time we stop after 10 steps.
# Remove the lines below to actually train the model!
if step > 10:
break
"""
That's it! You'll get nice-looking fake MNIST digits after just ~30s of training on the
Colab GPU.
"""
|
|
#!/usr/bin/env python3
# coding: utf-8
# vim: set ts=4 sts=4 sw=4 expandtab cc=80:
# Copyright (c) 2014, 2016, chys <admin@CHYS.INFO>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of chys <admin@CHYS.INFO> nor the names of other
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import functools
import os
import sys
import time
import numpy as np
try:
from . import _speedups
except ImportError:
_speedups = None
def is_pow2(v):
'''
>>> is_pow2(0)
False
>>> is_pow2(1)
True
>>> is_pow2(2)
True
>>> is_pow2(3)
False
'''
return v > 0 and not (v & (v - 1))
def make_numpy_array(values):
return np.array(values, dtype=np.uint32)
def most_common_element(arr, *, mode_cnt=_speedups and _speedups.mode_cnt):
"""Return the most common element of a numpy array"""
if mode_cnt:
res = mode_cnt(arr)
if res is not None:
return res[0]
u, indices = np.unique(arr, return_inverse=True)
return int(u[np.argmax(np.bincount(indices))])
def most_common_element_count(arr, *,
mode_cnt=_speedups and _speedups.mode_cnt):
"""Return the most common element of a numpy array and its count
>>> most_common_element_count(np.array([1,3,5,0,5,1,5], np.uint32))
(5, 3)
>>> most_common_element_count(np.array([1,3,5,0,5,1,5], np.int64))
(5, 3)
"""
if mode_cnt:
res = mode_cnt(arr)
if res is not None:
return res
u, indices = np.unique(arr, return_inverse=True)
bincnt = np.bincount(indices)
i = np.argmax(bincnt)
return int(u[i]), int(bincnt[i])
def is_const(array, *, is_const=_speedups and _speedups.is_const):
"""Returns if the given array is constant"""
if is_const:
res = is_const(array)
if res is not None:
return res
if array.size == 0:
return True
else:
return (array == array[0]).all()
def is_linear(array, *, is_linear=_speedups and _speedups.is_linear):
"""Returns if the given array is linear"""
if is_linear:
res = is_linear(array)
if res is not None:
return res
return is_const(slope_array(array, array.dtype.type))
def const_range(array, *, const_range=_speedups and _speedups.const_range):
"""Returns the max n, such that array[:n] is a constant array"""
if const_range:
res = const_range(array)
if res is not None:
return res
if array.size == 0:
return 0
    k = (array != array[0]).tobytes().find(1)
if k < 0:
k = array.size
return k
def range_limit(array, threshold):
    '''
    Return the max k, such that max(array[:k]) - min(array[:k]) < threshold
    >>> range_limit(np.array([1, 2, 3, 4], np.uint32), 3)
    3
    '''
ran = np.maximum.accumulate(array) - np.minimum.accumulate(array)
return int(ran.searchsorted(threshold))
def trim_brackets(s):
    '''Strip redundant outer parentheses while keeping the string balanced.
    >>> trim_brackets('((a))')
    'a'
    >>> trim_brackets('(a)+(b)')
    '(a)+(b)'
    '''
    front = 0
while front < len(s) and s[front] == '(':
front += 1
if not front:
return s
back = 0
while back < len(s) and s[-1 - back] == ')':
back += 1
if not back:
return s
trim_cnt = min(front, back)
s = s[trim_cnt:-trim_cnt]
n = 0
m = 0
for c in s:
if c == '(':
n += 1
elif c == ')':
n -= 1
m = min(m, n)
if m < 0:
s = '('*-m + s + ')'*-m
return s
def compress_array(array, n):
    '''
    Compress every n consecutive elements of an array into one,
    packing each element into 8 // n bits of the result.
    >>> compress_array(np.array([1, 2, 3, 4], np.uint32), 2).tolist()
    [33, 67]
    '''
bits = 8 // n
lo = array.size % n or n
res = array[::n].copy()
for i in range(1, lo):
res |= array[i::n] << (i * bits)
for i in range(lo, n):
res[:-1] |= array[i::n] << (i * bits)
return res
def slope_array(array, dtype=np.int64, *,
slope_array=_speedups and _speedups.slope_array):
'''
slope_array is similar to np.diff, but with speedups for certain types.
Additionally, we support output types different than the input type.
>>> slope_array(np.array([1,2,4,0,2], np.uint32), np.int64).tolist()
[1, 2, -4, 2]
'''
if slope_array:
res = slope_array(array, dtype)
if res is not None:
return res
return np.array(array[1:], dtype) - np.array(array[:-1], dtype)
def gcd_many(array, *, gcd_many=_speedups and _speedups.gcd_many):
"""
>>> gcd_many(np.array([26, 39, 52], np.uint32))
13
>>> gcd_many(np.array([4, 8, 7], np.uint32))
1
"""
if gcd_many:
res = gcd_many(array)
if res is not None:
return res
res = 0
for v in array:
v = int(v)
if not v:
continue
if res < 2:
if not res:
res = v
continue
else:
break
# We could have used fractions.gcd, but here we do it ourselves for
# better performance (fractions.gcd has no C implementation)
while v:
res, v = v, res % v
return res
def gcd_reduce(array):
    '''
    Return the max gcd, such that is_const(v % gcd for v in array)
    >>> gcd_reduce(np.array([7, 19, 31], np.uint32))
    12
    '''
array = np_unique(array)
return gcd_many(slope_array(array, array.dtype.type))
def np_unique(array, *, unique=_speedups and _speedups.unique):
'''np.unique with speedups for certain types
>>> np_unique(np.array([1,3,5,7,1,2,3,4], np.uint32)).tolist()
[1, 2, 3, 4, 5, 7]
>>> np_unique(np.array([1,3,5,7,1,2,3,4], np.int64)).tolist()
[1, 2, 3, 4, 5, 7]
'''
if unique:
res = unique(array)
if res is not None:
return res
return np.unique(array)
def np_min(array, *, min_max=_speedups and _speedups.min_max):
'''
>>> np_min(np.array([3,2,1,2,3], np.uint32))
1
'''
if min_max:
res = min_max(array, 0)
if res is not None:
return res
return int(array.min())
def np_max(array, *, min_max=_speedups and _speedups.min_max):
'''
>>> np_max(np.array([3,2,1,2,3], np.uint32))
3
'''
if min_max:
res = min_max(array, 1)
if res is not None:
return res
return int(array.max())
def np_min_by_chunk(array, chunk_size, *,
min_by_chunk=_speedups and _speedups.min_by_chunk):
'''Return the minimum values of each fixed-size chunk
_speedups only implements uint32
>>> np_min_by_chunk(np.array([1, 2, 2, 1, 3], np.uint32), 2).tolist()
[1, 1, 3]
>>> np_min_by_chunk(np.array([1, 2, 2, 1, 3], np.int64), 2).tolist()
[1, 1, 3]
'''
if min_by_chunk:
res = min_by_chunk(array, chunk_size)
if res is not None:
return res
n, = array.shape
chunks = (n + chunk_size - 1) // chunk_size
padded_size = chunks * chunk_size
if n == padded_size:
padded = array
else:
padded = np.pad(array, (0, padded_size - n), 'edge')
return np.min(np.reshape(padded, (chunks, chunk_size)), axis=1)
def np_range(array, *, min_max=_speedups and _speedups.min_max):
'''
>>> np_range(np.array([3,2,1,2,3], np.uint32))
2
'''
if min_max:
res = min_max(array, 2)
if res is not None:
return res
return int(array.max()) - int(array.min())
def np_array_equal(x, y, *, array_equal=_speedups and _speedups.array_equal):
'''
>>> np_array_equal(np.uint32([1, 2, 3]), np.uint32([1, 2, 3]))
True
>>> np_array_equal(np.uint32([1, 2, 3]), np.uint32([1, 2, 1]))
False
'''
if array_equal:
res = array_equal(x, y)
if res is not None:
return res
return (x == y).all()
def np_cycle(array, *, max_cycle=None,
np_array_equal=np_array_equal, range=range,
array_cycle=_speedups and _speedups.array_cycle):
    '''Find the minimum positive cycle of the array.
_speedups only implements uint32
>>> np_cycle(np.array([25] * 54, dtype=np.uint32))
1
>>> np_cycle(np.array([25] * 54, dtype=np.int16))
1
>>> np_cycle(np.array(list(range(100)) * 9, dtype=np.uint32))
100
>>> np_cycle(np.array(list(range(100)) * 9, dtype=np.uint64))
100
>>> np_cycle(np.array(list(range(100)) * 9 + list(range(50)), dtype=np.uint32))
100
>>> np_cycle(np.array(list(range(100)) * 9 + list(range(50)), dtype=np.uint64))
100
>>> np_cycle(np.arange(100, dtype=np.uint32))
0
>>> np_cycle(np.arange(100, dtype=np.uint64))
0
'''
if array_cycle:
res = array_cycle(array, max_cycle or 0xffffffff)
if res is not None:
return res
n, = array.shape
if n < 2:
return 0
indices, = np.nonzero(array == array[0])
ind_n, = indices.shape
if ind_n == n: # array is const
return 1
indices = indices.astype(np.uint32)
max_cycle = max_cycle or n
for i in range(1, ind_n):
k = int(indices[i])
if k > max_cycle:
break
# Check whether indices are likely correct
ok = True
for j in range(i * 2, ind_n, i):
if indices[j] != indices[j - i] + k:
ok = False
break
if not ok:
continue
# Compare array slices
tail = n % k
if tail and not np_array_equal(array[:tail], array[-tail:]):
continue
ref = array[:k]
for j in range(k, n - k + 1, k):
if not np_array_equal(ref, array[j:j+k]):
break
else:
return k
return 0
__thread_profiles = []
def __profiling_enabled():
    # The environment variable may be set or unset at run-time,
    # so do the check here rather than at import time.
return bool(os.environ.get('pyCxxLookup_Profiling'))
def profiling(func):
@functools.wraps(func)
def _func(*args, **kwargs):
if not __profiling_enabled():
return func(*args, **kwargs)
__thread_profiles.clear()
from cProfile import Profile
import pstats
import time
wall_clock = time.time()
pr = Profile()
pr.enable()
try:
return func(*args, **kwargs)
finally:
pr.disable()
wall_time = time.time() - wall_clock
print(f'Wall time: {wall_time:.3f} seconds')
ps = pstats.Stats(pr, *__thread_profiles, stream=sys.stderr)
__thread_profiles.clear()
stat_count = 30
ps.sort_stats('cumulative', 'stdname')
ps.print_stats(stat_count)
ps.sort_stats('tottime', 'stdname')
ps.print_stats(stat_count)
if os.environ.get('pyCxxLookup_Profiling_Callers'):
ps.print_callers()
return _func
def thread_profiling(func):
'''This decorator should be applied to functions run in separate threads,
so that the results are collected to the main thread
'''
@functools.wraps(func)
def _func(*args, **kwargs):
if not __profiling_enabled():
return func(*args, **kwargs)
from cProfile import Profile
try:
pr = __thread_profiles.pop()
except IndexError:
pr = Profile()
pr.enable()
try:
return func(*args, **kwargs)
finally:
pr.disable()
__thread_profiles.append(pr)
return _func
class cached_property:
def __init__(self, func):
self.func = func
self.__doc__ = func.__doc__
self.__name__ = func.__name__
self.__module__ = func.__module__
def __get__(self, obj, cls):
if obj is None:
return self
func = self.func
res = obj.__dict__[func.__name__] = func(obj)
return res
class __ThreadPoolTask:
empty = object()
def __init__(self, f, arglist):
self._f = f
self._pending = list(enumerate(arglist))
self._pending.reverse()
self.results = [self.empty] * len(arglist)
@thread_profiling
    def run_in_thread(self):
return self.run()
def run(self):
while True:
try:
i, arg = self._pending.pop()
except IndexError:
return
self.results[i] = self._f(arg)
def thread_pool_map(thread_pool, f, arglist):
'''This function is like ThreadPool.map, but it can be safely called
from a thread which is itself in the pool, without the possibility
of deadlocking.
This function also takes care of profiling already.
'''
n = len(arglist)
if n == 0:
return []
if n == 1:
return [f(arglist[0])]
task = __ThreadPoolTask(f, arglist)
async_list = [thread_pool.apply_async(task.run_in_thread)
for _ in range(min(os.cpu_count(), n) - 1)]
task.run()
# Because task.run() has completed, we're certain that the pending
# list is empty, but possibly not all results have been filled in.
# We periodically ping all async results so that exceptions are propagated.
    # Don't use `task.empty in task.results` here -- __eq__ may be overridden
while any(res is task.empty for res in task.results):
for i in range(len(async_list) - 1, -1, -1):
async_obj = async_list[i]
if async_obj.ready():
async_obj.get()
del async_list[i]
time.sleep(0.01)
    # All results are now filled in successfully, so we don't need to wait
    # for the remaining async results: they may not be formally ready yet,
    # but we know they have raised no exception.
return task.results
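# A minimal usage sketch (illustrative only; it assumes the caller supplies a
# standard-library multiprocessing.pool.ThreadPool as the pool):
#
#     from multiprocessing.pool import ThreadPool
#     pool = ThreadPool(os.cpu_count())
#     squares = thread_pool_map(pool, lambda x: x * x, [1, 2, 3, 4])
#     assert squares == [1, 4, 9, 16]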
|
|
from collections import namedtuple
from typing import Dict, List
from eth_utils import to_canonical_address
from raiden.constants import GENESIS_BLOCK_NUMBER, UINT64_MAX
from raiden.exceptions import InvalidBlockNumberInput, UnknownEventType
from raiden.network.blockchain_service import BlockChainService
from raiden.network.proxies.secret_registry import SecretRegistry
from raiden.utils import pex, typing
from raiden.utils.filters import (
StatelessFilter,
decode_event,
get_filter_args_for_all_events_from_channel,
)
from raiden.utils.typing import (
Address,
BlockSpecification,
ChannelID,
Optional,
PaymentNetworkID,
TokenNetworkAddress,
)
from raiden_contracts.constants import (
CONTRACT_SECRET_REGISTRY,
CONTRACT_TOKEN_NETWORK,
CONTRACT_TOKEN_NETWORK_REGISTRY,
EVENT_TOKEN_NETWORK_CREATED,
ChannelEvent,
)
from raiden_contracts.contract_manager import ContractManager
EventListener = namedtuple("EventListener", ("event_name", "filter", "abi"))
# `new_filter` uses None to signal the absence of topics filters
ALL_EVENTS = None
def verify_block_number(number: typing.BlockSpecification, argname: str):
if isinstance(number, int) and (number < 0 or number > UINT64_MAX):
raise InvalidBlockNumberInput(
"Provided block number {} for {} is invalid. Has to be in the range "
"of [0, UINT64_MAX]".format(number, argname)
)
def get_contract_events(
chain: BlockChainService,
abi: List[Dict],
contract_address: Address,
topics: Optional[List[str]],
from_block: BlockSpecification,
to_block: BlockSpecification,
) -> List[Dict]:
""" Query the blockchain for all events of the smart contract at
`contract_address` that match the filters `topics`, `from_block`, and
`to_block`.
"""
verify_block_number(from_block, "from_block")
verify_block_number(to_block, "to_block")
events = chain.client.get_filter_events(
contract_address, topics=topics, from_block=from_block, to_block=to_block
)
result = []
for event in events:
decoded_event = dict(decode_event(abi, event))
if event.get("blockNumber"):
decoded_event["block_number"] = event["blockNumber"]
del decoded_event["blockNumber"]
result.append(decoded_event)
return result
def get_token_network_registry_events(
chain: BlockChainService,
token_network_registry_address: PaymentNetworkID,
contract_manager: ContractManager,
events: Optional[List[str]] = ALL_EVENTS,
from_block: BlockSpecification = GENESIS_BLOCK_NUMBER,
to_block: BlockSpecification = "latest",
) -> List[Dict]:
""" Helper to get all events of the Registry contract at `registry_address`. """
return get_contract_events(
chain=chain,
abi=contract_manager.get_contract_abi(CONTRACT_TOKEN_NETWORK_REGISTRY),
contract_address=Address(token_network_registry_address),
topics=events,
from_block=from_block,
to_block=to_block,
)
def get_token_network_events(
chain: BlockChainService,
token_network_address: Address,
contract_manager: ContractManager,
events: Optional[List[str]] = ALL_EVENTS,
from_block: BlockSpecification = GENESIS_BLOCK_NUMBER,
to_block: BlockSpecification = "latest",
) -> List[Dict]:
""" Helper to get all events of the ChannelManagerContract at `token_address`. """
return get_contract_events(
chain,
contract_manager.get_contract_abi(CONTRACT_TOKEN_NETWORK),
token_network_address,
events,
from_block,
to_block,
)
def get_all_netting_channel_events(
chain: BlockChainService,
token_network_address: TokenNetworkAddress,
netting_channel_identifier: ChannelID,
contract_manager: ContractManager,
from_block: BlockSpecification = GENESIS_BLOCK_NUMBER,
to_block: BlockSpecification = "latest",
) -> List[Dict]:
""" Helper to get all events of a NettingChannelContract. """
filter_args = get_filter_args_for_all_events_from_channel(
token_network_address=token_network_address,
channel_identifier=netting_channel_identifier,
contract_manager=contract_manager,
from_block=from_block,
to_block=to_block,
)
return get_contract_events(
chain,
contract_manager.get_contract_abi(CONTRACT_TOKEN_NETWORK),
typing.Address(token_network_address),
filter_args["topics"],
from_block,
to_block,
)
def decode_event_to_internal(abi, log_event):
""" Enforce the binary for internal usage. """
# Note: All addresses inside the event_data must be decoded.
decoded_event = decode_event(abi, log_event)
if not decoded_event:
raise UnknownEventType()
# copy the attribute dict because that data structure is immutable
data = dict(decoded_event)
args = dict(data["args"])
data["args"] = args
# translate from web3's to raiden's name convention
data["block_number"] = log_event.pop("blockNumber")
data["transaction_hash"] = log_event.pop("transactionHash")
data["block_hash"] = bytes(log_event.pop("blockHash"))
assert data["block_number"], "The event must have the block_number"
assert data["transaction_hash"], "The event must have the transaction hash field"
event = data["event"]
if event == EVENT_TOKEN_NETWORK_CREATED:
args["token_network_address"] = to_canonical_address(args["token_network_address"])
args["token_address"] = to_canonical_address(args["token_address"])
elif event == ChannelEvent.OPENED:
args["participant1"] = to_canonical_address(args["participant1"])
args["participant2"] = to_canonical_address(args["participant2"])
elif event == ChannelEvent.DEPOSIT:
args["participant"] = to_canonical_address(args["participant"])
elif event == ChannelEvent.BALANCE_PROOF_UPDATED:
args["closing_participant"] = to_canonical_address(args["closing_participant"])
elif event == ChannelEvent.CLOSED:
args["closing_participant"] = to_canonical_address(args["closing_participant"])
elif event == ChannelEvent.UNLOCKED:
args["participant"] = to_canonical_address(args["participant"])
args["partner"] = to_canonical_address(args["partner"])
return Event(originating_contract=to_canonical_address(log_event["address"]), event_data=data)
class Event:
def __init__(self, originating_contract, event_data):
self.originating_contract = originating_contract
self.event_data = event_data
def __repr__(self):
return "<Event contract: {} event: {}>".format(
pex(self.originating_contract), self.event_data
)
class BlockchainEvents:
""" Events polling. """
def __init__(self):
self.event_listeners = list()
def poll_blockchain_events(self, block_number: typing.BlockNumber):
""" Poll for new blockchain events up to `block_number`. """
for event_listener in self.event_listeners:
assert isinstance(event_listener.filter, StatelessFilter)
for log_event in event_listener.filter.get_new_entries(block_number):
yield decode_event_to_internal(event_listener.abi, log_event)
def uninstall_all_event_listeners(self):
for listener in self.event_listeners:
if listener.filter.filter_id:
listener.filter.web3.eth.uninstallFilter(listener.filter.filter_id)
self.event_listeners = list()
def add_event_listener(self, event_name, eth_filter, abi):
existing_listeners = [x.event_name for x in self.event_listeners]
if event_name in existing_listeners:
return
event = EventListener(event_name, eth_filter, abi)
self.event_listeners.append(event)
def add_token_network_registry_listener(
self,
token_network_registry_proxy,
contract_manager,
from_block: typing.BlockSpecification = "latest",
):
token_new_filter = token_network_registry_proxy.tokenadded_filter(from_block=from_block)
token_network_registry_address = token_network_registry_proxy.address
self.add_event_listener(
"TokenNetworkRegistry {}".format(pex(token_network_registry_address)),
token_new_filter,
contract_manager.get_contract_abi(CONTRACT_TOKEN_NETWORK_REGISTRY),
)
def add_token_network_listener(
self,
token_network_proxy,
contract_manager: ContractManager,
from_block: typing.BlockSpecification = "latest",
):
token_network_filter = token_network_proxy.all_events_filter(from_block=from_block)
token_network_address = token_network_proxy.address
self.add_event_listener(
"TokenNetwork {}".format(pex(token_network_address)),
token_network_filter,
contract_manager.get_contract_abi(CONTRACT_TOKEN_NETWORK),
)
def add_secret_registry_listener(
self,
secret_registry_proxy: SecretRegistry,
contract_manager: ContractManager,
from_block: typing.BlockSpecification = "latest",
):
secret_registry_filter = secret_registry_proxy.secret_registered_filter(
from_block=from_block
)
secret_registry_address = secret_registry_proxy.address
self.add_event_listener(
"SecretRegistry {}".format(pex(secret_registry_address)),
secret_registry_filter,
contract_manager.get_contract_abi(CONTRACT_SECRET_REGISTRY),
)
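# Illustrative polling flow (hypothetical wiring; the proxies and
# contract_manager come from the surrounding raiden service setup):
#
#     blockchain_events = BlockchainEvents()
#     blockchain_events.add_token_network_listener(token_network_proxy, contract_manager)
#     for event in blockchain_events.poll_blockchain_events(block_number):
#         handle(event)  # `handle` is a placeholder for the caller's dispatch logic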
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import copy
import heapq
import eventlet
import six
from oslo_log import log as logging
import nova.conf
from nova import context
from nova import exception
from nova.i18n import _
LOG = logging.getLogger(__name__)
CELL_FAIL_SENTINELS = (context.did_not_respond_sentinel,
context.raised_exception_sentinel)
CONF = nova.conf.CONF
class RecordSortContext(object):
def __init__(self, sort_keys, sort_dirs):
self.sort_keys = sort_keys
self.sort_dirs = sort_dirs
def compare_records(self, rec1, rec2):
"""Implements cmp(rec1, rec2) for the first key that is different.
Adjusts for the requested sort direction by inverting the result
as needed.
"""
for skey, sdir in zip(self.sort_keys, self.sort_dirs):
resultflag = 1 if sdir == 'desc' else -1
if rec1[skey] < rec2[skey]:
return resultflag
elif rec1[skey] > rec2[skey]:
return resultflag * -1
return 0
class RecordWrapper(object):
"""Wrap a DB object from the database so it is sortable.
We use heapq.merge() below to do the merge sort of things from the
cell databases. That routine assumes it can use regular python
operators (> and <) on the contents. Since that won't work with
instances from the database (and depends on the sort keys/dirs),
we need this wrapper class to provide that.
Implementing __lt__ is enough for heapq.merge() to do its work.
"""
def __init__(self, ctx, sort_ctx, db_record):
self.cell_uuid = ctx.cell_uuid
self._sort_ctx = sort_ctx
self._db_record = db_record
def __lt__(self, other):
# NOTE(danms): This makes us always sort failure sentinels
# higher than actual results. We do this so that they bubble
# up in the get_records_sorted() feeder loop ahead of anything
# else, and so that the implementation of RecordSortContext
# never sees or has to handle the sentinels. If we did not
# sort these to the top then we could potentially return
# $limit results from good cells before we noticed the failed
# cells, and would not properly report them as failed for
# fix-up in the higher layers.
if self._db_record in CELL_FAIL_SENTINELS:
return True
elif other._db_record in CELL_FAIL_SENTINELS:
return False
r = self._sort_ctx.compare_records(self._db_record,
other._db_record)
# cmp(x, y) returns -1 if x < y
return r == -1
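# Illustrative only (hypothetical records; `ctx` stands in for a RequestContext
# that carries a cell_uuid): with sort_keys=['id'] and sort_dirs=['asc'],
#
#     sort_ctx = RecordSortContext(['id'], ['asc'])
#     RecordWrapper(ctx, sort_ctx, {'id': 1}) < RecordWrapper(ctx, sort_ctx, {'id': 2})
#
# evaluates to True, which is all heapq.merge() needs to interleave cell results.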
def query_wrapper(ctx, fn, *args, **kwargs):
"""This is a helper to run a query with predictable fail semantics.
This is a generator which will mimic the scatter_gather_cells() behavior
by honoring a timeout and catching exceptions, yielding the usual
sentinel objects instead of raising. It wraps these in RecordWrapper
objects, which will prioritize them to the merge sort, causing them to
be handled by the main get_objects_sorted() feeder loop quickly and
gracefully.
"""
with eventlet.timeout.Timeout(context.CELL_TIMEOUT, exception.CellTimeout):
try:
for record in fn(ctx, *args, **kwargs):
yield record
except exception.CellTimeout:
# Here, we yield a RecordWrapper (no sort_ctx needed since
# we won't call into the implementation's comparison routines)
# wrapping the sentinel indicating timeout.
yield RecordWrapper(ctx, None, context.did_not_respond_sentinel)
            return
except Exception:
# Here, we yield a RecordWrapper (no sort_ctx needed since
# we won't call into the implementation's comparison routines)
# wrapping the sentinel indicating failure.
yield RecordWrapper(ctx, None, context.raised_exception_sentinel)
            return
@six.add_metaclass(abc.ABCMeta)
class CrossCellLister(object):
"""An implementation of a cross-cell efficient lister.
This primarily provides a listing implementation for fetching
records from across multiple cells, paginated and sorted
appropriately. The external interface is the get_records_sorted()
method. You should implement this if you need to efficiently list
your data type from cell databases.
"""
def __init__(self, sort_ctx, cells=None, batch_size=None):
self.sort_ctx = sort_ctx
self.cells = cells
self.batch_size = batch_size
self._cells_responded = set()
self._cells_failed = set()
self._cells_timed_out = set()
@property
def cells_responded(self):
"""A list of uuids representing those cells that returned a successful
result.
"""
return list(self._cells_responded)
@property
def cells_failed(self):
"""A list of uuids representing those cells that failed to return a
successful result.
"""
return list(self._cells_failed)
@property
def cells_timed_out(self):
"""A list of uuids representing those cells that timed out while being
contacted.
"""
return list(self._cells_timed_out)
@property
@abc.abstractmethod
def marker_identifier(self):
"""Return the name of the property used as the marker identifier.
For instances (and many other types) this is 'uuid', but could also
be things like 'id' or anything else used as the marker identifier
when fetching a page of results.
"""
pass
@abc.abstractmethod
def get_marker_record(self, ctx, marker_id):
"""Get the cell UUID and instance of the marker record by id.
This needs to look up the marker record in whatever cell it is in
and return it. It should be populated with values corresponding to
what is in self.sort_ctx.sort_keys.
:param ctx: A RequestContext
:param marker_id: The identifier of the marker to find
:returns: A tuple of cell_uuid where the marker was found and an
instance of the marker from the database
:raises: MarkerNotFound if the marker does not exist
"""
pass
@abc.abstractmethod
def get_marker_by_values(self, ctx, values):
"""Get the identifier of the marker record by value.
When we need to paginate across cells, the marker record exists
in only one of those cells. The rest of the cells must decide on
a record to be their equivalent marker with which to return the
next page of results. This must be done by value, based on the
values of the sort_keys properties on the actual marker, as if
the results were sorted appropriately and the actual marker existed
in each cell.
:param ctx: A RequestContext
        :param values: The values of the sort_keys properties of the actual
marker instance
:returns: The identifier of the equivalent marker in the local database
"""
pass
@abc.abstractmethod
def get_by_filters(self, ctx, filters, limit, marker, **kwargs):
"""List records by filters, sorted and paginated.
This is the standard filtered/sorted list method for the data type
we are trying to list out of the database. Additional kwargs are
        passed through.
:param ctx: A RequestContext
:param filters: A dict of column=filter items
:param limit: A numeric limit on the number of results, or None
:param marker: The marker identifier, or None
:returns: A list of records
"""
pass
def get_records_sorted(self, ctx, filters, limit, marker, **kwargs):
"""Get a cross-cell list of records matching filters.
This iterates cells in parallel generating a unified and sorted
list of records as efficiently as possible. It takes care to
iterate the list as infrequently as possible. We wrap the results
in RecordWrapper objects so that they are sortable by
heapq.merge(), which requires that the '<' operator just works.
Our sorting requirements are encapsulated into the
RecordSortContext provided to the constructor for this object.
This function is a generator of records from the database like what you
would get from instance_get_all_by_filters_sort() in the DB API.
NOTE: Since we do these in parallel, a nonzero limit will be passed
to each database query, although the limit will be enforced in the
output of this function. Meaning, we will still query $limit from each
database, but only return $limit total results.
"""
if marker:
# A marker identifier was provided from the API. Call this
# the 'global' marker as it determines where we start the
# process across all cells. Look up the record in
# whatever cell it is in and record the values for the
# sort keys so we can find the marker instance in each
# cell (called the 'local' marker).
global_marker_cell, global_marker_record = self.get_marker_record(
ctx, marker)
global_marker_values = [global_marker_record[key]
for key in self.sort_ctx.sort_keys]
def do_query(cctx):
"""Generate RecordWrapper(record) objects from a cell.
We do this inside the thread (created by
scatter_gather_all_cells()) so that we return wrappers and
avoid having to iterate the combined result list in the
caller again. This is run against each cell by the
scatter_gather routine.
"""
# The local marker is an identifier of a record in a cell
# that is found by the special method
# get_marker_by_values(). It should be the next record
# in order according to the sort provided, but after the
# marker instance which may have been in another cell.
local_marker = None
# Since the regular DB query routines take a marker and assume that
# the marked record was the last entry of the previous page, we
# may need to prefix it to our result query if we're not the cell
# that had the actual marker record.
local_marker_prefix = []
marker_id = self.marker_identifier
if marker:
if cctx.cell_uuid == global_marker_cell:
local_marker = marker
else:
local_marker = self.get_marker_by_values(
cctx, global_marker_values)
if local_marker:
if local_marker != marker:
# We did find a marker in our cell, but it wasn't
# the global marker. Thus, we will use it as our
# marker in the main query below, but we also need
# to prefix that result with this marker instance
# since the result below will not return it and it
# has not been returned to the user yet. Note that
# we do _not_ prefix the marker instance if our
# marker was the global one since that has already
# been sent to the user.
local_marker_filters = copy.copy(filters)
if marker_id not in local_marker_filters:
# If an $id filter was provided, it will
# have included our marker already if this
# instance is desired in the output
# set. If it wasn't, we specifically query
# for it. If the other filters would have
# excluded it, then we'll get an empty set
# here and not include it in the output as
# expected.
local_marker_filters[marker_id] = [local_marker]
local_marker_prefix = self.get_by_filters(
cctx, local_marker_filters, limit=1, marker=None,
**kwargs)
else:
# There was a global marker but everything in our
# cell is _before_ that marker, so we return
# nothing. If we didn't have this clause, we'd
# pass marker=None to the query below and return a
# full unpaginated set for our cell.
return
if local_marker_prefix:
# Per above, if we had a matching marker object, that is
# the first result we should generate.
yield RecordWrapper(cctx, self.sort_ctx,
local_marker_prefix[0])
# If a batch size was provided, use that as the limit per
# batch. If not, then ask for the entire $limit in a single
# batch.
batch_size = self.batch_size or limit
# Keep track of how many we have returned in all batches
return_count = 0
# If limit was unlimited then keep querying batches until
# we run out of results. Otherwise, query until the total count
# we have returned exceeds the limit.
while limit is None or return_count < limit:
batch_count = 0
# Do not query a full batch if it would cause our total
# to exceed the limit
if limit:
query_size = min(batch_size, limit - return_count)
else:
query_size = batch_size
# Get one batch
query_result = self.get_by_filters(
cctx, filters,
limit=query_size or None, marker=local_marker,
**kwargs)
# Yield wrapped results from the batch, counting as we go
# (to avoid traversing the list to count). Also, update our
# local_marker each time so that local_marker is the end of
# this batch in order to find the next batch.
for item in query_result:
local_marker = item[self.marker_identifier]
yield RecordWrapper(cctx, self.sort_ctx, item)
batch_count += 1
# No results means we are done for this cell
if not batch_count:
break
return_count += batch_count
LOG.debug(('Listed batch of %(batch)i results from cell '
'out of %(limit)s limit. Returned %(total)i '
'total so far.'),
{'batch': batch_count,
'total': return_count,
'limit': limit or 'no'})
# NOTE(danms): The calls to do_query() will return immediately
# with a generator. There is no point in us checking the
# results for failure or timeout since we have not actually
# run any code in do_query() until the first iteration
# below. The query_wrapper() utility handles inline
# translation of failures and timeouts to sentinels which will
# be generated and consumed just like any normal result below.
if self.cells:
results = context.scatter_gather_cells(ctx, self.cells,
context.CELL_TIMEOUT,
query_wrapper, do_query)
else:
results = context.scatter_gather_all_cells(ctx,
query_wrapper, do_query)
# If a limit was provided, it was passed to the per-cell query
# routines. That means we have NUM_CELLS * limit items across
# results. So, we need to consume from that limit below and
# stop returning results. Call that total_limit since we will
# modify it in the loop below, but do_query() above also looks
# at the original provided limit.
total_limit = limit or 0
# Generate results from heapq so we can return the inner
# instance instead of the wrapper. This is basically free
# as it works as our caller iterates the results.
feeder = heapq.merge(*results.values())
while True:
try:
item = next(feeder)
except StopIteration:
return
if item._db_record in CELL_FAIL_SENTINELS:
if not CONF.api.list_records_by_skipping_down_cells:
raise exception.NovaException(
_('Cell %s is not responding but configuration '
'indicates that we should fail.') % item.cell_uuid)
LOG.warning('Cell %s is not responding and hence is '
'being omitted from the results',
item.cell_uuid)
if item._db_record == context.did_not_respond_sentinel:
self._cells_timed_out.add(item.cell_uuid)
elif item._db_record == context.raised_exception_sentinel:
self._cells_failed.add(item.cell_uuid)
# We might have received one batch but timed out or failed
# on a later one, so be sure we fix the accounting.
if item.cell_uuid in self._cells_responded:
self._cells_responded.remove(item.cell_uuid)
continue
yield item._db_record
self._cells_responded.add(item.cell_uuid)
total_limit -= 1
if total_limit == 0:
# We'll only hit this if limit was nonzero and we just
# generated our last one
return
|
|
#
# CORE
# Copyright (c)2010-2012 the Boeing Company.
# See the LICENSE file included in this distribution.
#
# authors: core-dev@pf.itd.nrl.navy.mil
#
'''
vnode.py: SimpleJailNode and JailNode classes that implement the FreeBSD
jail-based virtual node.
'''
import os, signal, sys, subprocess, threading, string
import random, time
from core.misc.utils import *
from core.constants import *
from core.coreobj import PyCoreObj, PyCoreNode, PyCoreNetIf, Position
from core.emane.nodes import EmaneNode
from core.bsd.netgraph import *
checkexec([IFCONFIG_BIN, VIMAGE_BIN])
class VEth(PyCoreNetIf):
def __init__(self, node, name, localname, mtu = 1500, net = None,
start = True):
PyCoreNetIf.__init__(self, node = node, name = name, mtu = mtu)
# name is the device name (e.g. ngeth0, ngeth1, etc.) before it is
# installed in a node; the Netgraph name is renamed to localname
# e.g. before install: name = ngeth0 localname = n0_0_123
# after install: name = eth0 localname = n0_0_123
self.localname = localname
self.ngid = None
self.net = None
self.pipe = None
self.addrlist = []
self.hwaddr = None
self.up = False
self.hook = "ether"
if start:
self.startup()
def startup(self):
hookstr = "%s %s" % (self.hook, self.hook)
ngname, ngid = createngnode(type="eiface", hookstr=hookstr,
name=self.localname)
self.name = ngname
self.ngid = ngid
check_call([IFCONFIG_BIN, ngname, "up"])
self.up = True
def shutdown(self):
if not self.up:
return
destroyngnode(self.localname)
self.up = False
def attachnet(self, net):
if self.net:
self.detachnet()
self.net = None
net.attach(self)
self.net = net
def detachnet(self):
if self.net is not None:
self.net.detach(self)
def addaddr(self, addr):
self.addrlist.append(addr)
def deladdr(self, addr):
self.addrlist.remove(addr)
def sethwaddr(self, addr):
self.hwaddr = addr
class TunTap(PyCoreNetIf):
'''TUN/TAP virtual device in TAP mode'''
def __init__(self, node, name, localname, mtu = None, net = None,
start = True):
raise NotImplementedError
class SimpleJailNode(PyCoreNode):
def __init__(self, session, objid = None, name = None, nodedir = None,
verbose = False):
PyCoreNode.__init__(self, session, objid, name)
self.nodedir = nodedir
self.verbose = verbose
self.pid = None
self.up = False
self.lock = threading.RLock()
self._mounts = []
def startup(self):
if self.up:
raise Exception, "already up"
vimg = [VIMAGE_BIN, "-c", self.name]
try:
os.spawnlp(os.P_WAIT, VIMAGE_BIN, *vimg)
except OSError:
raise Exception, ("vimage command not found while running: %s" % \
vimg)
self.info("bringing up loopback interface")
self.cmd([IFCONFIG_BIN, "lo0", "127.0.0.1"])
self.info("setting hostname: %s" % self.name)
self.cmd(["hostname", self.name])
self.cmd([SYSCTL_BIN, "vfs.morphing_symlinks=1"])
self.up = True
def shutdown(self):
if not self.up:
return
for netif in self.netifs():
netif.shutdown()
self._netif.clear()
del self.session
vimg = [VIMAGE_BIN, "-d", self.name]
try:
os.spawnlp(os.P_WAIT, VIMAGE_BIN, *vimg)
except OSError:
raise Exception, ("vimage command not found while running: %s" % \
vimg)
self.up = False
def cmd(self, args, wait = True):
if wait:
mode = os.P_WAIT
else:
mode = os.P_NOWAIT
tmp = call([VIMAGE_BIN, self.name] + args, cwd=self.nodedir)
if not wait:
tmp = None
if tmp:
self.warn("cmd exited with status %s: %s" % (tmp, str(args)))
return tmp
def cmdresult(self, args, wait = True):
cmdid, cmdin, cmdout, cmderr = self.popen(args)
result = cmdout.read()
result += cmderr.read()
cmdin.close()
cmdout.close()
cmderr.close()
if wait:
status = cmdid.wait()
else:
status = 0
return (status, result)
def popen(self, args):
cmd = [VIMAGE_BIN, self.name]
cmd.extend(args)
tmp = subprocess.Popen(cmd, stdin = subprocess.PIPE,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE, cwd=self.nodedir)
return tmp, tmp.stdin, tmp.stdout, tmp.stderr
def icmd(self, args):
return os.spawnlp(os.P_WAIT, VIMAGE_BIN, VIMAGE_BIN, self.name, *args)
def term(self, sh = "/bin/sh"):
return os.spawnlp(os.P_WAIT, "xterm", "xterm", "-ut",
"-title", self.name, "-e", VIMAGE_BIN, self.name, sh)
def termcmdstring(self, sh = "/bin/sh"):
''' We add 'sudo' to the command string because the GUI runs as a
normal user.
'''
return "cd %s && sudo %s %s %s" % (self.nodedir, VIMAGE_BIN, self.name, sh)
def shcmd(self, cmdstr, sh = "/bin/sh"):
return self.cmd([sh, "-c", cmdstr])
def boot(self):
pass
def mount(self, source, target):
source = os.path.abspath(source)
self.info("mounting %s at %s" % (source, target))
self.addsymlink(path=target, file=None)
def umount(self, target):
self.info("unmounting '%s'" % target)
def newveth(self, ifindex = None, ifname = None, net = None):
self.lock.acquire()
try:
if ifindex is None:
ifindex = self.newifindex()
if ifname is None:
ifname = "eth%d" % ifindex
sessionid = self.session.shortsessionid()
name = "n%s_%s_%s" % (self.objid, ifindex, sessionid)
localname = name
ifclass = VEth
veth = ifclass(node = self, name = name, localname = localname,
mtu = 1500, net = net, start = self.up)
if self.up:
# install into jail
check_call([IFCONFIG_BIN, veth.name, "vnet", self.name])
# rename from "ngeth0" to "eth0"
self.cmd([IFCONFIG_BIN, veth.name, "name", ifname])
veth.name = ifname
try:
self.addnetif(veth, ifindex)
except:
veth.shutdown()
del veth
raise
return ifindex
finally:
self.lock.release()
def sethwaddr(self, ifindex, addr):
self._netif[ifindex].sethwaddr(addr)
if self.up:
self.cmd([IFCONFIG_BIN, self.ifname(ifindex), "link",
str(addr)])
def addaddr(self, ifindex, addr):
if self.up:
if ':' in addr:
family = "inet6"
else:
family = "inet"
self.cmd([IFCONFIG_BIN, self.ifname(ifindex), family, "alias",
str(addr)])
self._netif[ifindex].addaddr(addr)
def deladdr(self, ifindex, addr):
try:
self._netif[ifindex].deladdr(addr)
except ValueError:
self.warn("trying to delete unknown address: %s" % addr)
if self.up:
if ':' in addr:
family = "inet6"
else:
family = "inet"
self.cmd([IFCONFIG_BIN, self.ifname(ifindex), family, "-alias",
str(addr)])
valid_deladdrtype = ("inet", "inet6", "inet6link")
def delalladdr(self, ifindex, addrtypes = valid_deladdrtype):
addr = self.getaddr(self.ifname(ifindex), rescan = True)
for t in addrtypes:
if t not in self.valid_deladdrtype:
raise ValueError, "addr type must be in: " + \
" ".join(self.valid_deladdrtype)
for a in addr[t]:
self.deladdr(ifindex, a)
# update cached information
self.getaddr(self.ifname(ifindex), rescan = True)
def ifup(self, ifindex):
if self.up:
self.cmd([IFCONFIG_BIN, self.ifname(ifindex), "up"])
def newnetif(self, net = None, addrlist = [], hwaddr = None,
ifindex = None, ifname = None):
self.lock.acquire()
try:
ifindex = self.newveth(ifindex = ifindex, ifname = ifname,
net = net)
if net is not None:
self.attachnet(ifindex, net)
if hwaddr:
self.sethwaddr(ifindex, hwaddr)
for addr in maketuple(addrlist):
self.addaddr(ifindex, addr)
self.ifup(ifindex)
return ifindex
finally:
self.lock.release()
def attachnet(self, ifindex, net):
self._netif[ifindex].attachnet(net)
def detachnet(self, ifindex):
self._netif[ifindex].detachnet()
def addfile(self, srcname, filename):
shcmd = "mkdir -p $(dirname '%s') && mv '%s' '%s' && sync" % \
(filename, srcname, filename)
self.shcmd(shcmd)
def getaddr(self, ifname, rescan = False):
return None
#return self.vnodeclient.getaddr(ifname = ifname, rescan = rescan)
def addsymlink(self, path, file):
''' Create a symbolic link from /path/name/file ->
/tmp/pycore.nnnnn/@.conf/path.name/file
'''
dirname = path
if dirname and dirname[0] == "/":
dirname = dirname[1:]
dirname = dirname.replace("/", ".")
if file:
pathname = os.path.join(path, file)
sym = os.path.join(self.session.sessiondir, "@.conf", dirname, file)
else:
pathname = path
sym = os.path.join(self.session.sessiondir, "@.conf", dirname)
if os.path.islink(pathname):
if os.readlink(pathname) == sym:
# this link already exists - silently return
return
os.unlink(pathname)
else:
if os.path.exists(pathname):
self.warn("did not create symlink for %s since path " \
"exists on host" % pathname)
return
self.info("creating symlink %s -> %s" % (pathname, sym))
os.symlink(sym, pathname)
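# Example of the mapping performed by addsymlink() (a sketch; the session
# directory name is illustrative):
#   addsymlink(path="/etc/rc.d", file="rc.conf") creates
#   /etc/rc.d/rc.conf -> /tmp/pycore.12345/@.conf/etc.rc.d/rc.conf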
class JailNode(SimpleJailNode):
def __init__(self, session, objid = None, name = None,
nodedir = None, bootsh = "boot.sh", verbose = False,
start = True):
super(JailNode, self).__init__(session = session, objid = objid,
name = name, nodedir = nodedir,
verbose = verbose)
self.bootsh = bootsh
if not start:
return
# below here is considered node startup/instantiation code
self.makenodedir()
self.startup()
def boot(self):
self.session.services.bootnodeservices(self)
def validate(self):
self.session.services.validatenodeservices(self)
def startup(self):
self.lock.acquire()
try:
super(JailNode, self).startup()
#self.privatedir("/var/run")
#self.privatedir("/var/log")
finally:
self.lock.release()
def shutdown(self):
if not self.up:
return
self.lock.acquire()
# services are instead stopped when session enters datacollect state
#self.session.services.stopnodeservices(self)
try:
super(JailNode, self).shutdown()
finally:
self.rmnodedir()
self.lock.release()
def privatedir(self, path):
if path[0] != "/":
            raise ValueError("path not fully qualified: " + path)
hostpath = os.path.join(self.nodedir, path[1:].replace("/", "."))
        try:
            os.mkdir(hostpath)
        except OSError:
            # directory may already exist
            pass
self.mount(hostpath, path)
def opennodefile(self, filename, mode = "w"):
dirname, basename = os.path.split(filename)
#self.addsymlink(path=dirname, file=basename)
if not basename:
            raise ValueError("no basename for filename: " + filename)
if dirname and dirname[0] == "/":
dirname = dirname[1:]
dirname = dirname.replace("/", ".")
dirname = os.path.join(self.nodedir, dirname)
if not os.path.isdir(dirname):
os.makedirs(dirname, mode = 0755)
hostfilename = os.path.join(dirname, basename)
return open(hostfilename, mode)
def nodefile(self, filename, contents, mode = 0644):
f = self.opennodefile(filename, "w")
f.write(contents)
os.chmod(f.name, mode)
f.close()
self.info("created nodefile: '%s'; mode: 0%o" % (f.name, mode))
|
|
import os
import sys
from configparser import ConfigParser
import pytest
import radon.cli as cli
import radon.complexity as cc_mod
from radon.cli.harvest import CCHarvester, Harvester, MIHarvester, RawHarvester
from radon.tests.test_cli_harvest import (
BASE_CONFIG,
CC_CONFIG,
MI_CONFIG,
RAW_CONFIG,
)
DIRNAME = os.path.dirname(__file__)
def func(a, b=2, c=[], d=None):
pass
def func2(*args, **kwargs):
pass
def func3(b=3, *args):
pass
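# Stand-in for Harvester.to_terminal(): yields (msg, args, kwargs) triples,
# covering the error, plain, and indented cases exercised in test_log_result.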
def fake_to_terminal():
yield ('a', ('mystr',), {'error': True})
yield ('b', (), {})
yield (('p1', 'p2'), (), {'indent': 1})
def test_config_base_behavior():
c = cli.Config(a=2, b=3)
assert c.config_values == {'a': 2, 'b': 3}
assert c.a == 2
assert c.b == 3
def test_config_exceptions():
c = cli.Config(a=2)
    assert c.__dict__ == {'config_values': {'a': 2}}
with pytest.raises(AttributeError):
c.notexistent
def test_config_str():
assert str(cli.Config()) == '{}'
assert str(cli.Config(a=2)) == '{\'a\': 2}'
def test_config_eq():
assert cli.Config() == cli.Config()
assert cli.Config(a=2) == cli.Config(a=2)
assert cli.Config(a=2) != cli.Config(b=2)
def test_config_for():
assert cli.Config.from_function(func) == cli.Config(b=2, c=[], d=None)
assert cli.Config.from_function(func2) == cli.Config()
assert cli.Config.from_function(func3) == cli.Config(b=3)
def test_config_converts_types(mocker):
test_config = ConfigParser()
test_config.read_string(
u'''
[radon]
str_test = B
int_test = 19
bool_test = true
'''
)
config_mock = mocker.patch('radon.cli.FileConfig.file_config')
config_mock.return_value = test_config
cfg = cli.FileConfig()
    assert cfg.get_value('bool_test', bool, False) is True
assert cfg.get_value('str_test', str, 'x') == 'B'
assert cfg.get_value('missing_test', str, 'Y') == 'Y'
assert cfg.get_value('int_test', int, 10) == 19
def test_cc(mocker, log_mock):
harv_mock = mocker.patch('radon.cli.CCHarvester')
harv_mock.return_value = mocker.sentinel.harvester
cli.cc(['-'], json=True)
harv_mock.assert_called_once_with(
['-'],
cli.Config(
min='A',
max='F',
exclude=None,
ignore=None,
show_complexity=False,
average=False,
order=getattr(cc_mod, 'SCORE'),
no_assert=False,
total_average=False,
show_closures=False,
include_ipynb=False,
ipynb_cells=False,
),
)
log_mock.assert_called_once_with(
mocker.sentinel.harvester,
codeclimate=False,
json=True,
stream=sys.stdout,
xml=False,
md=False
)
def test_raw(mocker, log_mock):
harv_mock = mocker.patch('radon.cli.RawHarvester')
harv_mock.return_value = mocker.sentinel.harvester
cli.raw(['-'], summary=True, json=True)
harv_mock.assert_called_once_with(
['-'],
cli.Config(
exclude=None,
ignore=None,
summary=True,
include_ipynb=False,
ipynb_cells=False,
),
)
log_mock.assert_called_once_with(
mocker.sentinel.harvester, stream=sys.stdout, json=True
)
def test_mi(mocker, log_mock):
harv_mock = mocker.patch('radon.cli.MIHarvester')
harv_mock.return_value = mocker.sentinel.harvester
cli.mi(['-'], show=True, multi=False)
harv_mock.assert_called_once_with(
['-'],
cli.Config(
min='A',
max='C',
exclude=None,
ignore=None,
show=True,
multi=False,
sort=False,
include_ipynb=False,
ipynb_cells=False,
),
)
log_mock.assert_called_once_with(
mocker.sentinel.harvester, stream=sys.stdout, json=False
)
def test_encoding(mocker, log_mock):
mi_cfg = cli.Config(**BASE_CONFIG.config_values)
mi_cfg.config_values.update(MI_CONFIG.config_values)
raw_cfg = cli.Config(**BASE_CONFIG.config_values)
raw_cfg.config_values.update(RAW_CONFIG.config_values)
mappings = {
MIHarvester: mi_cfg,
RawHarvester: raw_cfg,
CCHarvester: CC_CONFIG,
}
if sys.version_info[0] < 3:
target = 'data/__init__.py'
else:
target = 'data/py3unicode.py'
fnames = [
os.path.join(DIRNAME, target),
# This one will fail if detect_encoding() removes the first lines
# See #133
os.path.join(DIRNAME, 'data/no_encoding.py'),
]
for h_class, cfg in mappings.items():
for f in fnames:
harvester = h_class([f], cfg)
assert not any(
['error' in kw for msg, args, kw in harvester.to_terminal()]
)
@pytest.fixture
def stdout_mock(mocker):
return mocker.patch('radon.cli.sys.stdout.write')
def test_log(mocker, stdout_mock):
cli.log('msg')
cli.log('msg', indent=1)
cli.log('{0} + 1', 2)
cli.log('{0} + 1', 2, noformat=True)
stdout_mock.assert_has_calls(
[
mocker.call('msg\n'),
mocker.call(' msg\n'),
mocker.call('2 + 1\n'),
mocker.call('{0} + 1\n'),
]
)
assert stdout_mock.call_count == 4
def test_log_list(stdout_mock):
cli.log_list([])
cli.log_list(['msg'])
stdout_mock.assert_called_once_with('msg\n')
def test_log_error(mocker, stdout_mock):
reset_mock = mocker.patch('radon.cli.RESET')
red_mock = mocker.patch('radon.cli.RED')
bright_mock = mocker.patch('radon.cli.BRIGHT')
bright_mock.__str__.return_value = '@'
red_mock.__str__.return_value = '<|||>'
reset_mock.__str__.return_value = '__R__'
cli.log_error('mystr')
stdout_mock.assert_called_once_with('@<|||>ERROR__R__: mystr\n')
def test_log_result(mocker, stdout_mock):
le_mock = mocker.patch('radon.cli.log_error')
ll_mock = mocker.patch('radon.cli.log_list')
log_mock = mocker.patch('radon.cli.log')
h = mocker.Mock(spec=Harvester)
h.as_json.return_value = mocker.sentinel.json
h.as_xml.return_value = mocker.sentinel.xml
h.as_md.return_value = mocker.sentinel.md
h.to_terminal.side_effect = fake_to_terminal
cli.log_result(h, json=True)
h.as_json.assert_called_once_with()
h.as_json.reset_mock()
cli.log_result(h, json=True, xml=True, md=True)
h.as_json.assert_called_once_with()
assert h.as_xml.call_count == 0
assert h.as_md.call_count == 0
cli.log_result(h, xml=True)
h.as_xml.assert_called_once_with()
cli.log_result(h, md=True)
h.as_md.assert_called_once_with()
cli.log_result(h)
h.to_terminal.assert_called_once_with()
log_mock.assert_has_calls(
[
mocker.call(mocker.sentinel.json, json=True, noformat=True),
mocker.call(
mocker.sentinel.json, json=True, noformat=True, xml=True, md=True
),
mocker.call(mocker.sentinel.xml, noformat=True, xml=True),
mocker.call(mocker.sentinel.md, noformat=True, md=True),
mocker.call('a', error=True),
]
)
le_mock.assert_called_once_with('mystr', indent=1)
ll_mock.assert_has_calls(
[mocker.call(['b']), mocker.call(('p1', 'p2'), indent=1)]
)
|
|
#!/usr/bin/env python3
import os
import sys
import random
import time
from random import seed, randint
import argparse
import platform
from datetime import datetime
import imp
from myPersonalFunctions import *
import glob
import numpy
# Useful codes
# os.system("awk '{print $NF}' all_wham.dat > e_total")
# tr " " "\n"
# sed 1d
# sort -u -k 3
# sed -e 's/+T//'
mypath = os.environ["PATH"]
os.environ["PATH"] = "/home/wl45/python/bin:/home/wl45/opt:" + mypath
my_env = os.environ.copy()
parser = argparse.ArgumentParser(
    description="Glue is for code that needs constant changes "
                "to meet the various needs of each task")
# parser.add_argument("protein", help="the name of protein")
# parser.add_argument("template", help="the name of template file")
parser.add_argument("-t", "--test", help="test ", action="store_true", default=False)
parser.add_argument("--plot", action="store_true", default=False)
parser.add_argument("-d", "--debug", action="store_true", default=False)
parser.add_argument("--protein", default="2xov")
parser.add_argument("--dimension", type=int, default=1)
parser.add_argument("-f", "--freeEnergy", action="store_true", default=False)
parser.add_argument("--move", action="store_true", default=False)
parser.add_argument("-m", "--mode", type=int, default=1)
parser.add_argument("-n", "--number", type=int, default=40,
help="Number of simulation run")
args = parser.parse_args()
if(args.debug):
do = print
cd = print
else:
do = os.system
cd = os.chdir
if(args.freeEnergy):
# sim_list = "t250 t275 t300 t325 t350"
# temp_list = "250 275 300 325 350"
sim_list = "t250 t300 t350"
temp_list = "250 300 350"
# do("mult_calc_cv.sc . '{}' 40 '{}' 100 200 10 30 400 0.12 0.9 2xov q".format(sim_list, temp_list))
do("mult_calc_cv.sc . '{}' 66 '{}' 200 400 10 30 0.02 25 345 2xov q".format(sim_list, temp_list))
if(args.plot):
do("plotcontour.py pmf-200.dat -xmax 0.8 -xmin 0.2 -ymin 0.2 -ymax 0.8")
# ls */halfdata | sort -g | xargs cat > all_halfdata
# ls */tinydata | sort -g | xargs cat > all_tinydata
# awk '{print $3}' all_halfdata > p_total
# awk '{print $4}' all_halfdata > e_total
# ls [0-9]* |sort -g | xargs cat > data
def move_data_to_wham(temp_list):
for temp in temp_list:
do("cp ../data/{}/data data".format(temp))
do("awk '{print $1}' data > qn_t%i" % (temp))
do("awk '{print $2}' data > qc_t%i" % (temp))
do("awk '{print $3}' data > q_t%i" % (temp))
do("awk '{print $4}' data > energy_t%i" % (temp))
def write_simulation_list(temp_list):
with open("T_list", "w") as f:
for temp in temp_list:
f.write(str(temp)+"\n")
with open("sim_list", "w") as f:
for temp in temp_list:
f.write("t"+str(temp)+"\n")
def get_total_x(temp_list):
x_list = ["q", "qn", "qc", "energy"]
for x in x_list:
for temp in temp_list:
do("cat {0}_t{1} >> {0}_total".format(x, temp))
def replace_random(file_name):
    do(  # replace RANDOM with a random number
"sed -i.bak 's/RANDOM/'" +
str(randint(1, 10**6)) +
"'/g' {}".format(file_name))
if(args.move):
if(args.mode == 14):
n = 20
do("mkdir -p analysis/data")
        # earlier run batches, kept for reference:
        # run_list = [0, 1, 2, 3, 4, 5]
        # run_list = [6]
        # run_list = [7, 8, 9]
        # run_list = [0, 1, 2]
        run_list = [0]
# run_list = [0, 1]
# run_list = [10, 11, 12, 13]
# run_list = [2]
# run_list = [3, 4, 5]
for j in run_list:
for i in range(n):
do("cp simulation/{0}/{1}/data analysis/data/{0}_{1}.dat".format(i, j))
if(args.mode == 13):
replace_random("*.in")
if(args.mode == 12):
        seed(datetime.now().timestamp())  # datetime objects are not valid seeds on Python 3
n = args.number # default is 40
cwd = os.getcwd()
# do("cp -r simulation back_up")
for i in range(n):
cd("simulation/" + str(i))
do("cp -r ~/continue_run_addon/* .")
replace_random("2xov_1.in")
# do("cp 2xov_a206g.seq 2xov.seq")
do("cp 2xov_l155a.seq 2xov.seq")
do("mv addforce.dat energy.dat dump.lammpstrj wham.dat 0/")
do("sbatch run_1.slurm")
cd(cwd)
if(args.mode == 11):
n = 40
do("mkdir -p analysis/data")
for i in range(n):
do("cp simulation/{0}/addforce.dat analysis/data/{0}.dat".format(i))
if(args.mode == 10):
folder_name = "multi_temp_2"
do("mkdir "+folder_name)
cd(folder_name)
temp_list = [135, 160, 185, 210]
move_data_to_wham(temp_list)
write_simulation_list(temp_list)
get_total_x(temp_list)
sim_list = 't135 t160 t185 t210'
temp_list = '135 160 185 210'
do("mult_calc_cv.sc . '{}' 20 '{}' 150 350 10 30 200 0 0.95 2xov q".format(sim_list, temp_list))
cd("..")
if(args.mode == 9):
do("mult_calc_cv.sc . 't135 t160 t185 t210' 20 '135 160 185 210' 150 350 10 30 200 0 0.95 2xov q")
if(args.mode == 8):
x_list = ["q", "qn", "qc", "energy"]
temp_list = [135, 160, 185, 210]
for x in x_list:
for temp in temp_list:
do("cat {0}_t{1} >> {0}_total".format(x, temp))
if(args.mode == 7):
temp_list = [135, 160, 185, 210]
write_simulation_list(temp_list)
if(args.mode == 6):
temp_list = [135, 160, 185, 210]
move_data_to_wham(temp_list)
if(args.mode == 5):
temp_list = [135, 160, 185, 210]
for temp in temp_list:
cd(str(temp))
do("ls [0-9]* |sort -g | xargs cat > data")
cd("..")
if(args.mode == 4):
n = 20
temp_list = [135, 160, 185, 210]
# temp_list = ['300', '200', '250']
cwd = os.getcwd()
do("mkdir -p analysis")
for temp in temp_list:
do("mkdir -p analysis/{}".format(temp))
for i in range(n):
print(str(i))
do("cp simulation/{0}/{1}/data analysis/{0}/{1}".format(temp, i))
# do("cp simulation/{0}/{1}/small_data analysis/{0}/first_2000_{1}".format(temp, i))
cd("analysis")
do("mkdir data")
do("mv * data/")
if(args.mode == 1):
n = 40
# temp_list = [250, 275, 300, 325, 350]
temp_list = [200]
cwd = os.getcwd()
for temp in temp_list:
for i in range(n):
print(str(i))
do("mkdir -p analysis/data/{}".format(i))
do("cp simulation/{0}/{1}/halfdata analysis/data/{1}/".format(temp, i))
cd("analysis/data/{}".format(i))
do("tail -n 1000 halfdata > tinydata")
cd(cwd)
if(args.mode == 2):
array = []
cwd = os.getcwd()
print(cwd)
with open('folder_list', 'r') as ins:
for line in ins:
target = line.strip('\n')
temp = target.split("_")[1]
x = target.split("_")[3]
t1 = "simulation/" + target + "/"
cd(t1)
do("pwd")
do("cat halfdata >> ../t{}".format(temp))
cd(cwd)
if(args.mode == 3):
cwd = os.getcwd()
temp_list = [250, 300, 350]
for temp in temp_list:
do("cp ../data/t{} data".format(temp))
do("awk '{print $1}' data > qn_t%i" % (temp))
do("awk '{print $2}' data > qc_t%i" % (temp))
do("awk '{print $3}' data > q_t%i" % (temp))
do("awk '{print $4}' data > energy_t%i" % (temp))
# if(args.test):
# force_list = [1.0, 1.2, 1.4, 1.6, 1.8, 1.9, 2.0, 2.1, 2.2, 2.3, 2.4, 2.5]
# force_list = [round(i*0.1,2) for i in range(10)]
# force_list = [round(i*0.1,2) for i in range(20)]
# # force_list = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
# for force in force_list:
# do("mkdir {}".format(force))
# cd("{}".format(force))
# do("cp ../freeEnergy.slurm .")
# do("cp ../metadatafile .")
# do(
# "sed -i.bak 's/FORCE/" +
# str(force) +
# "/g' freeEnergy.slurm")
# do("sbatch freeEnergy.slurm")
# cd("..")
|
|
# Copyright (c) 2015-2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import hashlib
import hmac
from contextlib import contextmanager
from swift.common.constraints import check_metadata
from swift.common.http import is_success
from swift.common.middleware.crypto.crypto_utils import CryptoWSGIContext, \
dump_crypto_meta, append_crypto_meta, Crypto
from swift.common.request_helpers import get_object_transient_sysmeta, \
strip_user_meta_prefix, is_user_meta, update_etag_is_at_header, \
get_container_update_override_key
from swift.common.swob import Request, Match, HTTPException, \
HTTPUnprocessableEntity, wsgi_to_bytes, bytes_to_wsgi, normalize_etag
from swift.common.utils import get_logger, config_true_value, \
MD5_OF_EMPTY_STRING, md5
def encrypt_header_val(crypto, value, key):
"""
Encrypt a header value using the supplied key.
:param crypto: a Crypto instance
:param value: value to encrypt
:param key: crypto key to use
:returns: a tuple of (encrypted value, crypto_meta) where crypto_meta is a
dict of form returned by
:py:func:`~swift.common.middleware.crypto.Crypto.get_crypto_meta`
:raises ValueError: if value is empty
"""
if not value:
raise ValueError('empty value is not acceptable')
crypto_meta = crypto.create_crypto_meta()
crypto_ctxt = crypto.create_encryption_ctxt(key, crypto_meta['iv'])
enc_val = bytes_to_wsgi(base64.b64encode(
crypto_ctxt.update(wsgi_to_bytes(value))))
return enc_val, crypto_meta
def _hmac_etag(key, etag):
"""
Compute an HMAC-SHA256 using given key and etag.
:param key: The starting key for the hash.
:param etag: The etag to hash.
:returns: a Base64-encoded representation of the HMAC
"""
if not isinstance(etag, bytes):
etag = wsgi_to_bytes(etag)
result = hmac.new(key, etag, digestmod=hashlib.sha256).digest()
return base64.b64encode(result).decode()
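# For illustration (values are hypothetical): given a 32-byte key and an MD5
# etag string, _hmac_etag() returns the 44-character base64 encoding of the
# 32-byte HMAC-SHA256 digest.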
class EncInputWrapper(object):
"""File-like object to be swapped in for wsgi.input."""
def __init__(self, crypto, keys, req, logger):
self.env = req.environ
self.wsgi_input = req.environ['wsgi.input']
self.path = req.path
self.crypto = crypto
self.body_crypto_ctxt = None
self.keys = keys
self.plaintext_md5 = None
self.ciphertext_md5 = None
self.logger = logger
self.install_footers_callback(req)
def _init_encryption_context(self):
# do this once when body is first read
if self.body_crypto_ctxt is None:
self.body_crypto_meta = self.crypto.create_crypto_meta()
body_key = self.crypto.create_random_key()
# wrap the body key with object key
self.body_crypto_meta['body_key'] = self.crypto.wrap_key(
self.keys['object'], body_key)
self.body_crypto_meta['key_id'] = self.keys['id']
self.body_crypto_ctxt = self.crypto.create_encryption_ctxt(
body_key, self.body_crypto_meta.get('iv'))
self.plaintext_md5 = md5(usedforsecurity=False)
self.ciphertext_md5 = md5(usedforsecurity=False)
def install_footers_callback(self, req):
# the proxy controller will call back for footer metadata after
# body has been sent
inner_callback = req.environ.get('swift.callback.update_footers')
# remove any Etag from headers, it won't be valid for ciphertext and
# we'll send the ciphertext Etag later in footer metadata
client_etag = req.headers.pop('etag', None)
override_header = get_container_update_override_key('etag')
container_listing_etag_header = req.headers.get(override_header)
def footers_callback(footers):
if inner_callback:
# pass on footers dict to any other callback that was
# registered before this one. It may override any footers that
# were set.
inner_callback(footers)
plaintext_etag = None
if self.body_crypto_ctxt:
plaintext_etag = self.plaintext_md5.hexdigest()
# If client (or other middleware) supplied etag, then validate
# against plaintext etag
etag_to_check = footers.get('Etag') or client_etag
if (etag_to_check is not None and
plaintext_etag != etag_to_check):
raise HTTPUnprocessableEntity(request=Request(self.env))
# override any previous notion of etag with the ciphertext etag
footers['Etag'] = self.ciphertext_md5.hexdigest()
# Encrypt the plaintext etag using the object key and persist
# as sysmeta along with the crypto parameters that were used.
encrypted_etag, etag_crypto_meta = encrypt_header_val(
self.crypto, plaintext_etag, self.keys['object'])
footers['X-Object-Sysmeta-Crypto-Etag'] = \
append_crypto_meta(encrypted_etag, etag_crypto_meta)
footers['X-Object-Sysmeta-Crypto-Body-Meta'] = \
dump_crypto_meta(self.body_crypto_meta)
# Also add an HMAC of the etag for use when evaluating
# conditional requests
footers['X-Object-Sysmeta-Crypto-Etag-Mac'] = _hmac_etag(
self.keys['object'], plaintext_etag)
else:
# No data was read from body, nothing was encrypted, so don't
# set any crypto sysmeta for the body, but do re-instate any
# etag provided in inbound request if other middleware has not
# already set a value.
if client_etag is not None:
footers.setdefault('Etag', client_etag)
# When deciding on the etag that should appear in container
# listings, look for:
# * override in the footer, otherwise
# * override in the header, and finally
# * MD5 of the plaintext received
# This may be None if no override was set and no data was read. An
# override value of '' will be passed on.
container_listing_etag = footers.get(
override_header, container_listing_etag_header)
if container_listing_etag is None:
container_listing_etag = plaintext_etag
if (container_listing_etag and
(container_listing_etag != MD5_OF_EMPTY_STRING or
plaintext_etag)):
# Encrypt the container-listing etag using the container key
# and a random IV, and use it to override the container update
# value, with the crypto parameters appended. We use the
# container key here so that only that key is required to
# decrypt all etag values in a container listing when handling
# a container GET request. Don't encrypt an EMPTY_ETAG
# unless there actually was some body content, in which case
# the container-listing etag is possibly conveying some
# non-obvious information.
val, crypto_meta = encrypt_header_val(
self.crypto, container_listing_etag,
self.keys['container'])
crypto_meta['key_id'] = self.keys['id']
footers[override_header] = \
append_crypto_meta(val, crypto_meta)
# else: no override was set and no data was read
req.environ['swift.callback.update_footers'] = footers_callback
def read(self, *args, **kwargs):
return self.readChunk(self.wsgi_input.read, *args, **kwargs)
def readline(self, *args, **kwargs):
return self.readChunk(self.wsgi_input.readline, *args, **kwargs)
def readChunk(self, read_method, *args, **kwargs):
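        # The encryption context is created lazily on the first non-empty
        # chunk, so a zero-byte body leaves plaintext_md5 unset and no body
        # crypto sysmeta is written (see footers_callback above).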
chunk = read_method(*args, **kwargs)
if chunk:
self._init_encryption_context()
self.plaintext_md5.update(chunk)
# Encrypt one chunk at a time
ciphertext = self.body_crypto_ctxt.update(chunk)
self.ciphertext_md5.update(ciphertext)
return ciphertext
return chunk
class EncrypterObjContext(CryptoWSGIContext):
def __init__(self, encrypter, logger):
super(EncrypterObjContext, self).__init__(
encrypter, 'object', logger)
def _check_headers(self, req):
# Check the user-metadata length before encrypting and encoding
error_response = check_metadata(req, self.server_type)
if error_response:
raise error_response
def encrypt_user_metadata(self, req, keys):
"""
Encrypt user-metadata header values. Replace each x-object-meta-<key>
user metadata header with a corresponding
x-object-transient-sysmeta-crypto-meta-<key> header which has the
crypto metadata required to decrypt appended to the encrypted value.
:param req: a swob Request
:param keys: a dict of encryption keys
"""
prefix = get_object_transient_sysmeta('crypto-meta-')
user_meta_headers = [h for h in req.headers.items() if
is_user_meta(self.server_type, h[0]) and h[1]]
crypto_meta = None
for name, val in user_meta_headers:
short_name = strip_user_meta_prefix(self.server_type, name)
new_name = prefix + short_name
enc_val, crypto_meta = encrypt_header_val(
self.crypto, val, keys[self.server_type])
req.headers[new_name] = append_crypto_meta(enc_val, crypto_meta)
req.headers.pop(name)
# store a single copy of the crypto meta items that are common to all
# encrypted user metadata independently of any such meta that is stored
# with the object body because it might change on a POST. This is done
# for future-proofing - the meta stored here is not currently used
# during decryption.
if crypto_meta:
meta = dump_crypto_meta({'cipher': crypto_meta['cipher'],
'key_id': keys['id']})
req.headers[get_object_transient_sysmeta('crypto-meta')] = meta
def handle_put(self, req, start_response):
self._check_headers(req)
keys = self.get_keys(req.environ, required=['object', 'container'])
self.encrypt_user_metadata(req, keys)
enc_input_proxy = EncInputWrapper(self.crypto, keys, req, self.logger)
req.environ['wsgi.input'] = enc_input_proxy
resp = self._app_call(req.environ)
# If an etag is in the response headers and a plaintext etag was
# calculated, then overwrite the response value with the plaintext etag
# provided it matches the ciphertext etag. If it does not match then do
# not overwrite and allow the response value to return to client.
mod_resp_headers = self._response_headers
if (is_success(self._get_status_int()) and
enc_input_proxy.plaintext_md5):
plaintext_etag = enc_input_proxy.plaintext_md5.hexdigest()
ciphertext_etag = enc_input_proxy.ciphertext_md5.hexdigest()
mod_resp_headers = [
(h, v if (h.lower() != 'etag' or
normalize_etag(v) != ciphertext_etag)
else plaintext_etag)
for h, v in mod_resp_headers]
start_response(self._response_status, mod_resp_headers,
self._response_exc_info)
return resp
def handle_post(self, req, start_response):
"""
        Encrypt the new object headers with a new IV and the current crypto.
Note that an object may have encrypted headers while the body may
remain unencrypted.
"""
self._check_headers(req)
keys = self.get_keys(req.environ)
self.encrypt_user_metadata(req, keys)
resp = self._app_call(req.environ)
start_response(self._response_status, self._response_headers,
self._response_exc_info)
return resp
@contextmanager
def _mask_conditional_etags(self, req, header_name):
"""
Calculate HMACs of etags in header value and append to existing list.
The HMACs are calculated in the same way as was done for the object
plaintext etag to generate the value of
X-Object-Sysmeta-Crypto-Etag-Mac when the object was PUT. The object
server can therefore use these HMACs to evaluate conditional requests.
HMACs of the etags are appended for the current root secrets and
historic root secrets because it is not known which of them may have
been used to generate the on-disk etag HMAC.
The existing etag values are left in the list of values to match in
case the object was not encrypted when it was PUT. It is unlikely that
a masked etag value would collide with an unmasked value.
:param req: an instance of swob.Request
:param header_name: name of header that has etags to mask
:return: True if any etags were masked, False otherwise
"""
masked = False
old_etags = req.headers.get(header_name)
if old_etags:
all_keys = self.get_multiple_keys(req.environ)
new_etags = []
for etag in Match(old_etags).tags:
if etag == '*':
new_etags.append(etag)
continue
new_etags.append('"%s"' % etag)
for keys in all_keys:
masked_etag = _hmac_etag(keys['object'], etag)
new_etags.append('"%s"' % masked_etag)
masked = True
req.headers[header_name] = ', '.join(new_etags)
try:
yield masked
finally:
if old_etags:
req.headers[header_name] = old_etags
def handle_get_or_head(self, req, start_response):
with self._mask_conditional_etags(req, 'If-Match') as masked1:
with self._mask_conditional_etags(req, 'If-None-Match') as masked2:
if masked1 or masked2:
update_etag_is_at_header(
req, 'X-Object-Sysmeta-Crypto-Etag-Mac')
resp = self._app_call(req.environ)
start_response(self._response_status, self._response_headers,
self._response_exc_info)
return resp
class Encrypter(object):
"""Middleware for encrypting data and user metadata.
By default all PUT or POST'ed object data and/or metadata will be
encrypted. Encryption of new data and/or metadata may be disabled by
setting the ``disable_encryption`` option to True. However, this middleware
should remain in the pipeline in order for existing encrypted data to be
read.
"""
def __init__(self, app, conf):
self.app = app
self.logger = get_logger(conf, log_route="encrypter")
self.crypto = Crypto(conf)
self.disable_encryption = config_true_value(
conf.get('disable_encryption', 'false'))
def __call__(self, env, start_response):
# If override is set in env, then just pass along
if config_true_value(env.get('swift.crypto.override')):
return self.app(env, start_response)
req = Request(env)
if self.disable_encryption and req.method in ('PUT', 'POST'):
return self.app(env, start_response)
try:
req.split_path(4, 4, True)
is_object_request = True
except ValueError:
is_object_request = False
if not is_object_request:
return self.app(env, start_response)
if req.method in ('GET', 'HEAD'):
handler = EncrypterObjContext(self, self.logger).handle_get_or_head
elif req.method == 'PUT':
handler = EncrypterObjContext(self, self.logger).handle_put
elif req.method == 'POST':
handler = EncrypterObjContext(self, self.logger).handle_post
else:
# anything else
return self.app(env, start_response)
try:
return handler(req, start_response)
except HTTPException as err_resp:
return err_resp(env, start_response)
|
|
import os
import cPickle as pickle
import mock
import flask
from google.appengine.api import taskqueue
from flask.ext import gae
from flask.ext.gae import queuehandler
@gae.pushqueue('testqueue')
def execute(*args, **kwargs):
return "OK"
tq_bp = flask.Blueprint('test_blueprint', __name__)
@tq_bp.route('/queue/')
@gae.pushqueue('testqueue')
def blueprint_task(a, b, c):
return "OK"
ROW_WORKER = mock.MagicMock()
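# The positional arguments to gae.pullqueue below appear to be the queue
# name, module, lease duration and batch size; the tests later assert that
# tasks are leased via lease_tasks(123, 50).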
@tq_bp.route('/worker')
@gae.pullqueue('pullqueue', 'module', 123, 50)
def worker(rows):
for task, data in rows:
ROW_WORKER(data)
yield task
class PushQueueViewTestCase(gae.testing.TestCase):
def setUp(self):
execute_patch = mock.patch.object(execute, 'func')
self.addCleanup(execute_patch.stop)
self.execute = execute_patch.start()
def create_app(self):
self.logger = mock.Mock()
app = flask.Flask(__name__)
self.view = execute
app.add_url_rule(
'/testhandler/',
view_func=execute)
app.register_blueprint(tq_bp, url_prefix='/bp')
return app
def make_request(self, *args, **kwargs):
payload = pickle.dumps((args, kwargs))
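        # The X-AppEngine-* headers mark the request as originating from the
        # task queue service; without them the handler responds with 403
        # (see test_no_queueheaders).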
return self.client.post('/testhandler/', data=payload, headers={
'X-AppEngine-QueueName': 'test',
'X-AppEngine-TaskRetryCount': 1,
})
def test_no_queueheaders(self):
resp = self.client.post('/testhandler/', data="")
self.assert403(resp)
def test_with_queueheaders(self):
"""
Check that when calling the endpoint with the correct headers
execute() gets called with the right values (the payload).
"""
resp = self.make_request(1, 2, 3, kw='arg')
self.assert200(resp)
self.execute.assert_called_once_with(1, 2, 3, kw='arg')
def test_with_future(self):
"""
Check that when execute() returns a future we get the result before
finishing the request
"""
self.execute.return_value = mock.Mock(['get_result'])
resp = self.make_request(1, 2, 3, kw='arg')
self.assert200(resp)
self.execute.assert_called_once_with(1, 2, 3, kw='arg')
        # Check the future result is retrieved
self.execute().get_result.assert_called_once_with()
def test_failure(self):
"""
When a call to execute() fails, make sure we log an exception and
return a 500 response so the task can be retried.
"""
self.execute.side_effect = NotImplementedError()
resp = self.make_request(1, 2, 3, kw='arg')
self.assertEqual(resp.status_code, 500)
@mock.patch('google.appengine.api.taskqueue.add')
def test_queue(self, tq_add):
payload = pickle.dumps(((1, 2, 3), {'kw': 'arg'}))
self.view.queue(1, 2, 3, kw='arg')
tq_add.assert_called_once_with(
url='/testhandler/',
queue_name='testqueue',
payload=payload,
transactional=None,
eta=None,
target=None,
name=None,
)
@mock.patch('google.appengine.api.taskqueue.add')
def test_queue_extra_args(self, tq_add):
self.view.queue(1, 2, 3, kw='arg',
_eta=mock.sentinel.ETA,
_transactional=mock.sentinel.TRANSACTIONAL,
_target=mock.sentinel.TARGET,
_name=mock.sentinel.NAME)
tq_add.assert_called_once_with(
url='/testhandler/',
queue_name='testqueue',
payload=mock.ANY,
transactional=mock.sentinel.TRANSACTIONAL,
eta=mock.sentinel.ETA,
target=mock.sentinel.TARGET,
name=mock.sentinel.NAME,
)
@mock.patch('google.appengine.api.taskqueue.add')
def test_blueprint_queue(self, tq_add):
"""
Check that we get the correct url when registered under a blueprint
"""
blueprint_task.queue(1, 2, 3)
tq_add.assert_called_once_with(
url='/bp/queue/',
queue_name='testqueue',
payload=mock.ANY,
transactional=None,
eta=None,
target=None,
name=None,
)
@mock.patch('google.appengine.api.taskqueue.add')
def test_alternate_app(self, tq_add):
app2 = flask.Flask('other_guy')
@app2.route('/foo/bar/baz/')
@gae.pushqueue('other_queue')
def other_app_handler():
return "OK"
other_app_handler.queue(_app=app2)
tq_add.assert_called_once_with(
url='/foo/bar/baz/',
queue_name='other_queue',
payload=mock.ANY,
transactional=None,
eta=None,
target=None,
name=None,
)
class PullWorkerTestCase(gae.testing.TestCase):
taskqueue_stub = {'root_path': os.path.dirname(__file__)}
def create_app(self):
app = flask.Flask(__name__)
app.register_blueprint(tq_bp)
return app
def setUp(self):
ROW_WORKER.reset_mock()
@mock.patch('google.appengine.api.taskqueue.Queue')
@mock.patch('google.appengine.api.taskqueue.Task', mock.call)
def test_push_single(self, Queue):
worker.push(mock.sentinel.TASK1, eta=mock.sentinel.ETA)
Queue().add.assert_called_once_with(
[mock.call(
payload=pickle.dumps(mock.sentinel.TASK1),
method='PULL',
eta=mock.sentinel.ETA)]
)
@mock.patch('google.appengine.api.taskqueue.Queue')
@mock.patch('google.appengine.api.taskqueue.Task', mock.call)
def test_push_multiple(self, Queue):
worker.push(mock.sentinel.TASK1,
mock.sentinel.TASK2)
Queue().add.assert_called_once_with(
[mock.call(
payload=pickle.dumps(mock.sentinel.TASK1),
method='PULL'),
mock.call(
payload=pickle.dumps(mock.sentinel.TASK2),
method='PULL')]
)
@mock.patch.object(taskqueue.Queue, 'delete_tasks')
@mock.patch.object(taskqueue.Queue, 'lease_tasks',
wraps=taskqueue.Queue('pullqueue').lease_tasks)
def test_pull(self, lease_tasks, delete_tasks):
for i in xrange(100):
worker.push(i)
worker._pull()
self.assertEqual(ROW_WORKER.call_args_list,
[mock.call(i) for i in xrange(100)])
        # lease_tasks is called 3 times: two full batches of 50 tasks, plus a
        # final lease that returns nothing and ends the loop
self.assertEqual(
lease_tasks.call_args_list,
[mock.call(123, 50), mock.call(123, 50), mock.call(123, 50)])
# but we only expect two calls to delete_tasks
self.assertEqual(
delete_tasks.call_args_list,
[mock.call([mock.ANY]*50), mock.call([mock.ANY]*50)])
def test_pull_tags(self):
pass
@mock.patch.object(taskqueue.Queue, 'delete_tasks')
@mock.patch.object(taskqueue.Queue, 'lease_tasks')
def test_pull_locked(self, delete_tasks, lease_tasks):
queuehandler._PullWorkerLock.acquire('pullqueue')
worker._pull()
self.assertFalse(lease_tasks.called)
self.assertFalse(delete_tasks.called)
self.assertFalse(ROW_WORKER.called)
@mock.patch.object(queuehandler, 'start_new_background_thread')
def test_get(self, bg_thread):
self.client.get('/worker')
bg_thread.assert_called_once_with(worker._pull, ())
def test_start(self):
pass
|
|
__author__ = 'volodymyr'
from gizer.util import *
import json
import re
def test_get_cleaned_field_name():
field_name = '_test_'
assert get_cleaned_field_name(field_name) == 'test_'
field_name = '__test_'
assert get_cleaned_field_name(field_name) == 'test_'
field_name = '_ _test'
assert get_cleaned_field_name(field_name) == 'test'
field_name = '_ _3_test'
assert get_cleaned_field_name(field_name) == 'test'
    print('TEST', 'get_cleaned_field_name', 'PASSED')
def test_isIdField():
field_names = ['id', 'oid', '_id', '_oid', '_id_oid', 'id_oid']
for field_name in field_names:
assert is_id_field(field_name)
field_names = ['___id', 'oid_aaaa', 'ID']
for field_name in field_names:
assert not is_id_field(field_name)
print('TEST', 'isIdField', 'PASSED')
def test_get_postgres_type():
type_name = 'STRING'
assert get_postgres_type(type_name) == 'text'
type_name = 'INT'
assert get_postgres_type(type_name) == 'integer'
type_name = 'BOOLean'
assert get_postgres_type(type_name) == 'boolean'
type_name = 'LONG'
assert get_postgres_type(type_name) == 'bigint'
# TODO should be 'text'
# type_name = 'string'
# assert get_postgres_type(type_name) is None
print('TEST', 'get_postgres_type', 'PASSED')
def test_get_table_name_from_list():
# TODO should be person_relative_contact_phones
path = 'persons.relatives.contacts.phones'
assert get_table_name_from_list(path.split('.')) == 'person_relative_contact_phones'
path = 'persons.relatives.2.contacts.3.phones.4'
assert get_table_name_from_list(path.split('.')) == 'person_relative_contact_phones'
path = 'persons.relatives.2contactd.phones'
assert get_table_name_from_list(path.split('.')) == 'person_relative_2contactd_phones'
path = 'persons.relatives2'
assert get_table_name_from_list(path.split('.')) == 'person_relatives2'
path = 'persons'
assert get_table_name_from_list(path.split('.')) == 'persons'
print('TEST', 'get_table_name_from_list', 'PASSED')
def test_get_root_table_from_path():
path = 'persons.relatives.2.contacts.3.phones.4'
# assert get_root_table_from_path(path) == 'persons_relatives_contacts_phones'
assert get_root_table_from_path(path) == 'persons'
path = 'persons.relatives.contacts.3.phones.4'
assert get_root_table_from_path(path) == 'persons'
path = 'persons.relatives.contacts.phones'
assert get_root_table_from_path(path) == 'persons'
path = 'persons.4.relatives.contacts.phones'
assert get_root_table_from_path(path) == 'persons'
print('TEST', 'get_root_table_from_path', 'PASSED')
def test_get_indexes_dictionary():
path = 'persons.relatives.2.contacts.3.phones.4'
model = {'person_relative_contact_phones': '5', 'person_relatives': '3', 'person_relative_contacts': '4'}
f_return = get_indexes_dictionary(path)
assert model == f_return
path = 'persons.relatives.contacts.phones.4'
model = {'person_relative_contact_phones': '5'}
f_return = get_indexes_dictionary(path)
assert model == f_return
path = 'persons.1.relatives.2.contacts.3.phones.4'
model = {'person_relative_contact_phones': '5', 'person_relatives': '3', 'person_relative_contacts': '4',
'persons': '2'}
f_return = get_indexes_dictionary(path)
assert model == f_return
path = 'persons.relatives.contacts.phones'
model = {}
f_return = get_indexes_dictionary(path)
assert model == f_return
path = 'persons.1.relatives.2.contacts.3.phones'
model = {'person_relative_contacts': '4', 'persons': '2', 'person_relatives': '3'}
f_return = get_indexes_dictionary(path)
assert model == f_return
path = 'persons.1.relatives.contacts.3.phones.6.numbers'
model = {'person_relative_contacts': '4', 'persons': '2', 'person_relative_contact_phones': '7'}
f_return = get_indexes_dictionary(path)
assert model == f_return
print('TEST', 'get_indexes_dictionary', 'PASSED')
def test_get_indexes_dictionary_idx():
path = 'persons.relatives.2.contacts.3.phones.4'
model = {'persons_relatives_contacts_phones': '5', 'persons_relatives': '3', 'persons_relatives_contacts': '4'}
f_return = get_indexes_dictionary_idx(path)
assert model == f_return
path = 'persons.relatives.contacts.phones.4'
model = {'persons_relatives_contacts_phones': '5'}
f_return = get_indexes_dictionary_idx(path)
assert model == f_return
path = 'persons.1.relatives.2.contacts.3.phones.4'
model = {'persons_relatives_contacts_phones': '5', 'persons_relatives': '3', 'persons_relatives_contacts': '4',
'persons': '2'}
f_return = get_indexes_dictionary_idx(path)
assert model == f_return
path = 'persons.relatives.contacts.phones'
model = {}
f_return = get_indexes_dictionary_idx(path)
assert model == f_return
path = 'persons.1.relatives.2.contacts.3.phones'
model = {'persons_relatives_contacts': '4', 'persons': '2', 'persons_relatives': '3'}
f_return = get_indexes_dictionary_idx(path)
assert model == f_return
path = 'persons.1.relatives.contacts.3.phones.6.numbers'
model = {'persons_relatives_contacts': '4', 'persons': '2', 'persons_relatives_contacts_phones': '7'}
f_return = get_indexes_dictionary_idx(path)
assert model == f_return
    print('TEST', 'get_indexes_dictionary_idx', 'PASSED')
def test_get_last_idx_from_path():
path = 'persons.relatives.contacts.phones'
assert get_last_idx_from_path(path) is None
path = 'persons.1.relatives.2.contacts.3.phones.4'
assert get_last_idx_from_path(path) == '5'
path = 'persons.relatives.contacts.phones.234'
assert get_last_idx_from_path(path) == '235'
print('TEST', 'get_last_idx_from_path', 'PASSED')
def test_get_tables_structure():
schema = json.loads(open('test_data/test_schema5.txt').read())
collection_name = 'documents'
result = get_tables_structure(schema, collection_name, {}, {}, 1, '')
model = {
u'documents': {
u'personal_info_driver_licence': u'text',
u'personal_info_fl_name_f_name': u'text',
u'id_bsontype': u'integer',
u'personal_info_date_of_birth': u'text',
u'clients': u'text',
u'personal_info_fl_name_l_name': u'text',
u'id_oid': u'text'
},
u'document_relative_contacts': {
u'city': u'text',
u'apartment': u'text',
u'street': u'text',
u'idx': u'bigint',
u'zip': u'text',
u'document_relatives_idx': u'bigint',
u'state': u'text',
u'documents_id_oid': u'text',
u'id': u'text'
},
u'document_relative_contact_phones': {
u'count': u'integer',
u'documents_id_oid': u'text',
u'idx': u'bigint',
u'document_relatives_idx': u'bigint',
u'number': u'text',
u'document_relative_contacts_idx': u'bigint'
},
u'document_dates': {
u'date1': u'text',
u'date3': u'text',
u'documents_id_oid': u'text',
u'date4': u'text',
u'idx': u'bigint',
u'date2': u'text'
},
u'document_relatives': {
u'age': u'integer',
u'documents_id_oid': u'text',
u'relation': u'text',
u'name': u'text',
u'idx': u'bigint'
},
u'document_personal_info_fl_name_SSNs': {
u'documents_id_oid': u'text',
u'SSNs': u'integer',
u'idx': u'bigint'
},
u'document_indeces': {
u'documents_id_oid': u'text',
u'indeces': u'integer',
u'idx': u'bigint'
},
u'document_relative_jobs': {
u'test1': u'integer',
u'test2': u'text',
u'documents_id_oid': u'text',
u'idx': u'bigint',
u'document_relatives_idx': u'bigint'
}
}
assert model == result
schema = json.loads(open('test_data/test_schema6.txt').read())
result = get_tables_structure(schema, collection_name, {}, {}, 1, '')
model = {'documents': {}}
assert model == result
schema = json.loads(open('test_data/test_schema4.txt').read())
result = get_tables_structure(schema, collection_name, {}, {}, 1, '')
model = {
'documents': {
'personal_info_driver_licence': 'text',
'personal_info_date_of_birth': 'text',
'personal_info_full_name_bigint_number':'bigint',
'id_bsontype': 'integer',
'personal_info_full_name_last_name': 'text',
'field': 'text',
'id_oid': 'text',
'personal_info_full_name_first_name': 'text'
},
'document_relative_contacts': {
'city': 'text',
'apartment': 'text',
'street': 'text',
'idx': 'bigint',
'zip': 'text',
'document_relatives_idx': 'bigint',
'state': 'text',
'documents_id_oid': 'text',
'id': 'text'
},
'document_relative_contact_phones': {
'count': 'integer',
'documents_id_oid': 'text',
'idx': 'bigint',
'document_relatives_idx': 'bigint',
'number': 'text',
'document_relative_contacts_idx': 'bigint'
},
'document_dates': {
'date1': 'text',
'date3': 'text',
'documents_id_oid': 'text',
'date4': 'text',
'idx': 'bigint',
'date2': 'text'
},
'document_relatives': {
'age': 'integer',
'documents_id_oid': 'text',
'relation': 'text',
'name': 'text',
'idx': 'bigint'
},
'document_indeces': {
'documents_id_oid': 'text',
'indeces': 'integer',
'idx': 'bigint'
},
'document_personal_info_full_name_SSNs': {
'documents_id_oid': 'text',
'SSNs': 'integer',
'idx': 'bigint'
}
}
assert model == result
schema = json.loads(open('test_data/test_schema.txt').read())
result = get_tables_structure(schema, collection_name, {}, {}, 1, '')
model = {
'documents': {
'field': 'text',
'i2d_bsontype': 'integer',
'i2d_oid': 'text'
},
'document_relative_contacts': {
'city': 'text',
'apartment': 'text',
'idx': 'bigint',
'zip': 'text',
'document_relatives_idx': 'bigint',
'state': 'text',
'street': 'text',
'documents_idx': 'bigint'
},
'document_relative_contact_phones': {
'count': 'integer',
'idx': 'bigint',
'document_relatives_idx': 'bigint',
'number': 'text',
'document_relative_contacts_idx': 'bigint',
'documents_idx': 'bigint'
},
'document_dates': {
'date1': 'text',
'date3': 'text',
'date2': 'text',
'date4': 'text',
'idx': 'bigint',
'documents_idx': 'bigint'
},
'document_relatives': {
'age': 'integer',
'documents_idx': 'bigint',
'relation': 'text',
'name': 'text',
'idx': 'bigint'
},
'document_indeces': {
'documents_idx': 'bigint',
'indeces': 'integer',
'idx': 'bigint'
}
}
assert model == result
print('TEST', 'get_tables_structure', 'PASSED')
def test_get_quotes_using():
schema = json.loads(open('test_data/test_schema5.txt').read())
collection_name = 'documents'
table = 'documents'
field_name = 'id_bsontype'
model = False
result = get_quotes_using(schema, table, field_name, collection_name)
assert model == result
table = 'documents'
field_name = 'personal_info_fl_name_f_name'
model = True
result = get_quotes_using(schema, table, field_name, collection_name)
assert model == result
table = 'document_relative_contacts'
field_name = 'zip'
model = True
result = get_quotes_using(schema, table, field_name, collection_name)
assert model == result
table = 'document_personal_info_fl_name_SSNs'
field_name = 'documents_id_oid'
model = True
result = get_quotes_using(schema, table, field_name, collection_name)
assert model == result
table = 'document_personal_info_fl_name_SSNs'
field_name = 'SSNs'
model = False
result = get_quotes_using(schema, table, field_name, collection_name)
assert model == result
print('TEST', 'get_quotes_using', 'PASSED')
def test_get_column_type():
schema = json.loads(open('test_data/test_schema4.txt').read())
collection_name = 'documents'
table = 'document_relatives'
field_name = 'relation'
model = 'text'
result = get_column_type(schema, table, field_name, collection_name)
assert model == result
table = 'document_personal_info_full_name_SSNs'
field_name = 'SSNs'
model = 'integer'
result = get_column_type(schema, table, field_name, collection_name)
assert model == result
table = 'document_personal_info_full_name_SSNs'
field_name = 'documents_id_oid'
model = 'text'
result = get_column_type(schema, table, field_name, collection_name)
assert model == result
table = 'document_relative_contact_phones'
field_name = 'document_relative_contacts_idx'
model = 'bigint'
result = get_column_type(schema, table, field_name, collection_name)
assert model == result
table = 'document_relative_contact_phones'
field_name = 'number'
model = 'text'
result = get_column_type(schema, table, field_name, collection_name)
assert model == result
print('TEST', 'get_column_type', 'PASSED')
def test_get_ids_list():
schema_str = """{
"id": {
"oid": "STRING",
"bsontype": "INT"
},
"comments": [{
"_id": {
"oid": "STRING",
"bsontype": "INT"
},
"body": "STRING"
}],
"_id": "INT"
}"""
schema = json.loads(schema_str)
result = get_ids_list(schema)
model = {"id_oid":"text"}
assert result == model
schema_str = """{
"comments": [{
"_id": {
"oid": "STRING",
"bsontype": "INT"
},
"body": "STRING"
}],
"id": "INT"
}"""
schema = json.loads(schema_str)
result = get_ids_list(schema)
model = {"id":"integer"}
assert result == model
schema_str = """{
"comments": [{
"_id": {
"oid": "STRING",
"bsontype": "INT"
},
"body": "STRING"
}],
"id": "INT",
"_id": {
"oid": "STRING",
"bsontype": "INT"
}
}"""
schema = json.loads(schema_str)
result = get_ids_list(schema)
model = {"id_oid":"text"}
assert result == model
schema_str = """{
"comments": [{
"_id": {
"oid": "STRING",
"bsontype": "INT"
},
"body": "STRING"
}]
}"""
schema = json.loads(schema_str)
result = get_ids_list(schema)
model = {"idx":"bigint"}
assert result == model
print('TEST', 'get_ids_list', 'PASSED')
# functions for comparing SQL queries
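# The idea: parse each statement together with its bound values into a
# canonical dict keyed by statement type, table and values, so two SQL
# batches can be compared regardless of ordering or placeholder formatting.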
def sqls_to_dict(sql_dict):
parsed_dict = {}
# print(sql_dict)
for model_item in sql_dict:
if model_item == 'upd' or model_item == 'del':
if model_item == 'upd':
if type(sql_dict[model_item]) == dict:
for sql in sql_dict[model_item]:
r = re.compile('UPDATE(.*?)WHERE')
ext_key = str.strip(r.search(sql).group(1)).replace('"', '')
parsed_dict['UPD_' + ext_key] = parse_upd(sql, sql_dict[model_item][sql])
if model_item == 'del':
if type(sql_dict[model_item]) == dict:
for sql in sql_dict[model_item]:
r = re.compile('DELETE FROM(.*?)WHERE')
ext_key = str.strip(r.search(sql).group(1)).replace('"', '')
parsed_dict['DEL_' + ext_key] = parse_del({sql: sql_dict[model_item][sql]})
else:
q_item = model_item.iterkeys().next()
q_vals = list(model_item.itervalues().next()[0])
if q_item.startswith('DELETE FROM '):
r = re.compile('DELETE FROM(.*?)WHERE')
ext_key = str.strip(r.search(q_item).group(1)).replace('"', '')
parsed_dict['DELETE_' + ext_key] = parse_del({q_item: model_item[q_item][0]})
elif q_item.startswith('UPDATE '):
# for sql in sql_dict[model_item]:
# r = re.compile('UPDATE (.*?)WHERE')
# ext_key = str.strip(r.search(q_item).group(1))
parsed_dict.update(parse_upd(q_item, model_item[q_item][0]))
elif q_item.startswith('do $$ begin'):
upsert_dict = upsert_to_dict(q_item, q_vals)
parsed_dict.update(upsert_dict)
elif q_item.startswith('INSERT INTO '):
insert_dict = parse_insert(q_item, q_vals)
parsed_dict.update(insert_dict)
else:
pass
return parsed_dict
def upsert_to_dict(tmplt, values):
res = re.search('begin (.*?)IF FOUND THEN', tmplt)
if not res:
return {'empty': None}
upd_tmplt = str.strip(res.group(1))
val_count = upd_tmplt.count('=(%s)')
upd_dict = parse_upd(tmplt, values[:val_count])
res = re.search('END IF; BEGIN (.*?) RETURN;', tmplt)
if not res:
return {'empty': None}
ins_tmplt = str.strip(res.group(1))
ins_dict = parse_insert(ins_tmplt, values[val_count:])
main_key = 'UPSERT_' + upd_dict.iterkeys().next() + '_' + ins_dict.iterkeys().next()
ret_val = {main_key: {'upd': upd_dict, 'ins': ins_dict}}
return ret_val
def parse_insert(sql_ins, values):
sql = sql_ins
updated_table = re.search('INSERT INTO(.*?)\(', sql).group(1).strip().replace('"', '')
columns_strs = [ins_col.strip() for ins_col in re.search('\((.*?)\)', sql).group(1).split(',')]
values_strs = [set_col.strip() for set_col in re.search('VALUES\((.*?)\);', sql).group(1).split(',')]
assert len(columns_strs) == len(values_strs)
    insert_value_dict = {}
    insert_values_list = []
    for i, column in enumerate(columns_strs):
        el_val = (column + '=' + str(values[i])).replace('"', '').replace("'", '') \
            .replace(':', '').replace('(', '').replace(')', '')
        insert_values_list.append(el_val)
        # also record the column -> value mapping (was left empty before)
        insert_value_dict[column.replace('"', '')] = values[i]
    ins_key = 'INSERT_' + '_'.join(sorted(insert_values_list))
    return {ins_key: {'table': updated_table, 'insert_value': insert_value_dict}}
def parse_upd(sql_upd, values):
sql = sql_upd # ql_upd.iterkeys().next()
updated_table = re.search('UPDATE(.*?)SET', sql).group(1).strip().replace('"', '')
set_strs = [set_col.strip() for set_col in re.search('SET(.*?)WHERE', sql).group(1).split(', ')]
where_strs = [set_col.strip() for set_col in re.search('WHERE(.*?);', sql).group(1).split('and')]
all_strs = set_strs + where_strs
i = 0
all_str_val = []
for el in all_strs:
if el.count('(%s)') > 0:
            el_val = el.replace('(%s)', str(values[i])).replace('"', '').replace("'", '') \
                .replace(':', '').replace('(', '').replace(')', '')
all_str_val.append(el_val)
i = i + 1
key = 'UPDATE_' + '_'.join(sorted(all_str_val))
set_value = {}
last_i = 0
filled_values = 0
for i, column in enumerate(set_strs):
set_value[column.replace('"', '')] = values[i]
if not column.endswith('=(%s)'):
filled_values = filled_values + 1
last_i = i + 1
last_i = last_i - filled_values
where_value = {}
for i, column in enumerate(where_strs):
where_value[column.replace('"', '')] = values[i + last_i]
return {key: {'table': updated_table, 'set_value': set_value, 'where_dict': where_value}}
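# Example (a sketch): parse_upd('UPDATE "t" SET "a"=(%s) WHERE "id"=(%s);',
# [1, 7]) yields a single entry keyed 'UPDATE_a=1_id=7' holding the table
# name plus the set/where value dicts.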
def parse_del(sql_upd):
if not type(sql_upd) is dict:
return {}
stmnt = sql_upd.iterkeys().next()
values = sql_upd.itervalues().next()
clauses = stmnt.split(' ')
updated_table = clauses[2].replace('"', '')
where_clauses = [cl.replace(';', '') for cl in clauses[4:] if cl != 'and']
where_dict = {}
for i, cl in enumerate(where_clauses):
where_dict[cl.replace('"', '')] = values[i]
return {'table': 'DELETE_' + updated_table, 'where_dict': where_dict}
def upsert_pretty_print(upsert_stmnts):
ups_SQL = upsert_stmnts.iterkeys().next()
ups_vals = upsert_stmnts[ups_SQL][0]
res = re.search('begin (.*?)IF FOUND THEN', ups_SQL)
if res:
upd_tmplt = str.strip(res.group(1))
val_count = upd_tmplt.count('=(%s)')
print(upd_tmplt)
print([str(el) for el in ups_vals[:val_count]])
res = re.search('END IF; BEGIN (.*?) RETURN;', ups_SQL)
if res:
ins_tmplt = str.strip(res.group(1))
print(ins_tmplt)
print ([str(el) for el in ups_vals[val_count:]])
else:
print(ups_SQL)
print([str(el) for el in ups_vals])
def sql_pretty_print(sqls):
print('\n\n')
for el in sqls:
if type(el) is dict:
upsert_pretty_print(el)
else:
print(el)
def run_tests_():
test_get_cleaned_field_name()
test_isIdField()
test_get_postgres_type()
test_get_table_name_from_list()
test_get_root_table_from_path()
test_get_indexes_dictionary()
test_get_indexes_dictionary_idx()
test_get_last_idx_from_path()
test_get_tables_structure()
test_get_quotes_using()
test_get_column_type()
test_get_ids_list()
if __name__ == "__main__":
run_tests_()
|
|
# postgresql/psycopg2.py
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: postgresql+psycopg2
:name: psycopg2
:dbapi: psycopg2
:connectstring: postgresql+psycopg2://user:password@host:port/dbname[?key=value&key=value...]
:url: http://pypi.python.org/pypi/psycopg2/
psycopg2 Connect Arguments
-----------------------------------
psycopg2-specific keyword arguments which are accepted by
:func:`.create_engine()` are:
* *server_side_cursors* - Enable the usage of "server side cursors" for SQL
statements which support this feature. What this essentially means from a
psycopg2 point of view is that the cursor is created using a name, e.g.
``connection.cursor('some name')``, which has the effect that result rows are
not immediately pre-fetched and buffered after statement execution, but are
instead left on the server and only retrieved as needed. SQLAlchemy's
:class:`~sqlalchemy.engine.ResultProxy` uses special row-buffering
behavior when this feature is enabled, such that groups of 100 rows at a
time are fetched over the wire to reduce conversational overhead.
Note that the ``stream_results=True`` execution option is a more targeted
way of enabling this mode on a per-execution basis (see the example following
this list).
* *use_native_unicode* - Enable the usage of Psycopg2 "native unicode" mode
per connection. True by default.
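As a brief sketch (connection details are illustrative), server side cursors
may be enabled engine-wide with::
create_engine("postgresql+psycopg2://user:password@host/dbname", server_side_cursors=True)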
Unix Domain Connections
------------------------
psycopg2 supports connecting via Unix domain connections. When the ``host``
portion of the URL is omitted, SQLAlchemy passes ``None`` to psycopg2,
which specifies Unix-domain communication rather than TCP/IP communication::
create_engine("postgresql+psycopg2://user:password@/dbname")
By default, psycopg2 connects to a Unix-domain socket in ``/tmp``, or to
whatever socket directory was specified when PostgreSQL was built. This
value can be overridden by passing a pathname to psycopg2,
using ``host`` as an additional keyword argument::
create_engine("postgresql+psycopg2://user:password@/dbname?host=/var/lib/postgresql")
See also:
`PQconnectdbParams <http://www.postgresql.org/docs/9.1/static/libpq-connect.html#LIBPQ-PQCONNECTDBPARAMS>`_
Per-Statement/Connection Execution Options
-------------------------------------------
The following DBAPI-specific options are respected when used with
:meth:`.Connection.execution_options`, :meth:`.Executable.execution_options`,
:meth:`.Query.execution_options`, in addition to those not specific to DBAPIs:
* isolation_level - Set the transaction isolation level for the lifespan of a
:class:`.Connection` (can only be set on a connection, not a statement
or query). This includes the options ``SERIALIZABLE``, ``READ COMMITTED``,
``READ UNCOMMITTED`` and ``REPEATABLE READ``.
* stream_results - Enable or disable usage of server side cursors.
If ``None`` or not set, the ``server_side_cursors`` option of the
:class:`.Engine` is used.
Unicode
-------
By default, the psycopg2 driver uses the ``psycopg2.extensions.UNICODE``
extension, such that the DBAPI receives and returns all strings as Python
Unicode objects directly - SQLAlchemy passes these values through without
change. Psycopg2 here will encode/decode string values based on the
current "client encoding" setting; by default this is the value in
the ``postgresql.conf`` file, which often defaults to ``SQL_ASCII``.
Typically, this can be changed to ``utf-8``, as a more useful default::
#client_encoding = sql_ascii # actually, defaults to database
# encoding
client_encoding = utf8
A second way to affect the client encoding is to set it within Psycopg2
locally. SQLAlchemy will call psycopg2's ``set_client_encoding()``
method (see: http://initd.org/psycopg/docs/connection.html#connection.set_client_encoding)
on all new connections based on the value passed to
:func:`.create_engine` using the ``client_encoding`` parameter::
engine = create_engine("postgresql://user:pass@host/dbname", client_encoding='utf8')
This overrides the encoding specified in the Postgresql client configuration.
.. versionadded:: 0.7.3
The psycopg2-specific ``client_encoding`` parameter to
:func:`.create_engine`.
SQLAlchemy can also be instructed to skip the usage of the psycopg2
``UNICODE`` extension and to instead utilize its own unicode encode/decode
services, which are normally reserved only for those DBAPIs that don't
fully support unicode directly. Passing ``use_native_unicode=False`` to
:func:`.create_engine` will disable usage of ``psycopg2.extensions.UNICODE``.
SQLAlchemy will instead encode data itself into Python bytestrings on the way
in and coerce from bytes on the way back,
using the value of the :func:`.create_engine` ``encoding`` parameter, which
defaults to ``utf-8``.
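For example (a minimal sketch; the URL is illustrative)::
    engine = create_engine(
        "postgresql+psycopg2://user:password@host/dbname",
        use_native_unicode=False, encoding='utf-8')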
SQLAlchemy's own unicode encode/decode functionality is steadily becoming
obsolete as more DBAPIs support unicode fully, in line with the approach of
Python 3; in modern usage psycopg2 should be relied upon to handle unicode.
Transactions
------------
The psycopg2 dialect fully supports SAVEPOINT and two-phase commit operations.
.. _psycopg2_isolation:
Transaction Isolation Level
---------------------------
The ``isolation_level`` parameter of :func:`.create_engine` here makes use
of psycopg2's ``set_isolation_level()`` connection method, rather than
issuing a ``SET SESSION CHARACTERISTICS`` command. This is because psycopg2
resets the isolation level on each new transaction, and so needs to know
at the API level what level should be used.
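For example (a minimal sketch; the URL is illustrative)::
    engine = create_engine(
        "postgresql+psycopg2://user:password@host/dbname",
        isolation_level="REPEATABLE READ")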
NOTICE logging
---------------
The psycopg2 dialect will log Postgresql NOTICE messages via the
``sqlalchemy.dialects.postgresql`` logger::
import logging
logging.getLogger('sqlalchemy.dialects.postgresql').setLevel(logging.INFO)
HSTORE type
------------
The psycopg2 dialect will make use of the
``psycopg2.extensions.register_hstore()`` extension when using the HSTORE
type. This replaces SQLAlchemy's pure-Python HSTORE coercion which takes
effect for other DBAPIs.
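A minimal sketch of declaring an ``HSTORE`` column (the table and column
names are illustrative)::
    from sqlalchemy import Table, Column, Integer, MetaData
    from sqlalchemy.dialects.postgresql import HSTORE
    data = Table('data', MetaData(),
                 Column('id', Integer, primary_key=True),
                 Column('tags', HSTORE))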
"""
from __future__ import absolute_import
import re
import logging
from ... import util, exc
import decimal
from ... import processors
from ...engine import result as _result
from ...sql import expression
from ... import types as sqltypes
from .base import PGDialect, PGCompiler, \
PGIdentifierPreparer, PGExecutionContext, \
ENUM, ARRAY, _DECIMAL_TYPES, _FLOAT_TYPES,\
_INT_TYPES
from .hstore import HSTORE
logger = logging.getLogger('sqlalchemy.dialects.postgresql')
class _PGNumeric(sqltypes.Numeric):
def bind_processor(self, dialect):
return None
def result_processor(self, dialect, coltype):
if self.asdecimal:
if coltype in _FLOAT_TYPES:
return processors.to_decimal_processor_factory(decimal.Decimal)
elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES:
                # psycopg2 returns Decimal natively for 1700
return None
else:
raise exc.InvalidRequestError(
"Unknown PG numeric type: %d" % coltype)
else:
if coltype in _FLOAT_TYPES:
                # psycopg2 returns float natively for 701
return None
elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES:
return processors.to_float
else:
raise exc.InvalidRequestError(
"Unknown PG numeric type: %d" % coltype)
class _PGEnum(ENUM):
def __init__(self, *arg, **kw):
super(_PGEnum, self).__init__(*arg, **kw)
# Py2K
if self.convert_unicode:
self.convert_unicode = "force"
# end Py2K
class _PGArray(ARRAY):
def __init__(self, *arg, **kw):
super(_PGArray, self).__init__(*arg, **kw)
# Py2K
# FIXME: this check won't work for setups that
# have convert_unicode only on their create_engine().
if isinstance(self.item_type, sqltypes.String) and \
self.item_type.convert_unicode:
self.item_type.convert_unicode = "force"
# end Py2K
class _PGHStore(HSTORE):
def bind_processor(self, dialect):
if dialect._has_native_hstore:
return None
else:
return super(_PGHStore, self).bind_processor(dialect)
def result_processor(self, dialect, coltype):
if dialect._has_native_hstore:
return None
else:
return super(_PGHStore, self).result_processor(dialect, coltype)
# When we're handed literal SQL, ensure it's a SELECT-query. Since
# 8.3, combining cursors and "FOR UPDATE" has been fine.
SERVER_SIDE_CURSOR_RE = re.compile(
r'\s*SELECT',
re.I | re.UNICODE)
_server_side_id = util.counter()
class PGExecutionContext_psycopg2(PGExecutionContext):
def create_cursor(self):
# TODO: coverage for server side cursors + select.for_update()
if self.dialect.server_side_cursors:
is_server_side = \
self.execution_options.get('stream_results', True) and (
(self.compiled and isinstance(self.compiled.statement, expression.Selectable) \
or \
(
(not self.compiled or
isinstance(self.compiled.statement, expression.TextClause))
and self.statement and SERVER_SIDE_CURSOR_RE.match(self.statement))
)
)
else:
is_server_side = \
self.execution_options.get('stream_results', False)
self.__is_server_side = is_server_side
if is_server_side:
# use server-side cursors:
# http://lists.initd.org/pipermail/psycopg/2007-January/005251.html
ident = "c_%s_%s" % (hex(id(self))[2:], hex(_server_side_id())[2:])
return self._dbapi_connection.cursor(ident)
else:
return self._dbapi_connection.cursor()
def get_result_proxy(self):
# TODO: ouch
if logger.isEnabledFor(logging.INFO):
self._log_notices(self.cursor)
if self.__is_server_side:
return _result.BufferedRowResultProxy(self)
else:
return _result.ResultProxy(self)
def _log_notices(self, cursor):
for notice in cursor.connection.notices:
# NOTICE messages have a
# newline character at the end
logger.info(notice.rstrip())
cursor.connection.notices[:] = []
class PGCompiler_psycopg2(PGCompiler):
def visit_mod_binary(self, binary, operator, **kw):
return self.process(binary.left, **kw) + " %% " + \
self.process(binary.right, **kw)
def post_process_text(self, text):
return text.replace('%', '%%')
class PGIdentifierPreparer_psycopg2(PGIdentifierPreparer):
def _escape_identifier(self, value):
value = value.replace(self.escape_quote, self.escape_to_quote)
return value.replace('%', '%%')
class PGDialect_psycopg2(PGDialect):
driver = 'psycopg2'
# Py2K
supports_unicode_statements = False
# end Py2K
default_paramstyle = 'pyformat'
supports_sane_multi_rowcount = False
execution_ctx_cls = PGExecutionContext_psycopg2
statement_compiler = PGCompiler_psycopg2
preparer = PGIdentifierPreparer_psycopg2
psycopg2_version = (0, 0)
_has_native_hstore = False
colspecs = util.update_copy(
PGDialect.colspecs,
{
sqltypes.Numeric: _PGNumeric,
ENUM: _PGEnum, # needs force_unicode
sqltypes.Enum: _PGEnum, # needs force_unicode
ARRAY: _PGArray, # needs force_unicode
HSTORE: _PGHStore,
}
)
def __init__(self, server_side_cursors=False, use_native_unicode=True,
client_encoding=None,
use_native_hstore=True,
**kwargs):
PGDialect.__init__(self, **kwargs)
self.server_side_cursors = server_side_cursors
self.use_native_unicode = use_native_unicode
self.use_native_hstore = use_native_hstore
self.supports_unicode_binds = use_native_unicode
self.client_encoding = client_encoding
if self.dbapi and hasattr(self.dbapi, '__version__'):
m = re.match(r'(\d+)\.(\d+)(?:\.(\d+))?',
self.dbapi.__version__)
if m:
self.psycopg2_version = tuple(
int(x)
for x in m.group(1, 2, 3)
if x is not None)
def initialize(self, connection):
super(PGDialect_psycopg2, self).initialize(connection)
self._has_native_hstore = self.use_native_hstore and \
self._hstore_oids(connection.connection) \
is not None
@classmethod
def dbapi(cls):
import psycopg2
return psycopg2
@util.memoized_property
def _isolation_lookup(self):
extensions = __import__('psycopg2.extensions').extensions
return {
'READ COMMITTED': extensions.ISOLATION_LEVEL_READ_COMMITTED,
'READ UNCOMMITTED': extensions.ISOLATION_LEVEL_READ_UNCOMMITTED,
'REPEATABLE READ': extensions.ISOLATION_LEVEL_REPEATABLE_READ,
'SERIALIZABLE': extensions.ISOLATION_LEVEL_SERIALIZABLE
}
def set_isolation_level(self, connection, level):
try:
level = self._isolation_lookup[level.replace('_', ' ')]
except KeyError:
raise exc.ArgumentError(
"Invalid value '%s' for isolation_level. "
"Valid isolation levels for %s are %s" %
(level, self.name, ", ".join(self._isolation_lookup))
)
connection.set_isolation_level(level)
def on_connect(self):
from psycopg2 import extras, extensions
fns = []
if self.client_encoding is not None:
def on_connect(conn):
conn.set_client_encoding(self.client_encoding)
fns.append(on_connect)
if self.isolation_level is not None:
def on_connect(conn):
self.set_isolation_level(conn, self.isolation_level)
fns.append(on_connect)
if self.dbapi and self.use_native_unicode:
def on_connect(conn):
extensions.register_type(extensions.UNICODE, conn)
fns.append(on_connect)
if self.dbapi and self.use_native_hstore:
def on_connect(conn):
hstore_oids = self._hstore_oids(conn)
if hstore_oids is not None:
oid, array_oid = hstore_oids
extras.register_hstore(conn, oid=oid, array_oid=array_oid)
fns.append(on_connect)
if fns:
def on_connect(conn):
for fn in fns:
fn(conn)
return on_connect
else:
return None
@util.memoized_instancemethod
def _hstore_oids(self, conn):
if self.psycopg2_version >= (2, 4):
from psycopg2 import extras
oids = extras.HstoreAdapter.get_oids(conn)
if oids is not None and oids[0]:
return oids[0:2]
return None
def create_connect_args(self, url):
opts = url.translate_connect_args(username='user')
if 'port' in opts:
opts['port'] = int(opts['port'])
opts.update(url.query)
return ([], opts)
def is_disconnect(self, e, connection, cursor):
if isinstance(e, self.dbapi.OperationalError):
# these error messages from libpq: interfaces/libpq/fe-misc.c.
# TODO: these are sent through gettext in libpq and we can't
# check within other locales - consider using connection.closed
return 'terminating connection' in str(e) or \
'closed the connection' in str(e) or \
'connection not open' in str(e) or \
'could not receive data from server' in str(e)
elif isinstance(e, self.dbapi.InterfaceError):
            # psycopg2 client errors, psycopg2/connection.h, psycopg2/cursor.h
return 'connection already closed' in str(e) or \
'cursor already closed' in str(e)
elif isinstance(e, self.dbapi.ProgrammingError):
# not sure where this path is originally from, it may
# be obsolete. It really says "losed", not "closed".
return "losed the connection unexpectedly" in str(e)
else:
return False
dialect = PGDialect_psycopg2
|
|
# Copyright 2014 Cisco Systems.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Padmanabhan Krishnan, Cisco Systems, Inc.
import collections
import mock
from dfa.agent.vdp import lldpad
from dfa.agent.vdp import lldpad_constants as vdp_const
from dfa.common import dfa_sys_lib as utils
from neutron.tests import base
try:
OrderedDict = collections.OrderedDict
except AttributeError:
import ordereddict
OrderedDict = ordereddict.OrderedDict
class LldpadDriverTest(base.BaseTestCase):
"""A test suite to exercise the Lldpad Driver. """
def setUp(self):
''' Setup for the test scripts '''
super(LldpadDriverTest, self).setUp()
self.root_helper = 'sudo'
self.port_name = "loc_veth"
self.uplink = "eth2"
self.port_str = "loc_veth_eth2"
self.execute = mock.patch.object(
utils, "execute", spec=utils.execute).start()
self.fill_default_vsi_params()
import sys
sys.argv = []
sys.argv.append("/usr/local/bin//fabric_enabler_server")
sys.argv.append("--config-file")
sys.argv.append("/etc/enabler_conf.ini")
self._test_lldp_init()
def _test_lldp_init(self):
''' Tests the initialization '''
with mock.patch('dfa.common.utils.PeriodicTask') as period_fn:
period_obj = period_fn.return_value
parent = mock.MagicMock()
parent.attach_mock(period_obj.run, 'run')
self.lldpad = lldpad.LldpadDriver(self.port_str, self.uplink,
self.root_helper)
expected_calls = [mock.call.run()]
parent.assert_has_calls(expected_calls)
def test_init(self):
        ''' Placeholder for init '''
pass
def _test_enable_lldp(self, is_ncb=True):
        ''' Helper that tests the routine that enables LLDP cfg '''
self.lldpad.enable_lldp()
if is_ncb is True:
self.execute.assert_called_with(
["lldptool", "-L", "-i", self.port_str, "-g", "ncb",
"adminStatus=rxtx"], root_helper=self.root_helper)
def test_enable_lldp(self):
        ''' Tests the routine that enables LLDP cfg '''
self._test_enable_lldp(is_ncb=True)
def test_enable_evb(self):
''' Top level routine for EVB cfg test '''
parent = mock.MagicMock()
parent.attach_mock(self.execute, 'execute')
self.lldpad.enable_evb()
expected_calls = [mock.call.execute(["lldptool", "-T", "-i",
self.port_str, "-g", "ncb", "-V",
"evb", "enableTx=yes"],
root_helper=self.root_helper),
mock.call.execute(["lldptool", "-T", "-i",
self.port_str, "-g", "ncb", "-V",
"evb", "-c", "evbgpid=yes"],
root_helper=self.root_helper)]
parent.assert_has_calls(expected_calls)
def fill_default_vsi_params(self):
''' Mock VSI Params '''
self.uuid = "00000000-1111-2222-3333-444455556666"
self.vsiid = self.uuid
self.mgrid = 0
self.typeid = 0
self.typeidver = 0
self.gid = 20000
self.mac = "00:11:22:33:44:55"
self.vlan = 0
self.mgrid_str = "mgrid2=0"
self.typeid_str = "typeid=0"
self.typeidver_str = "typeidver=0"
self.vsiid_str = "uuid=00000000-1111-2222-3333-444455556666"
self.filter_str = "filter=0-00:11:22:33:44:55-20000"
self.mode_str = "mode=assoc"
def _test_vnic_assert(self, test_vlan, vlan_ret, filter_str, new_nwk,
parent, is_rest=0):
        ''' Assertion checks called by other test functions '''
if new_nwk:
if is_rest == 1:
expected_calls = [mock.call.execute(["vdptool", "-t", "-i",
self.port_str, "-R",
"-V", "assoc", "-c",
self.mode_str, "-c",
self.mgrid_str, "-c",
self.typeid_str, "-c",
self.typeidver_str, "-c",
self.vsiid_str],
root_helper=(
self.root_helper))]
else:
expected_calls = [mock.call.execute(["vdptool", "-T", "-i",
self.port_str, "-W",
"-V", "assoc", "-c",
self.mode_str, "-c",
self.mgrid_str, "-c",
self.typeid_str, "-c",
self.typeidver_str, "-c",
self.vsiid_str, "-c",
"hints=none", "-c",
filter_str],
root_helper=(
self.root_helper))]
self.assertEqual(vlan_ret, test_vlan)
self.assertEqual(test_vlan,
self.lldpad.vdp_vif_map[self.uuid].
get('vdp_vlan'))
else:
expected_calls = [mock.call.execute(["vdptool", "-T", "-i",
self.port_str, "-V", "assoc",
"-c", self.mode_str,
"-c", self.mgrid_str, "-c",
self.typeid_str, "-c",
self.typeidver_str, "-c",
self.vsiid_str, "-c",
"hints=none", "-c",
filter_str],
root_helper=self.root_helper)]
parent.assert_has_calls(expected_calls)
self.assertEqual(self.mgrid,
self.lldpad.vdp_vif_map[self.uuid].get('mgrid'))
self.assertEqual(self.typeid,
self.lldpad.vdp_vif_map[self.uuid].get('typeid'))
self.assertEqual(self.typeidver,
self.lldpad.vdp_vif_map[self.uuid].get('typeid_ver'))
self.assertEqual(self.vsiid,
self.lldpad.vdp_vif_map[self.uuid].get('vsiid'))
self.assertEqual(vdp_const.VDP_FILTER_GIDMACVID,
self.lldpad.vdp_vif_map[self.uuid].get('filter_frmt'))
self.assertEqual(self.gid,
self.lldpad.vdp_vif_map[self.uuid].get('gid'))
self.assertEqual(self.mac,
self.lldpad.vdp_vif_map[self.uuid].get('mac'))
def test_vdp_port_up_new_nwk(self):
''' Tests the case when a VM comes for a new network '''
expected_vlan = 3003
parent = mock.MagicMock()
parent.attach_mock(self.execute, 'execute')
self.execute.return_value = ("Response from VDP\n\tmode = assoc\n\t"
"mgrid2 = 0\n\ttypeid = 0\n\t"
"typeidver = 0\n\tuuid = 00000000-1111-"
"2222-3333-444455556666\n\t"
"filter = 3003-00:12:22:33:44:55-0\n")
self.lldpad.send_vdp_query_msg = mock.Mock()
vlan_ret = self.lldpad.send_vdp_vnic_up(port_uuid=self.uuid,
vsiid=self.vsiid,
mgrid=self.mgrid,
typeid=self.typeid,
typeid_ver=self.typeidver,
gid=self.gid,
mac=self.mac,
new_network=True)
self._test_vnic_assert(expected_vlan, vlan_ret, self.filter_str, True,
parent)
def test_vdp_port_up_new_nwk_after_restart(self):
''' Tests the case when a VM comes for a new network after restart '''
expected_vlan = 3003
parent = mock.MagicMock()
parent.attach_mock(self.execute, 'execute')
self.execute.return_value = ("M000080c4C3010000001509LLDPLeth5"
"020000000304mode0005assoc06mgrid2"
"0001006typeid0001009typeidver0001004"
"uuid002400000000-1111-2222-3333-44445555"
"6666\nR00C3010000001509LLDPLeth500000003"
"010504mode0005assoc06mgrid20001006typeid"
"0001009typeidver0001004uuid00000000-1111"
"-2222-3333-44445555666605hints0001006"
"filter001c3003-00:12:22:33:44:55-2000003"
"oui006105cisco07vm_name000bFW_SRVC_RTR07"
"vm_uuid002467f338a6-0925-42aa-b2df-e8114"
"e9fd0da09ipv4_addr00020l\n")
vlan_ret = self.lldpad.send_vdp_vnic_up(port_uuid=self.uuid,
vsiid=self.vsiid,
mgrid=self.mgrid,
typeid=self.typeid,
typeid_ver=self.typeidver,
gid=self.gid,
mac=self.mac,
new_network=True)
self._test_vnic_assert(expected_vlan, vlan_ret, self.filter_str, True,
parent, is_rest=1)
def test_vdp_port_up_new_nwk_invalid_vlan(self):
'''
        Tests the case when an invalid VLAN is returned for a VM that comes
up for a new network
'''
expected_vlan = -1
parent = mock.MagicMock()
parent.attach_mock(self.execute, 'execute')
self.execute.return_value = "\nReturn from vsievt -11"
vlan_ret = self.lldpad.send_vdp_vnic_up(port_uuid=self.uuid,
vsiid=self.vsiid,
mgrid=self.mgrid,
typeid=self.typeid,
typeid_ver=self.typeidver,
gid=self.gid,
mac=self.mac,
new_network=True)
self._test_vnic_assert(expected_vlan, vlan_ret, self.filter_str, True,
parent)
def test_vdp_port_up_old_nwk(self):
''' Tests the case when a VM comes for an existing network '''
parent = mock.MagicMock()
parent.attach_mock(self.execute, 'execute')
self.execute.return_value = ("Response from VDP\n\tmode = assoc\n\t"
"mgrid2 = 0\n\ttypeid = 0\n\t"
"typeidver = 0\n\tuuid = 00000000-1111-"
"2222-3333-444455556666\n\t"
"filter = 3003-00:12:22:33:44:55-0\n")
filter_str = "filter=0-00:11:22:33:44:55-20000"
stored_vlan = 3003
self.lldpad.send_vdp_vnic_up(port_uuid=self.uuid, vsiid=self.vsiid,
mgrid=self.mgrid,
typeid=self.typeid,
typeid_ver=self.typeidver,
gid=self.gid,
mac=self.mac, vlan=0,
new_network=False)
self._test_vnic_assert(stored_vlan,
self.lldpad.vdp_vif_map[self.uuid].
get('vdp_vlan'), filter_str, False, parent)
def test_vdp_port_down(self):
''' Tests the case when a VM goes down '''
parent = mock.MagicMock()
parent.attach_mock(self.execute, 'execute')
filter_str = "filter=100-00:11:22:33:44:55-20000"
stored_vlan = 100
mode_str = "mode=deassoc"
self.lldpad.send_vdp_vnic_down(port_uuid=self.uuid, vsiid=self.vsiid,
mgrid=self.mgrid,
typeid=self.typeid,
typeid_ver=self.typeidver,
gid=self.gid,
mac=self.mac, vlan=stored_vlan)
self.execute.assert_called_with(
["vdptool", "-T", "-i", self.port_str,
"-V", "deassoc", "-c", mode_str, "-c", self.mgrid_str,
"-c", self.typeid_str, "-c", self.typeidver_str,
"-c", self.vsiid_str, "-c", "hints=none",
"-c", filter_str], root_helper=self.root_helper)
self.assertNotIn(self.uuid, self.lldpad.vdp_vif_map)
|
|
import unittest
from actuator.exceptions import InvalidDefinitionArguments, \
InvalidDefinitionName, \
InvalidSubstringInDefinitionName, \
DefinitionNamesAmbiguity
from actuator._command_line_parser import _CommandLineParser
from actuator.application import Application
from actuator.definitions.definition import Definition, _UnboundDefinition, \
_extract, _bind, _AutoArgument, \
auto, Undefined, _validate_names
class TestDefinition(unittest.TestCase):
def test_definition(self):
class Test(Definition):
def __init__(self, a, b, x, y, name=auto):
super(Test, self).__init__(name)
definition = Test(1, 'a', x=1, y='a')
self.assertIsInstance(definition, _UnboundDefinition)
self.assertEqual(definition.arguments, ((1, 'a'), {'x': 1, 'y': 'a'}))
self.assertIs(definition.error, None)
self.assertIsInstance(definition.name, _AutoArgument)
self.assertEqual(definition.name.identifier, 'name')
self.assertEqual(definition.name.index, 4)
self.assertTrue(definition.name.required)
self.assertIs(definition.name.value, None)
def test_definition_custom_name(self):
definition = Definition('test')
self.assertIsInstance(definition, _UnboundDefinition)
self.assertEqual(definition.arguments, (('test',), {}))
self.assertIs(definition.error, None)
self.assertIsInstance(definition.name, _AutoArgument)
self.assertEqual(definition.name.identifier, 'name')
self.assertEqual(definition.name.index, 0)
self.assertFalse(definition.name.required)
self.assertIs(definition.name.value, 'test')
def test_definition_invalid_arguments(self):
definition = Definition('test', name='test')
self.assertIsInstance(definition, _UnboundDefinition)
self.assertEqual(definition.arguments, (('test',), {'name': 'test'}))
self.assertIsInstance(definition.error, TypeError)
self.assertFalse(hasattr(definition, 'name'))
def test_definition__call__(self):
definition = Definition()
definition.name.value = 'test'
application = Application()
definition = definition(application, 'test')
self.assertIsInstance(definition, Definition)
self.assertEqual(definition.name, 'test')
self.assertEqual(definition.default, Undefined)
self.assertFalse(definition.nullable)
self.assertEqual(definition.parent, application)
self.assertEqual(definition.identifier, 'test')
def test_definition__call__subclassed(self):
class MyDefinition(Definition):
def __init__(self, x, y, z):
super(MyDefinition, self).__init__('test')
self.x = x
self.y = y
self.z = z
definition = MyDefinition(1, 2, 3)
application = Application()
definition = definition(application, 'test')
self.assertIsInstance(definition, Definition)
self.assertEqual(definition.parent, application)
self.assertEqual(definition.identifier, 'test')
self.assertEqual(definition.x, 1)
self.assertEqual(definition.y, 2)
self.assertEqual(definition.z, 3)
def test_definition__call__error(self):
definition = Definition('test', name='test')
application = Application()
with self.assertRaises(InvalidDefinitionArguments) as ctx:
definition(application, 'test')
self.assertIn('test', str(ctx.exception))
self.assertIn('Application', str(ctx.exception))
self.assertIn(str(definition.error), str(ctx.exception))
def test_definition__call__custom_name_with_less_arguments(self):
class Test(Definition):
def __init__(self, a, b, x=None, y=None, name=auto, z=None, t=None):
super(Test, self).__init__(name)
definition = Test(1, 2)
self.assertIsInstance(definition, _UnboundDefinition)
self.assertEqual(definition.arguments, ((1, 2), {}))
self.assertIsInstance(definition.name, _AutoArgument)
self.assertEqual(definition.name.index, 4)
self.assertTrue(definition.name.required)
definition.name.value = 'custom'
application = Application()
definition = definition(application, 'test')
self.assertIsInstance(definition, Test)
self.assertEqual(definition.name, 'custom')
def test_definition__call__custom_name_with_exact_arguments(self):
class Test(Definition):
def __init__(self, a, b, x=None, y=None, name=auto, z=None, t=None):
super(Test, self).__init__(name)
definition = Test(1, 2, 3, 4)
self.assertIsInstance(definition, _UnboundDefinition)
self.assertEqual(definition.arguments, ((1, 2, 3, 4), {}))
self.assertIsInstance(definition.name, _AutoArgument)
self.assertEqual(definition.name.index, 4)
self.assertTrue(definition.name.required)
definition.name.value = 'custom'
application = Application()
definition = definition(application, 'test')
self.assertIsInstance(definition, Test)
self.assertEqual(definition.name, 'custom')
    def test_definition__call__custom_name_as_keyword_with_exact_arguments(
            self):
class Test(Definition):
def __init__(self, a, b, x=None, y=None, name=auto, z=None, t=None):
super(Test, self).__init__(name)
definition = Test(1, 2, 3, 4, name=auto)
self.assertIsInstance(definition, _UnboundDefinition)
self.assertEqual(definition.arguments, ((1, 2, 3, 4), {'name': auto}))
self.assertIsInstance(definition.name, _AutoArgument)
self.assertEqual(definition.name.index, 4)
self.assertTrue(definition.name.required)
definition.name.value = 'custom'
application = Application()
definition = definition(application, 'test')
self.assertIsInstance(definition, Test)
self.assertEqual(definition.name, 'custom')
def test_definition__call__custom_name_with_more_arguments(self):
class Test(Definition):
def __init__(self, a, b, x=None, y=None, name=auto, z=None, t=None):
super(Test, self).__init__(name)
definition = Test(1, 2, 3, 4, auto, 5, 6)
self.assertIsInstance(definition, _UnboundDefinition)
self.assertEqual(definition.arguments, ((1, 2, 3, 4, auto, 5, 6), {}))
self.assertIsInstance(definition.name, _AutoArgument)
self.assertEqual(definition.name.index, 4)
self.assertTrue(definition.name.required)
definition.name.value = 'custom'
application = Application()
definition = definition(application, 'test')
self.assertIsInstance(definition, Test)
self.assertEqual(definition.name, 'custom')
def test_definition__extract(self):
first_definition = Definition()
second_definition = Definition()
class Test(object):
x = first_definition
y = second_definition
z = True
self.assertEqual(dict(_extract(Test)), {'x': first_definition,
'y': second_definition})
def test_definition__bind(self):
first_definition = Definition()
second_definition = Definition()
third_definition = Definition('custom')
class Test(Application):
pass
test = Test()
_bind(test, (('first_definition', first_definition),
('second_definition', second_definition),
('third_definition', third_definition)))
self.assertTrue(hasattr(test, 'first_definition'))
self.assertIsInstance(test.first_definition, Definition)
self.assertEqual(test.first_definition.name, 'first-definition')
self.assertIs(test.first_definition.parent, test)
self.assertEqual(test.first_definition.identifier, 'first_definition')
self.assertTrue(hasattr(test, 'second_definition'))
self.assertIsInstance(test.second_definition, Definition)
self.assertEqual(test.second_definition.name, 'second-definition')
self.assertIs(test.second_definition.parent, test)
self.assertEqual(test.second_definition.identifier,
'second_definition')
self.assertTrue(hasattr(test, 'third_definition'))
self.assertIsInstance(test.third_definition, Definition)
self.assertEqual(test.third_definition.name, 'custom')
self.assertIs(test.third_definition.parent, test)
self.assertEqual(test.third_definition.identifier, 'third_definition')
def test_definition__validate_names(self):
names = {}
application = Application()
first_definition = Definition('duplicate')
second_definition = Definition('duplicate')
first_definition = first_definition(application, 'first_definition')
_validate_names(first_definition, names, application)
self.assertEqual(names, {'duplicate': [first_definition]})
second_definition = second_definition(application, 'second_definition')
with self.assertRaises(DefinitionNamesAmbiguity) as ctx:
_validate_names(second_definition, names, application)
self.assertEqual(names, {'duplicate': [first_definition,
second_definition]})
self.assertIn('first_definition', str(ctx.exception))
self.assertIn('second_definition', str(ctx.exception))
self.assertIn('Application', str(ctx.exception))
self.assertIn('duplicate', str(ctx.exception))
def test_definition_validate_invalid_name_type(self):
definition = Definition(False)
with self.assertRaises(InvalidDefinitionName) as ctx:
definition(Application(), 'test')
self.assertIn('False', str(ctx.exception))
self.assertIn('Application', str(ctx.exception))
self.assertIn('test', str(ctx.exception))
def test_definition_validate_separator(self):
separator = _CommandLineParser.actuator_argument_value_separator
name = 'a%sb' % separator
definition = Definition(name)
with self.assertRaises(InvalidSubstringInDefinitionName) as ctx:
definition(Application(), 'test')
message = str(ctx.exception)
self.assertIn(name, message)
self.assertIn('Application', message)
self.assertIn('test', message)
self.assertIn(separator, message.replace(name, ''))
test_suite = unittest.defaultTestLoader.loadTestsFromTestCase(TestDefinition)
if __name__ == '__main__':
unittest.main()
|
|
'''
Carousel
========
.. image:: images/carousel.gif
:align: right
.. versionadded:: 1.4.0
The :class:`Carousel` widget provides the classic mobile-friendly carousel view
where you can swipe between slides.
You can add any content to the carousel and have it move horizontally or
vertically. The carousel can display pages in a sequence or a loop.
Example::
from kivy.app import App
from kivy.uix.carousel import Carousel
from kivy.uix.image import AsyncImage
class CarouselApp(App):
def build(self):
carousel = Carousel(direction='right')
for i in range(10):
src = "http://placehold.it/480x270.png&text=slide-%d&.png" % i
image = AsyncImage(source=src, allow_stretch=True)
carousel.add_widget(image)
return carousel
CarouselApp().run()
.. versionchanged:: 1.5.0
The carousel now supports active children, like the
:class:`~kivy.uix.scrollview.ScrollView`. It will detect a swipe gesture
according to the :attr:`Carousel.scroll_timeout` and
:attr:`Carousel.scroll_distance` properties.
In addition, the slide container is no longer exposed by the API.
The impacted properties are
:attr:`Carousel.slides`, :attr:`Carousel.current_slide`,
:attr:`Carousel.previous_slide` and :attr:`Carousel.next_slide`.
'''
__all__ = ('Carousel', )
from functools import partial
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.animation import Animation
from kivy.uix.stencilview import StencilView
from kivy.uix.relativelayout import RelativeLayout
from kivy.properties import BooleanProperty, OptionProperty, AliasProperty, \
NumericProperty, ListProperty, ObjectProperty, StringProperty
class Carousel(StencilView):
'''Carousel class. See module documentation for more information.
'''
slides = ListProperty([])
'''List of slides inside the Carousel. The slides are the
widgets added to the Carousel using the :attr:`add_widget` method.
:attr:`slides` is a :class:`~kivy.properties.ListProperty` and is
read-only.
'''
def _get_slides_container(self):
return [x.parent for x in self.slides]
slides_container = AliasProperty(_get_slides_container, None,
bind=('slides', ))
direction = OptionProperty('right',
options=('right', 'left', 'top', 'bottom'))
'''Specifies the direction in which the slides are ordered. This
corresponds to the direction from which the user swipes to go from one
slide to the next. It
can be `right`, `left`, `top`, or `bottom`. For example, with
the default value of `right`, the second slide is to the right
of the first and the user would swipe from the right towards the
left to get to the second slide.
:attr:`direction` is an :class:`~kivy.properties.OptionProperty` and
defaults to 'right'.
'''
min_move = NumericProperty(0.2)
'''Defines the minimum distance to be covered before the touch is
    considered a swipe gesture and the Carousel content is changed.
    This is expressed as a fraction of the Carousel's width.
If the movement doesn't reach this minimum value, the movement is
cancelled and the content is restored to its original position.
:attr:`min_move` is a :class:`~kivy.properties.NumericProperty` and
defaults to 0.2.
'''
anim_move_duration = NumericProperty(0.5)
'''Defines the duration of the Carousel animation between pages.
:attr:`anim_move_duration` is a :class:`~kivy.properties.NumericProperty`
and defaults to 0.5.
'''
anim_cancel_duration = NumericProperty(0.3)
'''Defines the duration of the animation when a swipe movement is not
accepted. This is generally when the user does not make a large enough
swipe. See :attr:`min_move`.
:attr:`anim_cancel_duration` is a :class:`~kivy.properties.NumericProperty`
and defaults to 0.3.
'''
loop = BooleanProperty(False)
    '''Allow the Carousel to loop infinitely. If True, when the user tries to
    swipe beyond the last page, it will return to the first. If False, it will
remain on the last page.
:attr:`loop` is a :class:`~kivy.properties.BooleanProperty` and
defaults to False.
'''
def _get_index(self):
if self.slides:
return self._index % len(self.slides)
return None
def _set_index(self, value):
if self.slides:
self._index = value % len(self.slides)
else:
self._index = None
index = AliasProperty(_get_index, _set_index, bind=('_index', 'slides'))
'''Get/Set the current slide based on the index.
:attr:`index` is an :class:`~kivy.properties.AliasProperty` and defaults
to 0 (the first item).
'''
def _prev_slide(self):
slides = self.slides
len_slides = len(slides)
index = self.index
if len_slides < 2: # None, or 1 slide
return None
if len_slides == 2:
if index == 0:
return None
if index == 1:
return slides[0]
if self.loop and index == 0:
return slides[-1]
if index > 0:
return slides[index - 1]
previous_slide = AliasProperty(_prev_slide, None, bind=('slides', 'index'))
'''The previous slide in the Carousel. It is None if the current slide is
the first slide in the Carousel. This ordering reflects the order in which
the slides are added: their presentation varies according to the
:attr:`direction` property.
:attr:`previous_slide` is an :class:`~kivy.properties.AliasProperty`.
.. versionchanged:: 1.5.0
This property no longer exposes the slides container. It returns
the widget you have added.
'''
def _curr_slide(self):
if len(self.slides):
return self.slides[self.index]
current_slide = AliasProperty(_curr_slide, None, bind=('slides', 'index'))
'''The currently shown slide.
:attr:`current_slide` is an :class:`~kivy.properties.AliasProperty`.
.. versionchanged:: 1.5.0
The property no longer exposes the slides container. It returns
the widget you have added.
'''
def _next_slide(self):
if len(self.slides) < 2: # None, or 1 slide
return None
if len(self.slides) == 2:
if self.index == 0:
return self.slides[1]
if self.index == 1:
return None
if self.loop and self.index == len(self.slides) - 1:
return self.slides[0]
if self.index < len(self.slides) - 1:
return self.slides[self.index + 1]
next_slide = AliasProperty(_next_slide, None, bind=('slides', 'index'))
'''The next slide in the Carousel. It is None if the current slide is
the last slide in the Carousel. This ordering reflects the order in which
the slides are added: their presentation varies according to the
:attr:`direction` property.
:attr:`next_slide` is an :class:`~kivy.properties.AliasProperty`.
.. versionchanged:: 1.5.0
The property no longer exposes the slides container.
It returns the widget you have added.
'''
scroll_timeout = NumericProperty(200)
'''Timeout allowed to trigger the :attr:`scroll_distance`, in milliseconds.
If the user has not moved :attr:`scroll_distance` within the timeout,
no scrolling will occur and the touch event will go to the children.
:attr:`scroll_timeout` is a :class:`~kivy.properties.NumericProperty` and
    defaults to 200 (milliseconds).
.. versionadded:: 1.5.0
'''
scroll_distance = NumericProperty('20dp')
'''Distance to move before scrolling the :class:`Carousel` in pixels. As
soon as the distance has been traveled, the :class:`Carousel` will start
to scroll, and no touch event will go to children.
It is advisable that you base this value on the dpi of your target device's
screen.
:attr:`scroll_distance` is a :class:`~kivy.properties.NumericProperty` and
defaults to 20dp.
.. versionadded:: 1.5.0
'''
anim_type = StringProperty('out_quad')
'''Type of animation to use while animating to the next/previous slide.
This should be the name of an
:class:`~kivy.animation.AnimationTransition` function.
:attr:`anim_type` is a :class:`~kivy.properties.StringProperty` and
defaults to 'out_quad'.
.. versionadded:: 1.8.0
'''
ignore_perpendicular_swipes = BooleanProperty(False)
    '''Ignore swipes along the axis perpendicular to :attr:`direction`.
    .. versionadded:: 1.9.2
    :attr:`ignore_perpendicular_swipes` is a
    :class:`~kivy.properties.BooleanProperty` and defaults to False.
'''
#### private properties, for internal use only ###
_index = NumericProperty(0, allownone=True)
_prev = ObjectProperty(None, allownone=True)
_current = ObjectProperty(None, allownone=True)
_next = ObjectProperty(None, allownone=True)
_offset = NumericProperty(0)
_touch = ObjectProperty(None, allownone=True)
_change_touch_mode_ev = None
def __init__(self, **kwargs):
self._trigger_position_visible_slides = Clock.create_trigger(
self._position_visible_slides, -1)
super(Carousel, self).__init__(**kwargs)
self._skip_slide = None
self.touch_mode_change = False
def load_slide(self, slide):
'''Animate to the slide that is passed as the argument.
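        For example, assuming the carousel already holds at least three
        slides (a minimal sketch)::
            carousel.load_slide(carousel.slides[2])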
.. versionchanged:: 1.8.0
'''
slides = self.slides
start, stop = slides.index(self.current_slide), slides.index(slide)
if start == stop:
return
self._skip_slide = stop
if stop > start:
self._insert_visible_slides(_next_slide=slide)
self.load_next()
else:
self._insert_visible_slides(_prev_slide=slide)
self.load_previous()
def load_previous(self):
'''Animate to the previous slide.
.. versionadded:: 1.7.0
'''
self.load_next(mode='prev')
def load_next(self, mode='next'):
'''Animate to the next slide.
.. versionadded:: 1.7.0
'''
        if self.index is not None:
w, h = self.size
_direction = {
'top': -h / 2,
'bottom': h / 2,
'left': w / 2,
'right': -w / 2}
_offset = _direction[self.direction]
if mode == 'prev':
_offset = -_offset
self._start_animation(min_move=0, offset=_offset)
def get_slide_container(self, slide):
return slide.parent
def _insert_visible_slides(self, _next_slide=None, _prev_slide=None):
get_slide_container = self.get_slide_container
previous_slide = _prev_slide if _prev_slide else self.previous_slide
if previous_slide:
self._prev = get_slide_container(previous_slide)
else:
self._prev = None
current_slide = self.current_slide
if current_slide:
self._current = get_slide_container(current_slide)
else:
self._current = None
next_slide = _next_slide if _next_slide else self.next_slide
if next_slide:
self._next = get_slide_container(next_slide)
else:
self._next = None
super_remove = super(Carousel, self).remove_widget
for container in self.slides_container:
super_remove(container)
if self._prev and self._prev.parent is not self:
super(Carousel, self).add_widget(self._prev)
if self._next and self._next.parent is not self:
super(Carousel, self).add_widget(self._next)
if self._current:
super(Carousel, self).add_widget(self._current)
def _position_visible_slides(self, *args):
slides, index = self.slides, self.index
no_of_slides = len(slides) - 1
if not slides:
return
x, y, width, height = self.x, self.y, self.width, self.height
_offset, direction = self._offset, self.direction
_prev, _next, _current = self._prev, self._next, self._current
get_slide_container = self.get_slide_container
last_slide = get_slide_container(slides[-1])
first_slide = get_slide_container(slides[0])
skip_next = False
_loop = self.loop
if direction[0] in ['r', 'l']:
xoff = x + _offset
x_prev = {'l': xoff + width, 'r': xoff - width}
x_next = {'l': xoff - width, 'r': xoff + width}
if _prev:
_prev.pos = (x_prev[direction[0]], y)
elif _loop and _next and index == 0:
# if first slide is moving to right with direction set to right
# or toward left with direction set to left
if ((_offset > 0 and direction[0] == 'r') or
(_offset < 0 and direction[0] == 'l')):
# put last_slide before first slide
last_slide.pos = (x_prev[direction[0]], y)
skip_next = True
if _current:
_current.pos = (xoff, y)
if skip_next:
return
if _next:
_next.pos = (x_next[direction[0]], y)
elif _loop and _prev and index == no_of_slides:
if ((_offset < 0 and direction[0] == 'r') or
(_offset > 0 and direction[0] == 'l')):
first_slide.pos = (x_next[direction[0]], y)
if direction[0] in ['t', 'b']:
yoff = y + _offset
y_prev = {'t': yoff - height, 'b': yoff + height}
y_next = {'t': yoff + height, 'b': yoff - height}
if _prev:
_prev.pos = (x, y_prev[direction[0]])
elif _loop and _next and index == 0:
if ((_offset > 0 and direction[0] == 't') or
(_offset < 0 and direction[0] == 'b')):
last_slide.pos = (x, y_prev[direction[0]])
skip_next = True
if _current:
_current.pos = (x, yoff)
if skip_next:
return
if _next:
_next.pos = (x, y_next[direction[0]])
elif _loop and _prev and index == no_of_slides:
if ((_offset < 0 and direction[0] == 't') or
(_offset > 0 and direction[0] == 'b')):
first_slide.pos = (x, y_next[direction[0]])
def on_size(self, *args):
size = self.size
for slide in self.slides_container:
slide.size = size
self._trigger_position_visible_slides()
def on_pos(self, *args):
self._trigger_position_visible_slides()
def on_index(self, *args):
self._insert_visible_slides()
self._trigger_position_visible_slides()
self._offset = 0
def on_slides(self, *args):
if self.slides:
self.index = self.index % len(self.slides)
self._insert_visible_slides()
self._trigger_position_visible_slides()
def on__offset(self, *args):
self._trigger_position_visible_slides()
# if reached full offset, switch index to next or prev
direction = self.direction
_offset = self._offset
width = self.width
height = self.height
index = self.index
if self._skip_slide is not None or index is None:
return
if direction[0] == 'r':
if _offset <= -width:
index += 1
if _offset >= width:
index -= 1
if direction[0] == 'l':
if _offset <= -width:
index -= 1
if _offset >= width:
index += 1
if direction[0] == 't':
if _offset <= - height:
index += 1
if _offset >= height:
index -= 1
if direction[0] == 'b':
if _offset <= -height:
index -= 1
if _offset >= height:
index += 1
self.index = index
def _start_animation(self, *args, **kwargs):
# compute target offset for ease back, next or prev
new_offset = 0
direction = kwargs.get('direction', self.direction)
is_horizontal = direction[0] in ['r', 'l']
extent = self.width if is_horizontal else self.height
min_move = kwargs.get('min_move', self.min_move)
_offset = kwargs.get('offset', self._offset)
if _offset < min_move * -extent:
new_offset = -extent
elif _offset > min_move * extent:
new_offset = extent
        # if new_offset is 0, it wasn't enough to go next/prev
dur = self.anim_move_duration
if new_offset == 0:
dur = self.anim_cancel_duration
# detect edge cases if not looping
len_slides = len(self.slides)
index = self.index
if not self.loop or len_slides == 1:
is_first = (index == 0)
is_last = (index == len_slides - 1)
if direction[0] in ['r', 't']:
towards_prev = (new_offset > 0)
towards_next = (new_offset < 0)
else:
towards_prev = (new_offset < 0)
towards_next = (new_offset > 0)
if (is_first and towards_prev) or (is_last and towards_next):
new_offset = 0
anim = Animation(_offset=new_offset, d=dur, t=self.anim_type)
anim.cancel_all(self)
def _cmp(*l):
if self._skip_slide is not None:
self.index = self._skip_slide
self._skip_slide = None
anim.bind(on_complete=_cmp)
anim.start(self)
def _get_uid(self, prefix='sv'):
return '{0}.{1}'.format(prefix, self.uid)
def on_touch_down(self, touch):
if not self.collide_point(*touch.pos):
touch.ud[self._get_uid('cavoid')] = True
return
if self.disabled:
return True
if self._touch:
return super(Carousel, self).on_touch_down(touch)
Animation.cancel_all(self)
self._touch = touch
uid = self._get_uid()
touch.grab(self)
touch.ud[uid] = {
'mode': 'unknown',
'time': touch.time_start}
self._change_touch_mode_ev = Clock.schedule_once(
self._change_touch_mode, self.scroll_timeout / 1000.)
self.touch_mode_change = False
return True
def on_touch_move(self, touch):
        if not self.touch_mode_change:
            if self.ignore_perpendicular_swipes and \
                    self.direction in ('top', 'bottom'):
                if abs(touch.oy - touch.y) < self.scroll_distance:
                    if abs(touch.ox - touch.x) > self.scroll_distance:
                        self._change_touch_mode()
                        self.touch_mode_change = True
            elif self.ignore_perpendicular_swipes and \
                    self.direction in ('right', 'left'):
                if abs(touch.ox - touch.x) < self.scroll_distance:
                    if abs(touch.oy - touch.y) > self.scroll_distance:
                        self._change_touch_mode()
                        self.touch_mode_change = True
if self._get_uid('cavoid') in touch.ud:
return
if self._touch is not touch:
super(Carousel, self).on_touch_move(touch)
return self._get_uid() in touch.ud
if touch.grab_current is not self:
return True
ud = touch.ud[self._get_uid()]
direction = self.direction
if ud['mode'] == 'unknown':
if direction[0] in ('r', 'l'):
distance = abs(touch.ox - touch.x)
else:
distance = abs(touch.oy - touch.y)
if distance > self.scroll_distance:
ev = self._change_touch_mode_ev
if ev is not None:
ev.cancel()
ud['mode'] = 'scroll'
else:
if direction[0] in ('r', 'l'):
self._offset += touch.dx
if direction[0] in ('t', 'b'):
self._offset += touch.dy
return True
def on_touch_up(self, touch):
if self._get_uid('cavoid') in touch.ud:
return
if self in [x() for x in touch.grab_list]:
touch.ungrab(self)
self._touch = None
ud = touch.ud[self._get_uid()]
if ud['mode'] == 'unknown':
ev = self._change_touch_mode_ev
if ev is not None:
ev.cancel()
super(Carousel, self).on_touch_down(touch)
Clock.schedule_once(partial(self._do_touch_up, touch), .1)
else:
self._start_animation()
else:
if self._touch is not touch and self.uid not in touch.ud:
super(Carousel, self).on_touch_up(touch)
return self._get_uid() in touch.ud
def _do_touch_up(self, touch, *largs):
super(Carousel, self).on_touch_up(touch)
# don't forget about grab event!
for x in touch.grab_list[:]:
touch.grab_list.remove(x)
x = x()
if not x:
continue
touch.grab_current = x
super(Carousel, self).on_touch_up(touch)
touch.grab_current = None
def _change_touch_mode(self, *largs):
if not self._touch:
return
self._start_animation()
uid = self._get_uid()
touch = self._touch
ud = touch.ud[uid]
if ud['mode'] == 'unknown':
touch.ungrab(self)
self._touch = None
super(Carousel, self).on_touch_down(touch)
return
def add_widget(self, widget, index=0):
slide = RelativeLayout(size=self.size, x=self.x - self.width, y=self.y)
slide.add_widget(widget)
super(Carousel, self).add_widget(slide, index)
if index != 0:
self.slides.insert(index - len(self.slides), widget)
else:
self.slides.append(widget)
def remove_widget(self, widget, *args, **kwargs):
        # XXX be careful, widget.parent refers to the RelativeLayout
        # added in add_widget(). This will break if the RelativeLayout
        # implementation changes.
# if we passed the real widget
if widget in self.slides:
slide = widget.parent
self.slides.remove(widget)
return slide.remove_widget(widget, *args, **kwargs)
return super(Carousel, self).remove_widget(widget, *args, **kwargs)
def clear_widgets(self):
for slide in self.slides[:]:
self.remove_widget(slide)
super(Carousel, self).clear_widgets()
if __name__ == '__main__':
from kivy.app import App
class Example1(App):
def build(self):
carousel = Carousel(direction='left',
loop=True)
for i in range(4):
src = "http://placehold.it/480x270.png&text=slide-%d&.png" % i
image = Factory.AsyncImage(source=src, allow_stretch=True)
carousel.add_widget(image)
return carousel
Example1().run()
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
=============
Model Parsers
=============
:Authors:
Moritz Emanuel Beber
Nikolaus Sonnenschein
:Date:
2011-04-07
:Copyright:
Copyright(c) 2011 Jacobs University of Bremen. All rights reserved.
:File:
parsers.py
"""
__all__ = ["parse"]
import os
import codecs
import logging
from contextlib import contextmanager
from .metabolism import metabolism
from . import miscellaneous as misc
from .errors import PyMetabolismError
from .singletonmixin import Singleton
logger = logging.getLogger(__name__)
logger.addHandler(misc.NullHandler())
class SBMLParser(Singleton):
"""
A class implementing methods for parsing a SBML model
"""
_sbml = False
def __init__(self):
if not self.__class__._sbml:
self.__class__._sbml = misc.load_module("libsbml", "SBML",
"http://sbml.org/Software/libSBML")
object.__init__(self)
self._options = misc.OptionsManager.get_instance()
def parse_string(self, xml):
"""
Parse a document in SBML format.
"""
document = self._sbml.readSBMLFromString(xml)
if document.getNumErrors() > 0:
logger.warn("reading the SBML document produced some errors")
model = document.getModel()
# parse compartments
compartments = [self._parse_compartment(comp) for comp in
model.getListOfCompartments()]
logger.debug("approx. %d compartments", len(compartments))
# parse compounds
compounds = [self._parse_species(cmpd) for cmpd in
model.getListOfSpecies()]
logger.debug("%d compounds", len(compounds))
reactions = [self._parse_reaction(rxn, model) for rxn in
model.getListOfReactions()]
logger.debug("%d reactions", len(reactions))
return metabolism.MetabolicSystem(compartments=compartments,
reactions=reactions, compounds=compounds)
def _parse_compartment(self, compartment):
suffix = ""
for (suff, name) in self._options.compartments.iteritems():
if name == compartment.getId():
suffix = suff
return metabolism.SBMLCompartment(name=compartment.getId(),
outside=compartment.getOutside(),
constant=compartment.getConstant(),
spatial_dimensions=compartment.getSpatialDimensions(),
size=compartment.getSize(), units=compartment.getUnits(),
suffix=suffix)
def _strip_species_id(self, name):
identifier = name
if identifier.startswith(self._options.compound_prefix):
identifier = identifier[len(self._options.compound_prefix):]
compartment = None
for suffix in self._options.compartments.iterkeys():
if identifier.endswith(suffix):
identifier = identifier[:-len(suffix)]
compartment = metabolism.SBMLCompartment(
self._options.compartments[suffix], suffix=suffix)
break
return (identifier, compartment)
def _parse_species(self, compound):
"""
Able to parse entries from getListOfSpecies
@todo: Check for meta information and parse if available
"""
(identifier, comp) = self._strip_species_id(compound.getId())
if not comp:
comp = metabolism.SBMLCompartment(compound.getCompartment())
name = compound.getName()
cmpd = metabolism.SBMLCompound(identifier, extended_name=name,
charge=compound.getCharge())
if not comp:
return cmpd
else:
return metabolism.SBMLCompartmentCompound(cmpd, comp)
def _strip_reaction_id(self, name):
identifier = name
if identifier.startswith(self._options.reaction_prefix):
identifier = identifier[len(self._options.reaction_prefix):]
if identifier.endswith(self._options.reversible_suffix):
identifier = identifier[:-len(self._options.reversible_suffix)]
return identifier
def _parse_reaction(self, reaction, model, note_sep=":"):
"""Able to parse entries from getListOfReactions"""
identifier = self._strip_reaction_id(reaction.getId())
name = reaction.getName()
# parse additional reaction parameters
params = dict()
for param in reaction.getKineticLaw().getListOfParameters():
params[param.getId().lower()] = param.getValue()
# substrates' stoichiometry
substrates = dict((self._parse_species(model.getSpecies(
elem.getSpecies())),
abs(elem.getStoichiometry())) for elem in
reaction.getListOfReactants())
# products' stoichiometry
products = dict((self._parse_species(model.getSpecies(
elem.getSpecies())),
abs(elem.getStoichiometry())) for elem in
reaction.getListOfProducts())
# other information contained in notes
info = dict()
notes = reaction.getNotes()
for i in range(notes.getNumChildren()):
node = notes.getChild(i)
for j in range(node.getNumChildren()):
item = node.getChild(j).toString().split(note_sep, 1)
if len(item) == 2:
key = item[0].strip().lower().replace(" ", "_")
value = item[1].strip()
info[key] = value
return metabolism.SBMLReaction(identifier, substrates, products,
reversible=reaction.getReversible(), extended_name=name,
notes=info, **params)
def _open_tar(path, **kw_args):
import tarfile
kw_args["mode"] = kw_args["mode"].strip("b")
if isinstance(path, basestring):
return tarfile.TarFile(name=path, mode=kw_args["mode"],
encoding=kw_args["encoding"])
else:
return tarfile.TarFile(fileobj=path, mode=kw_args["mode"],
encoding=kw_args["encoding"])
def _open_gz(path, **kw_args):
import gzip
if isinstance(path, basestring):
return gzip.GzipFile(filename=path, mode=kw_args["mode"])
else:
return gzip.GzipFile(fileobj=path, mode=kw_args["mode"])
def _open_bz2(path, **kw_args):
import bz2
return bz2.BZ2File(path)
def _open_zip(path, **kw_args):
import zipfile
kw_args["mode"] = kw_args["mode"].strip("b")
return zipfile.ZipFile(path, mode=kw_args["mode"])
def _open_file(path, **kw_args):
if isinstance(path, basestring):
return codecs.open(path, mode=kw_args["mode"],
encoding=kw_args["encoding"])
else:
reader = codecs.getreader(kw_args["encoding"])
return reader(path)
archives = {"gz": _open_gz,
"gzip": _open_gz,
"bz2": _open_bz2
# "zip": _open_zip,
# "tar": _open_tar
}
@contextmanager
def open_file(filename, **kw_args):
path = filename
filename = os.path.basename(filename)
extns = filename.split(".")
del extns[0]
extns.reverse()
for ext in extns:
ext = ext.lower()
func = archives.get(ext, _open_file)
path = func(path, **kw_args)
yield (path, ext)
if not path.closed:
path.close()
parsers = {"xml": SBMLParser,
"sbml": SBMLParser
}
def parse(filename, frmt=False, mode="rb", encoding="utf-8", **kw_args):
kw_args["mode"] = mode
kw_args["encoding"] = encoding
with open_file(filename, **kw_args) as (file_h, ext):
if frmt:
ext = frmt.lower()
if ext in parsers:
parser = parsers[ext].get_instance()
else:
raise PyMetabolismError("unknown metabolic system format '{}'", ext)
system = parser.parse_string(str(file_h.read(-1)))
return system
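# A minimal usage sketch; the filename "model.xml" is illustrative and
# libsbml must be installed for the SBMLParser to work.
if __name__ == "__main__":
    system = parse("model.xml")
    print(system)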
|
|
# Copyright (c) 2013-2021 khal contributors
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""this module contains some helper functions converting strings or list of
strings to date(time) or event objects"""
import datetime as dt
import logging
import re
from calendar import isleap
from time import strptime
import pytz
from khal.exceptions import FatalError, DateTimeParseError
logger = logging.getLogger('khal')
def timefstr(dtime_list, timeformat):
"""converts the first item of a list (a time as a string) to a datetimeobject
where the date is today and the time is given by a string
removes "used" elements of list
:type dtime_list: list(str)
:type timeformat: str
:rtype: datetime.datetime
"""
if len(dtime_list) == 0:
raise ValueError()
time_start = dt.datetime.strptime(dtime_list[0], timeformat)
time_start = dt.time(*time_start.timetuple()[3:5])
day_start = dt.date.today()
dtstart = dt.datetime.combine(day_start, time_start)
dtime_list.pop(0)
return dtstart
def datetimefstr(dtime_list, dateformat, default_day=None, infer_year=True,
in_future=True):
"""converts a datetime (as one or several string elements of a list) to
a datetimeobject, if infer_year is True, use the `default_day`'s year as
the year of the return datetimeobject,
removes "used" elements of list
example: dtime_list = ['17.03.', 'description']
dateformat = '%d.%m.'
or : dtime_list = ['17.03.', '16:00', 'description']
dateformat = '%d.%m. %H:%M'
"""
# if now() is called as default param, mocking with freezegun won't work
now = dt.datetime.now()
if default_day is None:
default_day = now.date()
parts = dateformat.count(' ') + 1
dtstring = ' '.join(dtime_list[0:parts])
# only time.strptime can parse the 29th of Feb. if no year is given
dtstart = strptime(dtstring, dateformat)
if infer_year and dtstart.tm_mon == 2 and dtstart.tm_mday == 29 and \
not isleap(default_day.year):
raise ValueError
for _ in range(parts):
dtime_list.pop(0)
if infer_year:
dtstart = dt.datetime(*(default_day.timetuple()[:1] + dtstart[1:5]))
if in_future and dtstart < now:
dtstart = dtstart.replace(year=dtstart.year + 1)
if dtstart.date() < default_day:
dtstart = dtstart.replace(year=default_day.year + 1)
return dtstart
else:
return dt.datetime(*dtstart[:5])
def weekdaypstr(dayname):
"""converts an (abbreviated) dayname to a number (mon=0, sun=6)
    :param dayname: name or abbreviation of the day
:type dayname: str
:return: number of the day in a week
:rtype: int
"""
if dayname in ['monday', 'mon']:
return 0
if dayname in ['tuesday', 'tue']:
return 1
if dayname in ['wednesday', 'wed']:
return 2
if dayname in ['thursday', 'thu']:
return 3
if dayname in ['friday', 'fri']:
return 4
if dayname in ['saturday', 'sat']:
return 5
if dayname in ['sunday', 'sun']:
return 6
raise ValueError('invalid weekday name `%s`' % dayname)
def construct_daynames(date_):
"""converts datetime.date into a string description
either `Today`, `Tomorrow` or name of weekday.
"""
if date_ == dt.date.today():
return 'Today'
elif date_ == dt.date.today() + dt.timedelta(days=1):
return 'Tomorrow'
else:
return date_.strftime('%A')
def calc_day(dayname):
"""converts a relative date's description to a datetime object
:param dayname: relative day name (like 'today' or 'monday')
:type dayname: str
:returns: date
:rtype: datetime.datetime
"""
today = dt.datetime.combine(dt.date.today(), dt.time.min)
dayname = dayname.lower()
if dayname == 'today':
return today
if dayname == 'tomorrow':
return today + dt.timedelta(days=1)
if dayname == 'yesterday':
return today - dt.timedelta(days=1)
wday = weekdaypstr(dayname)
days = (wday - today.weekday()) % 7
days = 7 if days == 0 else days
day = today + dt.timedelta(days=days)
return day
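# Minimal sanity sketch: 'today'/'tomorrow' are midnight-based, and a weekday
# name always resolves to the *next* such weekday (1-7 days ahead, never today).
# >>> calc_day('tomorrow') - calc_day('today')
# datetime.timedelta(days=1)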
def datefstr_weekday(dtime_list, _, **kwargs):
"""interprets first element of a list as a relative date and removes that
element
:param dtime_list: event description in list form
:type dtime_list: list
:returns: date
:rtype: datetime.datetime
"""
if len(dtime_list) == 0:
raise ValueError()
day = calc_day(dtime_list[0])
dtime_list.pop(0)
return day
def datetimefstr_weekday(dtime_list, timeformat, **kwargs):
if len(dtime_list) == 0:
raise ValueError()
day = calc_day(dtime_list[0])
this_time = timefstr(dtime_list[1:], timeformat)
dtime_list.pop(0)
dtime_list.pop(0) # we need to pop twice as timefstr gets a copy
dtime = dt.datetime.combine(day, this_time.time())
return dtime
def guessdatetimefstr(dtime_list, locale, default_day=None, in_future=True):
"""
:type dtime_list: list
:type locale: dict
:type default_day: datetime.datetime
:param in_future: if set, shortdate(time) events will be set in the future
:type in_future: bool
:rtype: datetime.datetime
"""
# if now() is called as default param, mocking with freezegun won't work
if default_day is None:
default_day = dt.datetime.now().date()
# TODO rename in guessdatetimefstrLIST or something saner altogether
def timefstr_day(dtime_list, timeformat, **kwargs):
if locale['timeformat'] == '%H:%M' and dtime_list[0] == '24:00':
a_date = dt.datetime.combine(default_day, dt.time(0))
dtime_list.pop(0)
else:
a_date = timefstr(dtime_list, timeformat)
a_date = dt.datetime(*(default_day.timetuple()[:3] + a_date.timetuple()[3:5]))
return a_date
def datetimefwords(dtime_list, _, **kwargs):
if len(dtime_list) > 0 and dtime_list[0].lower() == 'now':
dtime_list.pop(0)
return dt.datetime.now()
raise ValueError
def datefstr_year(dtime_list, dtformat, infer_year):
return datetimefstr(dtime_list, dtformat, default_day, infer_year, in_future)
dtstart = None
for fun, dtformat, all_day, infer_year in [
(datefstr_year, locale['datetimeformat'], False, True),
(datefstr_year, locale['longdatetimeformat'], False, False),
(timefstr_day, locale['timeformat'], False, False),
(datetimefstr_weekday, locale['timeformat'], False, False),
(datefstr_year, locale['dateformat'], True, True),
(datefstr_year, locale['longdateformat'], True, False),
(datefstr_weekday, None, True, False),
(datetimefwords, None, False, False),
]:
# if a `short` format contains a year, treat it as a `long` format
if infer_year and '97' in dt.datetime(1997, 10, 11).strftime(dtformat):
infer_year = False
try:
dtstart = fun(dtime_list, dtformat, infer_year=infer_year)
except (ValueError, DateTimeParseError):
pass
else:
return dtstart, all_day
raise DateTimeParseError(
"Could not parse \"{}\".\nPlease check your configuration or run "
"`khal printformats` to see if this does match your configured "
"[long](date|time|datetime)format.\nIf you suspect a bug, please "
"file an issue at https://github.com/pimutils/khal/issues/ "
"".format(dtime_list)
)
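# Illustrative call (the locale values below are assumptions for this sketch):
# >>> loc = {'timeformat': '%H:%M', 'dateformat': '%d.%m.',
# ...        'longdateformat': '%d.%m.%Y', 'datetimeformat': '%d.%m. %H:%M',
# ...        'longdatetimeformat': '%d.%m.%Y %H:%M'}
# >>> parts = ['13.11.', 'lunch']
# >>> guessdatetimefstr(parts, loc)   # matches the short dateformat
# (datetime.datetime(<current or next year>, 11, 13, 0, 0), True)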
def timedelta2str(delta):
# we deliberately ignore any subsecond deltas
total_seconds = int(abs(delta).total_seconds())
seconds = total_seconds % 60
total_seconds -= seconds
total_minutes = total_seconds // 60
minutes = total_minutes % 60
total_minutes -= minutes
total_hours = total_minutes // 60
hours = total_hours % 24
total_hours -= hours
days = total_hours // 24
s = []
if days:
s.append(str(days) + "d")
if hours:
s.append(str(hours) + "h")
if minutes:
s.append(str(minutes) + "m")
if seconds:
s.append(str(seconds) + "s")
if delta != abs(delta):
s = ["-" + part for part in s]
return ' '.join(s)
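# Example outputs (sketch): subseconds are dropped, the sign distributes per part.
# >>> timedelta2str(dt.timedelta(days=1, hours=2, minutes=30))
# '1d 2h 30m'
# >>> timedelta2str(-dt.timedelta(minutes=90))
# '-1h -30m'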
def guesstimedeltafstr(delta_string):
"""parses a timedelta from a string
:param delta_string: string encoding time-delta, e.g. '1h 15m'
:type delta_string: str
:rtype: datetime.timedelta
"""
tups = re.split(r'(-?\d+)', delta_string)
if not re.match(r'^\s*$', tups[0]):
raise ValueError('Invalid beginning of timedelta string "%s": "%s"'
% (delta_string, tups[0]))
tups = tups[1:]
res = dt.timedelta()
for num, unit in zip(tups[0::2], tups[1::2]):
try:
numint = int(num)
except ValueError:
raise DateTimeParseError(
'Invalid number in timedelta string "%s": "%s"' % (delta_string, num))
ulower = unit.lower().strip()
if ulower == 'd' or ulower == 'day' or ulower == 'days':
res += dt.timedelta(days=numint)
elif ulower == 'h' or ulower == 'hour' or ulower == 'hours':
res += dt.timedelta(hours=numint)
elif (ulower == 'm' or ulower == 'minute' or ulower == 'minutes' or
ulower == 'min'):
res += dt.timedelta(minutes=numint)
elif (ulower == 's' or ulower == 'second' or ulower == 'seconds' or
ulower == 'sec'):
res += dt.timedelta(seconds=numint)
else:
raise ValueError('Invalid unit in timedelta string "%s": "%s"'
% (delta_string, unit))
return res
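# Example (sketch): units may be mixed and individually signed.
# >>> guesstimedeltafstr('1h 15m')   # -> timedelta of 1 hour 15 minutes
# >>> guesstimedeltafstr('1d -2h')   # -> timedelta of 22 hours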
def guessrangefstr(daterange, locale,
default_timedelta_date=dt.timedelta(days=1),
default_timedelta_datetime=dt.timedelta(hours=1),
adjust_reasonably=False,
):
"""parses a range string
:param daterange: date1 [date2 | timedelta]
:type daterange: str or list
:param locale:
:returns: start and end of the date(time) range and if
this is an all-day time range or not,
**NOTE**: the end is *exclusive* if this is an allday event
:rtype: (datetime, datetime, bool)
"""
range_list = daterange
if isinstance(daterange, str):
range_list = daterange.split(' ')
if range_list == ['week']:
today_weekday = dt.datetime.today().weekday()
start = dt.datetime.today() - dt.timedelta(days=(today_weekday - locale['firstweekday']))
end = start + dt.timedelta(days=8)
return start, end, True
for i in reversed(range(1, len(range_list) + 1)):
start = ' '.join(range_list[:i])
end = ' '.join(range_list[i:])
allday = False
try:
# figuring out start
split = start.split(" ")
start, allday = guessdatetimefstr(split, locale)
if len(split) != 0:
continue
# and end
if len(end) == 0:
if allday:
end = start + default_timedelta_date
else:
end = start + default_timedelta_datetime
elif end.lower() == 'eod':
end = dt.datetime.combine(start.date(), dt.time.max)
elif end.lower() == 'week':
start -= dt.timedelta(days=(start.weekday() - locale['firstweekday']))
end = start + dt.timedelta(days=8)
else:
try:
delta = guesstimedeltafstr(end)
if allday and delta.total_seconds() % (3600 * 24):
# TODO better error class, no logging in here
logger.fatal(
"Cannot give delta containing anything but whole days for allday events"
)
raise FatalError()
elif delta.total_seconds() == 0:
logger.fatal(
"Events that last no time are not allowed"
)
raise FatalError()
end = start + delta
except (ValueError, DateTimeParseError):
split = end.split(" ")
end, end_allday = guessdatetimefstr(
split, locale, default_day=start.date(), in_future=False)
if len(split) != 0:
continue
if allday:
end += dt.timedelta(days=1)
if adjust_reasonably:
if allday:
# test if end's year is this year, but start's year is not
today = dt.datetime.today()
if end.year == today.year and start.year != today.year:
end = dt.datetime(start.year, *end.timetuple()[1:6])
if end < start:
end = dt.datetime(end.year + 1, *end.timetuple()[1:6])
if end < start:
end = dt.datetime(*start.timetuple()[0:3] + end.timetuple()[3:5])
if end < start:
end = end + dt.timedelta(days=1)
return start, end, allday
except (ValueError, DateTimeParseError):
pass
raise DateTimeParseError(
"Could not parse \"{}\".\nPlease check your configuration or run "
"`khal printformats` to see if this does match your configured "
"[long](date|time|datetime)format.\nIf you suspect a bug, please "
"file an issue at https://github.com/pimutils/khal/issues/ "
"".format(daterange)
)
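# Accepted range shapes, as handled above (a summary, not exhaustive):
#   'date'             -> start + default timedelta
#   'date date'        -> explicit start and end
#   'datetime delta'   -> e.g. '13.11. 16:00 1h', end = start + delta
#   'date eod'         -> until the end of the start day
#   'date week'        -> the week containing the start day
#   'week'             -> the current week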
def rrulefstr(repeat, until, locale):
if repeat in ["daily", "weekly", "monthly", "yearly"]:
rrule_settings = {'freq': repeat}
if until:
until_dt, is_date = guessdatetimefstr(until.split(' '), locale)
rrule_settings['until'] = until_dt
return rrule_settings
else:
logger.fatal("Invalid value for the repeat option. \
Possible values are: daily, weekly, monthly or yearly")
raise FatalError()
def eventinfofstr(info_string, locale, default_event_duration, default_dayevent_duration,
adjust_reasonably=False, localize=False):
"""parses a string of the form START [END | DELTA] [TIMEZONE] [SUMMARY] [::
DESCRIPTION] into a dictionary with keys: dtstart, dtend, timezone, allday,
summary, description
:param info_string:
:type info_string: string fitting the form
:param locale:
:type locale: locale
:param adjust_reasonably:
:type adjust_reasonably: passed on to guessrangefstr
:rtype: dictionary
"""
description = None
if " :: " in info_string:
        info_string, description = info_string.split(' :: ', 1)
parts = info_string.split(' ')
summary = None
start = None
end = None
tz = None
allday = False
for i in reversed(range(1, len(parts) + 1)):
try:
start, end, allday = guessrangefstr(
' '.join(parts[0:i]), locale,
default_event_duration,
default_dayevent_duration,
adjust_reasonably=adjust_reasonably,
)
except (ValueError, DateTimeParseError):
continue
if start is not None and end is not None:
try:
# next element is a valid Olson db timezone string
tz = pytz.timezone(parts[i])
i += 1
except (pytz.UnknownTimeZoneError, UnicodeDecodeError, IndexError):
tz = None
summary = ' '.join(parts[i:])
break
if start is None or end is None:
raise DateTimeParseError(
"Could not parse \"{}\".\nPlease check your configuration or run "
"`khal printformats` to see if this does match your configured "
"[long](date|time|datetime)format.\nIf you suspect a bug, please "
"file an issue at https://github.com/pimutils/khal/issues/ "
"".format(info_string)
)
if tz is None:
tz = locale['default_timezone']
if allday:
start = start.date()
end = end.date()
info = {}
info["dtstart"] = start
info["dtend"] = end
info["summary"] = summary if summary else None
info["description"] = description
info["timezone"] = tz if not allday else None
info["allday"] = allday
return info
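# End-to-end sketch (locale `loc` and durations are assumptions, as above):
# >>> eventinfofstr('13.11. 16:00 1h coffee :: with Alice', loc,
# ...               dt.timedelta(hours=1), dt.timedelta(days=1))
# returns a dict with dtstart/dtend one hour apart, summary 'coffee',
# description 'with Alice', the locale's default timezone and allday=False.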
|
|
#!/usr/bin/env python
################################################################################
# Copyright (C) The Qt Company Ltd.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of The Qt Company Ltd, nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
################################################################################
import os
import sys
import getopt
import subprocess
import re
import shutil
from glob import glob
import common
ignoreErrors = False
debug_build = False
def usage():
print "Usage: %s <creator_install_dir> [qmake_path]" % os.path.basename(sys.argv[0])
def which(program):
def is_exe(fpath):
return os.path.exists(fpath) and os.access(fpath, os.X_OK)
fpath = os.path.dirname(program)
if fpath:
if is_exe(program):
return program
if common.is_windows_platform():
if is_exe(program + ".exe"):
return program + ".exe"
else:
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
if common.is_windows_platform():
if is_exe(exe_file + ".exe"):
return exe_file + ".exe"
return None
def is_debug(fpath):
    # match all Qt Core DLLs from Qt4, Qt5beta2, Qt5rc1 and later,
    # which all place the version number differently
    coredebug = re.compile(r'Qt[1-9]?Core[1-9]?d[1-9]?\.dll')
# bootstrap exception
if coredebug.search(fpath):
return True
output = subprocess.check_output(['objdump', '-x', fpath])
    return coredebug.search(output) is not None
def is_debug_build(install_dir):
return is_debug(os.path.join(install_dir, 'bin', 'openmvswd.exe'))
def op_failed(details=None):
    if details is not None:
        print details
    if not ignoreErrors:
print("Error: operation failed!")
sys.exit(2)
else:
print("Error: operation failed, but proceeding gracefully.")
def is_ignored_windows_file(use_debug, basepath, filename):
ignore_patterns = ['.lib', '.pdb', '.exp', '.ilk']
if use_debug:
ignore_patterns.extend(['libEGL.dll', 'libGLESv2.dll'])
else:
ignore_patterns.extend(['libEGLd.dll', 'libGLESv2d.dll'])
for ip in ignore_patterns:
if filename.endswith(ip):
return True
if filename.endswith('.dll'):
filepath = os.path.join(basepath, filename)
if use_debug != is_debug(filepath):
return True
return False
def ignored_qt_lib_files(path, filenames):
if not common.is_windows_platform():
return []
return [fn for fn in filenames if is_ignored_windows_file(debug_build, path, fn)]
def copy_qt_libs(target_qt_prefix_path, qt_libs_dir, qt_plugin_dir, qt_import_dir, qt_qml_dir, plugins, imports):
print "copying Qt libraries..."
if common.is_windows_platform():
libraries = glob(os.path.join(qt_libs_dir, '*.dll'))
else:
libraries = glob(os.path.join(qt_libs_dir, '*.so.*'))
if common.is_windows_platform():
lib_dest = os.path.join(target_qt_prefix_path)
else:
lib_dest = os.path.join(target_qt_prefix_path, 'lib')
if not os.path.exists(lib_dest):
os.makedirs(lib_dest)
if common.is_windows_platform():
libraries = [lib for lib in libraries if not is_ignored_windows_file(debug_build, '', lib)]
for library in libraries:
print library, '->', lib_dest
if os.path.islink(library):
linkto = os.readlink(library)
try:
os.symlink(linkto, os.path.join(lib_dest, os.path.basename(library)))
except OSError:
op_failed("Link already exists!")
else:
shutil.copy(library, lib_dest)
print "Copying plugins:", plugins
for plugin in plugins:
target = os.path.join(target_qt_prefix_path, 'plugins', plugin)
if (os.path.exists(target)):
shutil.rmtree(target)
pluginPath = os.path.join(qt_plugin_dir, plugin)
if (os.path.exists(pluginPath)):
print('{0} -> {1}'.format(pluginPath, target))
common.copytree(pluginPath, target, ignore=ignored_qt_lib_files, symlinks=True)
print "Copying imports:", imports
for qtimport in imports:
target = os.path.join(target_qt_prefix_path, 'imports', qtimport)
if (os.path.exists(target)):
shutil.rmtree(target)
import_path = os.path.join(qt_import_dir, qtimport)
if os.path.exists(import_path):
print('{0} -> {1}'.format(import_path, target))
common.copytree(import_path, target, ignore=ignored_qt_lib_files, symlinks=True)
if (os.path.exists(qt_qml_dir)):
print "Copying qt quick 2 imports"
target = os.path.join(target_qt_prefix_path, 'qml')
if (os.path.exists(target)):
shutil.rmtree(target)
print('{0} -> {1}'.format(qt_qml_dir, target))
common.copytree(qt_qml_dir, target, ignore=ignored_qt_lib_files, symlinks=True)
def add_qt_conf(target_path, qt_prefix_path):
qtconf_filepath = os.path.join(target_path, 'qt.conf')
prefix_path = os.path.relpath(qt_prefix_path, target_path).replace('\\', '/')
print('Creating qt.conf in "{0}":'.format(qtconf_filepath))
f = open(qtconf_filepath, 'w')
f.write('[Paths]\n')
f.write('Prefix={0}\n'.format(prefix_path))
if common.is_linux_platform():
f.write('Libraries=lib\n')
f.write('Plugins=plugins\n')
f.close()
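# For the Linux layout used in main() below, the generated file would read
# (prefix path relative from <install_dir>/bin; an illustrative sketch):
#   [Paths]
#   Prefix=../lib/Qt
#   Libraries=lib
#   Plugins=plugins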
def copy_translations(install_dir, qt_tr_dir):
translations = glob(os.path.join(qt_tr_dir, '*.qm'))
tr_dir = os.path.join(install_dir, 'share', 'qtcreator', 'translations')
print "copying translations..."
for translation in translations:
print translation, '->', tr_dir
shutil.copy(translation, tr_dir)
def copyPreservingLinks(source, destination):
if os.path.islink(source):
linkto = os.readlink(source)
destFilePath = destination
if os.path.isdir(destination):
destFilePath = os.path.join(destination, os.path.basename(source))
os.symlink(linkto, destFilePath)
else:
shutil.copy(source, destination)
def deploy_libclang(install_dir, llvm_install_dir, chrpath_bin):
# contains pairs of (source, target directory)
deployinfo = []
if common.is_windows_platform():
deployinfo.append((os.path.join(llvm_install_dir, 'bin', 'libclang.dll'),
os.path.join(install_dir, 'bin')))
deployinfo.append((os.path.join(llvm_install_dir, 'bin', 'clang.exe'),
os.path.join(install_dir, 'bin')))
deployinfo.append((os.path.join(llvm_install_dir, 'bin', 'clang-cl.exe'),
os.path.join(install_dir, 'bin')))
else:
libsources = glob(os.path.join(llvm_install_dir, 'lib', 'libclang.so*'))
for libsource in libsources:
deployinfo.append((libsource, os.path.join(install_dir, 'lib', 'qtcreator')))
clangbinary = os.path.join(llvm_install_dir, 'bin', 'clang')
clangbinary_targetdir = os.path.join(install_dir, 'libexec', 'qtcreator')
deployinfo.append((clangbinary, clangbinary_targetdir))
# copy link target if clang is actually a symlink
if os.path.islink(clangbinary):
linktarget = os.readlink(clangbinary)
deployinfo.append((os.path.join(os.path.dirname(clangbinary), linktarget),
os.path.join(clangbinary_targetdir, linktarget)))
resourcesource = os.path.join(llvm_install_dir, 'lib', 'clang')
resourcetarget = os.path.join(install_dir, 'share', 'qtcreator', 'cplusplus', 'clang')
print "copying libclang..."
for source, target in deployinfo:
print source, '->', target
copyPreservingLinks(source, target)
if common.is_linux_platform():
# libclang was statically compiled, so there is no need for the RPATHs
# and they are confusing when fixing RPATHs later in the process
print "removing libclang RPATHs..."
for source, target in deployinfo:
if not os.path.islink(target):
targetfilepath = target if not os.path.isdir(target) else os.path.join(target, os.path.basename(source))
subprocess.check_call([chrpath_bin, '-d', targetfilepath])
print resourcesource, '->', resourcetarget
if (os.path.exists(resourcetarget)):
shutil.rmtree(resourcetarget)
common.copytree(resourcesource, resourcetarget, symlinks=True)
def main():
try:
opts, args = getopt.gnu_getopt(sys.argv[1:], 'hi', ['help', 'ignore-errors'])
except getopt.GetoptError:
usage()
sys.exit(2)
for o, _ in opts:
if o in ('-h', '--help'):
usage()
sys.exit(0)
if o in ('-i', '--ignore-errors'):
global ignoreErrors
ignoreErrors = True
print "Note: Ignoring all errors"
if len(args) < 1:
usage()
sys.exit(2)
install_dir = args[0]
if common.is_linux_platform():
qt_deploy_prefix = os.path.join(install_dir, 'lib', 'Qt')
else:
qt_deploy_prefix = os.path.join(install_dir, 'bin')
qmake_bin = 'qmake'
if len(args) > 1:
qmake_bin = args[1]
qmake_bin = which(qmake_bin)
    if qmake_bin is None:
print "Cannot find required binary 'qmake'."
sys.exit(2)
chrpath_bin = None
if common.is_linux_platform():
chrpath_bin = which('chrpath')
        if chrpath_bin is None:
print "Cannot find required binary 'chrpath'."
sys.exit(2)
qt_install_info = common.get_qt_install_info(qmake_bin)
QT_INSTALL_LIBS = qt_install_info['QT_INSTALL_LIBS']
QT_INSTALL_BINS = qt_install_info['QT_INSTALL_BINS']
QT_INSTALL_PLUGINS = qt_install_info['QT_INSTALL_PLUGINS']
QT_INSTALL_IMPORTS = qt_install_info['QT_INSTALL_IMPORTS']
QT_INSTALL_QML = ""
QT_INSTALL_TRANSLATIONS = qt_install_info['QT_INSTALL_TRANSLATIONS']
plugins = ['egldeviceintegrations', 'iconengines', 'imageformats', 'platforms', 'printsupport', 'sqldrivers', 'xcbglintegrations']
imports = ['Qt', 'QtWebKit']
if common.is_windows_platform():
global debug_build
debug_build = is_debug_build(install_dir)
if common.is_windows_platform():
copy_qt_libs(qt_deploy_prefix, QT_INSTALL_BINS, QT_INSTALL_PLUGINS, QT_INSTALL_IMPORTS, QT_INSTALL_QML, plugins, imports)
else:
copy_qt_libs(qt_deploy_prefix, QT_INSTALL_LIBS, QT_INSTALL_PLUGINS, QT_INSTALL_IMPORTS, QT_INSTALL_QML, plugins, imports)
if "LLVM_INSTALL_DIR" in os.environ:
deploy_libclang(install_dir, os.environ["LLVM_INSTALL_DIR"], chrpath_bin)
if not common.is_windows_platform():
print "fixing rpaths..."
common.fix_rpaths(install_dir, os.path.join(qt_deploy_prefix, 'lib'), qt_install_info, chrpath_bin)
add_qt_conf(os.path.join(install_dir, 'bin'), qt_deploy_prefix)
if __name__ == "__main__":
if common.is_mac_platform():
print "Mac OS is not supported by this script, please use macqtdeploy!"
sys.exit(2)
else:
main()
|
|
# --------------------------------------------------------
# Caffe @ Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
import dragon.ops as ops
from dragon.core.tensor import Tensor
from ..layer import Layer
class InnerProductLayer(Layer):
"""The implementation of ``InnerProductLayer``.
Parameters
----------
num_output : int
The output dim. Refer `InnerProductParameter.num_output`_.
bias_term : boolean
Whether to use bias. Refer `InnerProductParameter.bias_term`_.
weight_filler : caffe_pb2.FillerParameter
The filler of weight. Refer `InnerProductParameter.weight_filler`_.
bias_filler : caffe_pb2.FillerParameter
The filler of bias. Refer `InnerProductParameter.bias_filler`_.
axis : int
The start axis to calculate. Refer `InnerProductParameter.axis`_.
transpose : boolean
Whether to transpose the weights. Refer `InnerProductParameter.transpose`_.
"""
def __init__(self, LayerParameter):
super(InnerProductLayer, self).__init__(LayerParameter)
param = LayerParameter.inner_product_param
self._param = {'axis': param.axis,
'num_output': param.num_output,
'TransW': not param.transpose}
weight = Tensor(LayerParameter.name + '@param0')
weight_diff = Tensor(LayerParameter.name + '@param0_grad')
self.Fill(weight, param, 'weight_filler')
self._blobs.append({'data': weight, 'diff': weight_diff})
if param.bias_term:
bias = Tensor(LayerParameter.name + '@param1')
bias_diff = Tensor(LayerParameter.name + '@param1_grad')
self.Fill(bias, param, 'bias_filler')
self._blobs.append({'data': bias, 'diff': bias_diff})
def Setup(self, bottom):
super(InnerProductLayer, self).Setup(bottom)
return ops.InnerProduct(bottom + [blob['data'] for blob in self._blobs], **self._param)
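# An illustrative prototxt snippet this layer would consume (hypothetical
# names, standard Caffe fields):
#   layer {
#     name: "fc6"  type: "InnerProduct"  bottom: "pool5"  top: "fc6"
#     inner_product_param { num_output: 4096  bias_term: true }
#   }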
class AccuracyLayer(Layer):
"""The implementation of ``AccuracyLayer``.
Parameters
----------
top_k : int
The top-k accuracy. Refer `AccuracyParameter.top_k`_.
axis : int
The axis of classes. Refer `AccuracyParameter.axis`_.
ignore_label : int
The label to ignore. Refer `AccuracyParameter.ignore_label`_.
"""
def __init__(self, LayerParameter):
super(AccuracyLayer, self).__init__(LayerParameter)
param = LayerParameter.accuracy_param
self._param = {'top_k': param.top_k,
'ignore_labels': [param.ignore_label]
if param.HasField('ignore_label') else []}
def Setup(self, bottom):
super(AccuracyLayer, self).Setup(bottom)
return ops.Accuracy(bottom, **self._param)
class PythonLayer(Layer):
"""The implementation of ``PythonLayer``.
Parameters
----------
module : str
The module. Refer `PythonParameter.module`_.
layer : str
The class name of layer. Refer `PythonParameter.layer`_.
param_str : str
The str describing parameters. Refer `PythonParameter.param_str`_.
"""
def __init__(self, LayerParameter):
super(PythonLayer, self).__init__(LayerParameter)
param = LayerParameter.python_param
self._param = {'module': param.module,
'op': param.layer,
'param_str': param.param_str}
def Setup(self, bottom):
super(PythonLayer, self).Setup(bottom)
return ops.Run(bottom, nout=len(self._top), **self._param)
class EltwiseLayer(Layer):
"""The implementation of ``EltwiseLayer``.
Parameters
----------
operation : EltwiseParameter.EltwiseOp
The operation. Refer `EltwiseParameter.operation`_.
coeff : list of float
The coefficients. Refer `EltwiseParameter.coeff`_.
"""
def __init__(self, LayerParameter):
super(EltwiseLayer, self).__init__(LayerParameter)
param = LayerParameter.eltwise_param
self._param = {'operation': {0: 'PROD', 1: 'SUM', 2: 'MAX'}[param.operation],
'coeffs': [element for element in param.coeff]
if len(param.coeff) > 0 else None}
def Setup(self, bottom):
super(EltwiseLayer, self).Setup(bottom)
return ops.Eltwise(bottom, **self._param)
class AddLayer(Layer):
"""
The extended implementation of ``EltwiseLayer``.
"""
def __init__(self, LayerParameter):
super(AddLayer, self).__init__(LayerParameter)
def Setup(self, bottom):
super(AddLayer, self).Setup(bottom)
return ops.Add(bottom, **self._param)
class ConcatLayer(Layer):
"""The implementation of ``ConcatLayer``.
Parameters
----------
axis : int
The axis to concatenate. Refer `ConcatParameter.axis`_.
"""
def __init__(self, LayerParameter):
super(ConcatLayer, self).__init__(LayerParameter)
param = LayerParameter.concat_param
self._param = {'axis': param.axis}
def Setup(self, bottom):
super(ConcatLayer, self).Setup(bottom)
return ops.Concat(bottom, **self._param)
class DenseConcatLayer(Layer):
"""The extended implementation for `DenseNet`_.
Parameters
----------
axis : int
The axis to concatenate. Refer `ConcatParameter.axis`_.
growth_rate : int
The growth rate.
"""
def __init__(self, LayerParameter):
super(DenseConcatLayer, self).__init__(LayerParameter)
param = LayerParameter.dense_concat_param
self._param = {'axis': param.axis,
'growth_rate': param.growth_rate}
def Setup(self, bottom):
super(DenseConcatLayer, self).Setup(bottom)
return ops.DenseConcat(bottom, **self._param)
class CropLayer(Layer):
"""The implementation of ``CropLayer``.
Parameters
----------
axis : int
The start axis. Refer `CropParameter.axis`_.
offset : list of int
The offsets. Refer `CropParameter.offset`_.
"""
def __init__(self, LayerParameter):
super(CropLayer, self).__init__(LayerParameter)
param = LayerParameter.crop_param
self._param = {'start_axis': param.axis,
'offsets': [int(element) for element in param.offset]}
def Setup(self, bottom):
super(CropLayer, self).Setup(bottom)
self._param['shape_like'] = bottom[1]
self._param['starts'] = self._param['ends'] = None
return ops.Crop(bottom[0], **self._param)
class ReshapeLayer(Layer):
"""The implementation of ``ReshapeLayer``.
Parameters
----------
shape : list of int
The output shape. Refer `ReshapeParameter.shape`_.
"""
def __init__(self, LayerParameter):
super(ReshapeLayer, self).__init__(LayerParameter)
param = LayerParameter.reshape_param
shape = param.shape
self._param = {'shape': [int(element) for element in shape.dim]}
def Setup(self, bottom):
super(ReshapeLayer, self).Setup(bottom)
input = bottom[0] if isinstance(bottom, list) else bottom
return ops.Reshape(input, **self._param)
class PermuteLayer(Layer):
"""The implementation of ``PermuteLayer``.
Parameters
----------
order : list of int
The permutation. Refer `PermuteParameter.order`_.
"""
def __init__(self, LayerParameter):
super(PermuteLayer, self).__init__(LayerParameter)
param = LayerParameter.permute_param
self._param = {'perms': [int(element) for element in param.order]}
def Setup(self, bottom):
super(PermuteLayer, self).Setup(bottom)
input = bottom[0] if isinstance(bottom, list) else bottom
return ops.Transpose(input, **self._param)
class FlattenLayer(Layer):
"""The implementation of ``FlattenLayer``.
Parameters
----------
axis : int
The start axis. Refer `FlattenParameter.axis`_.
end_axis : int
The end axis. Refer `FlattenParameter.end_axis`_.
"""
def __init__(self, LayerParameter):
super(FlattenLayer, self).__init__(LayerParameter)
param = LayerParameter.flatten_param
        axis = param.axis
        end_axis = param.end_axis
num_axes = end_axis - axis + 1 if end_axis != -1 else -1
self._param = {'axis': axis, 'num_axes': num_axes}
def Setup(self, bottom):
super(FlattenLayer, self).Setup(bottom)
input = bottom[0] if isinstance(bottom, list) else bottom
return ops.Flatten(input, **self._param)
class GatherLayer(Layer):
"""The extended implementation of ``GatherOp``.
Parameters
----------
axis : int
The axis for gathering. Refer ``GatherParameter.axis``.
"""
def __init__(self, LayerParameter):
super(GatherLayer, self).__init__(LayerParameter)
param = LayerParameter.gather_param
self._param = {'axis': param.axis}
def Setup(self, bottom):
super(GatherLayer, self).Setup(bottom)
return ops.Gather(bottom[0], indices=bottom[1], **self._param)
class SoftmaxLayer(Layer):
"""The implementation of ``SoftmaxLayer``.
Parameters
----------
axis : int
The axis to perform softmax. Refer `SoftmaxParameter.axis`_.
"""
def __init__(self, LayerParameter):
super(SoftmaxLayer, self).__init__(LayerParameter)
param = LayerParameter.softmax_param
self._param = {'axis': param.axis}
def Setup(self, bottom):
super(SoftmaxLayer, self).Setup(bottom)
input = bottom[0] if isinstance(bottom, list) else bottom
return ops.Softmax(input, **self._param)
class ArgMaxLayer(Layer):
"""The implementation of ``ArgMaxLayer``.
Parameters
----------
top_k : int
The top k results to keep. Refer `ArgMaxParameter.top_k`_.
axis : int
The axis to perform argmax. Refer `ArgMaxParameter.axis`_.
"""
def __init__(self, LayerParameter):
super(ArgMaxLayer, self).__init__(LayerParameter)
param = LayerParameter.argmax_param
self._param = {'top_k': param.top_k,
'axis': param.axis,
'keep_dims': True}
def Setup(self, bottom):
super(ArgMaxLayer, self).Setup(bottom)
input = bottom[0] if isinstance(bottom, list) else bottom
return ops.Argmax(input, **self._param)
class BatchNormLayer(Layer):
"""The implementation of ``BatchNormLayer``.
Parameters
----------
use_global_stats : boolean
Refer `BatchNormParameter.use_global_stats`_.
moving_average_fraction : float
Refer `BatchNormParameter.moving_average_fraction`_.
eps : float
Refer `BatchNormParameter.eps`_.
"""
def __init__(self, LayerParameter):
super(BatchNormLayer, self).__init__(LayerParameter)
param = LayerParameter.batch_norm_param
self._param = {'use_stats': int(param.use_global_stats)
if param.HasField('use_global_stats') else -1,
'momentum': param.moving_average_fraction,
'eps': param.eps,
'axis': 1,
'mode': 'CAFFE'}
# mean, var, factor are set to 0 in order to do statistics
mean = Tensor(LayerParameter.name + '@param0').Constant(value=0.0)
var = Tensor(LayerParameter.name + '@param1').Constant(value=0.0)
factor = Tensor(LayerParameter.name + '@param2').Constant(value=0.0)
        # in Dragon, setting diff to None skips gradient computation automatically,
        # while in BVLC Caffe you must set lr_mult = 0 manually
self._blobs.append({'data': mean, 'diff': None})
self._blobs.append({'data': var, 'diff': None})
self._blobs.append({'data': factor, 'diff': None})
def Setup(self, bottom):
super(BatchNormLayer, self).Setup(bottom)
return ops.BatchNorm(bottom + [blob['data'] for blob in self._blobs], **self._param)
class BatchRenormLayer(Layer):
"""The implementation of ``BatchRenormLayer``.
Parameters
----------
use_global_stats : boolean
Refer ``BatchRenormParameter.use_global_stats``.
moving_average_fraction : float
Refer ``BatchRenormParameter.moving_average_fraction``.
eps : float
Refer ``BatchRenormParameter.eps``.
r_max : float
Refer ``BatchRenormParameter.r_max``.
d_max : float
Refer ``BatchRenormParameter.d_max``.
t_delta : float
Refer ``BatchRenormParameter.t_delta``.
"""
def __init__(self, LayerParameter):
super(BatchRenormLayer, self).__init__(LayerParameter)
param = LayerParameter.batch_renorm_param
self._param = {'use_stats': int(param.use_global_stats)
if param.HasField('use_global_stats') else -1,
'momentum': param.moving_average_fraction,
'eps': param.eps,
'r_max': float(param.r_max),
'd_max': float(param.d_max),
't_delta': float(param.t_delta),
'axis': 1,
'mode': 'CAFFE'}
mean = Tensor(LayerParameter.name + '@param0').Constant(value=0.0)
var = Tensor(LayerParameter.name + '@param1').Constant(value=0.0)
factor = Tensor(LayerParameter.name + '@param2').Constant(value=0.0)
self._blobs.append({'data': mean, 'diff': None})
self._blobs.append({'data': var, 'diff': None})
self._blobs.append({'data': factor, 'diff': None})
def Setup(self, bottom):
super(BatchRenormLayer, self).Setup(bottom)
return ops.BatchRenorm(bottom + [blob['data'] for blob in self._blobs], **self._param)
class InstanceNormLayer(Layer):
"""
The implementation of ``InstanceNormLayer``.
    Introduced by `[Ulyanov et al., 2016] <https://arxiv.org/abs/1607.08022>`_
"""
def __init__(self, LayerParameter):
super(InstanceNormLayer, self).__init__(LayerParameter)
self._param = {'axis': 1}
def Setup(self, bottom):
super(InstanceNormLayer, self).Setup(bottom)
return ops.InstanceNorm(bottom[0], **self._param)
class ScaleLayer(Layer):
"""The implementation of ``ScaleLayer``.
Parameters
----------
axis : int
The start axis. Refer `ScaleParameter.axis`_.
num_axes : int
The number of axes. Refer `ScaleParameter.num_axes`_.
filler : FillerParameter
The filler of scale parameter. Refer `ScaleParameter.filler`_.
bias_term : boolean
Whether to use bias. Refer `ScaleParameter.bias_term`_.
bias_filler : FillerParameter
The filler of bias parameter. Refer `ScaleParameter.bias_filler`_.
"""
def __init__(self, LayerParameter):
super(ScaleLayer, self).__init__(LayerParameter)
param = LayerParameter.scale_param
self._param = {'axis': param.axis,
'num_axes': param.num_axes}
scale = Tensor(LayerParameter.name + '@param0')
scale_diff = Tensor(LayerParameter.name + '@param0_grad')
if param.HasField('filler'):
self.Fill(scale, param, 'filler')
else: scale.Constant(value=1.0)
self._blobs.append({'data': scale, 'diff': scale_diff})
if param.bias_term:
bias = Tensor(LayerParameter.name + '@param1')
bias_diff = Tensor(LayerParameter.name + '@param1_grad')
            # automatically fill with 0 if bias_filler is not specified
self.Fill(bias, param, 'bias_filler')
self._blobs.append({'data': bias, 'diff': bias_diff})
def Setup(self, bottom):
super(ScaleLayer, self).Setup(bottom)
return ops.Scale(bottom + [blob['data'] for blob in self._blobs], **self._param)
class BNLayer(Layer):
"""The implementation of ``BNLayer``.
Parameters
----------
use_global_stats : boolean
Refer `BatchNormParameter.use_global_stats`_.
moving_average_fraction : float
Refer `BatchNormParameter.moving_average_fraction`_.
eps : float
Refer `BatchNormParameter.eps`_.
filler : FillerParameter
The filler of scale parameter. Refer `ScaleParameter.filler`_.
bias_filler : FillerParameter
The filler of bias parameter. Refer `ScaleParameter.bias_filler`_.
"""
def __init__(self, LayerParameter):
super(BNLayer, self).__init__(LayerParameter)
bn_param = LayerParameter.batch_norm_param
scale_param = LayerParameter.scale_param
self._param = {'use_stats': int(bn_param.use_global_stats)
if bn_param.HasField('use_global_stats') else -1,
'momentum': bn_param.moving_average_fraction,
'eps': bn_param.eps,
'axis': 1}
mean = Tensor(LayerParameter.name + '@param0').Constant(value=0.0)
var = Tensor(LayerParameter.name + '@param1').Constant(value=0.0)
scale = Tensor(LayerParameter.name + '@param2')
scale_diff = Tensor(LayerParameter.name + '@param2_grad')
bias = Tensor(LayerParameter.name + '@param3')
bias_diff = Tensor(LayerParameter.name + '@param3_grad')
if scale_param.HasField('filler'):
self.Fill(scale, scale_param, 'filler')
else: scale.Constant(value=1.0)
self.Fill(bias, scale_param, 'bias_filler')
self.norm_blobs = [{'data': mean, 'diff': None},
{'data': var, 'diff': None}]
self.scale_blobs = [{'data': scale, 'diff': scale_diff},
{'data': bias, 'diff': bias_diff}]
self._blobs.extend(self.norm_blobs)
self._blobs.extend(self.scale_blobs)
def Setup(self, bottom):
super(BNLayer, self).Setup(bottom)
return ops.FusedBatchNorm(bottom + [blob['data'] for blob in self._blobs], **self._param)
class NormalizeLayer(Layer):
"""The implementation of ``NormalizeLayer``.
Parameters
----------
across_spatial : boolean
        Whether to compute statistics across spatial positions. Refer `NormalizeParameter.across_spatial`_.
scale_filler : FillerParameter
The filler of scale parameter. Refer `NormalizeParameter.scale_filler`_.
channel_shared : boolean
Whether to scale across channels. Refer `NormalizeParameter.channel_shared`_.
eps : float
The eps. Refer `NormalizeParameter.eps`_.
"""
def __init__(self, LayerParameter):
super(NormalizeLayer, self).__init__(LayerParameter)
param = LayerParameter.normalize_param
self._l2norm_param = {'axis': 1,
'num_axes': -1 if param.across_spatial else 1,
'eps': param.eps}
self._scale_param = {'axis': 1,
'num_axes': 0 if param.channel_shared else 1}
scale = Tensor(LayerParameter.name + '@param0')
if param.HasField('scale_filler'):
self.Fill(scale, param, 'scale_filler')
else: scale.Constant(value=1.0)
self.scale_blobs = [{'data': scale, 'diff': Tensor(scale.name + '_grad')}]
self._blobs.extend(self.scale_blobs)
def Setup(self, bottom):
super(NormalizeLayer, self).Setup(bottom)
norm_out = [ops.L2Norm(bottom[0], **self._l2norm_param)]
scale_out = ops.Scale(norm_out + [blob['data'] for blob in self.scale_blobs],
**self._scale_param)
return scale_out
class TileLayer(Layer):
"""The extended implementation of ``TileLayer``.
Parameters
----------
multiples : caffe_pb2.BlobShape
The multiples. Refer `TileParameter.multiples`_.
"""
def __init__(self, LayerParameter):
super(TileLayer, self).__init__(LayerParameter)
param = LayerParameter.tile_param
multiples = param.multiples
self._param = {'multiples': [int(multiple) for multiple in multiples.dim]}
def Setup(self, bottom):
super(TileLayer, self).Setup(bottom)
input = bottom[0] if isinstance(bottom, list) else bottom
return ops.Tile(input, **self._param)
class ReductionLayer(Layer):
"""The extended implementation of ``ReductionLayer``.
Parameters
----------
operation : caffe_pb2.ReductionOp
The operation. Refer `ReductionParameter.operation`_.
axis : int
        The axis to reduce. Refer `ReductionParameter.axis`_.
"""
def __init__(self, LayerParameter):
super(ReductionLayer, self).__init__(LayerParameter)
param = LayerParameter.reduction_param
if param.axis < 0:
if param.axis != -1:
                raise ValueError('A negative axis can only be -1 (reduce all).')
self._param = {'operation': {1: 'SUM', 4: 'MEAN'}[param.operation],
'axis': param.axis}
def Setup(self, bottom):
super(ReductionLayer, self).Setup(bottom)
input = bottom[0] if isinstance(bottom, list) else bottom
return ops.Reduce(input, **self._param)
class ExpandDimsLayer(Layer):
"""The implementation of ``ExpandDimsLayer``.
Parameters
----------
axis : int
        The axis to expand at. Refer `ExpandDimsParameter.axis`_.
"""
def __init__(self, LayerParameter):
super(ExpandDimsLayer, self).__init__(LayerParameter)
param = LayerParameter.expand_dims_param
self._param = {'axis': param.axis}
def Setup(self, bottom):
super(ExpandDimsLayer, self).Setup(bottom)
input = bottom[0] if isinstance(bottom, list) else bottom
return ops.ExpandDims(input, **self._param)
class StopGradientLayer(Layer):
"""
The implementation of ``StopGradientLayer``.
"""
def __init__(self, LayerParameter):
super(StopGradientLayer, self).__init__(LayerParameter)
def Setup(self, bottom):
super(StopGradientLayer, self).Setup(bottom)
input = bottom[0] if isinstance(bottom, list) else bottom
return ops.StopGradient(input, **self._param)
class ProposalLayer(Layer):
"""The implementation of ``ProposalLayer``.
Parameters
----------
stride : list of int
The stride of anchors. Refer ``ProposalParameter.stride``.
scale : list of float
The scales of anchors. Refer `ProposalParameter.scale`_.
ratio : list of float
The ratios of anchors. Refer `ProposalParameter.ratio`_.
pre_nms_top_n : int
The num of anchors before nms. Refer `ProposalParameter.pre_nms_topn`_.
post_nms_top_n : int
The num of anchors after nms. Refer `ProposalParameter.post_nms_topn`_.
nms_thresh : float
The threshold of nms. Refer `ProposalParameter.nms_thresh`_.
min_size : int
The min size of anchors. Refer `ProposalParameter.min_size`_.
min_level : int
Finest level of the FPN pyramid. Refer ``ProposalParameter.min_level``.
max_level : int
Coarsest level of the FPN pyramid. Refer ``ProposalParameter.max_level``.
canonical_scale : int
The baseline scale of mapping policy. Refer ``ProposalParameter.canonical_scale``.
canonical_level : int
Heuristic level of the canonical scale. Refer ``ProposalParameter.canonical_level``.
"""
def __init__(self, LayerParameter):
super(ProposalLayer, self).__init__(LayerParameter)
param = LayerParameter.proposal_param
self._param = {'strides': param.stride,
'ratios': param.ratio,
'scales': param.scale,
'pre_nms_top_n': param.pre_nms_top_n,
'post_nms_top_n': param.post_nms_top_n,
'nms_thresh': param.nms_thresh,
'min_size': param.min_size,
'min_level': param.min_level,
'max_level': param.max_level,
'canonical_scale': param.canonical_scale,
'canonical_level': param.canonical_level}
def Setup(self, bottom):
super(ProposalLayer, self).Setup(bottom)
return ops.Proposal(bottom, **self._param)
|
|
import time
import re
import inspect
import numpy as np
import pandas as pd
from scipy import stats, optimize, interpolate, signal, linalg, special
import patsy as ps
import statsmodels.api as sm
import sklearn.metrics as met
import sklearn.linear_model as lm
from sklearn.base import BaseEstimator, TransformerMixin, RegressorMixin
def validpar(func, x):
'''
    Filter out keyword arguments that a function cannot accept.
Parameters
----------
func : function
Which cannot accept invalid parameters.
x : dict
Including pairs of parameter name and value.
Returns
-------
op : dict
Valid parameters for the function.
'''
op = dict(i for i in x.items() if i[0] in inspect.signature(func).parameters.keys())
return(op)
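# Example (sketch): keyword arguments the target cannot accept are dropped.
# >>> def f(a, b=1): return a + b
# >>> validpar(f, {'a': 2, 'b': 3, 'c': 99})
# {'a': 2, 'b': 3}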
def strnum(x, func_reduce=max):
'''
    Convert a string to a number, keeping only the numerical parts (0-9 or .); return np.nan if no number is found.
Parameters
----------
x : string
To convert to numbers.
func_reduce : function
Function to reduce multiple numbers.
Returns
-------
op : float
'''
if isinstance(x, str):
        numL = re.findall(r'\d[.\d]*', x)  # '*' (not '+') so single digits also match
if numL:
return(func_reduce([float(i) for i in numL]))
else:
return(np.nan)
else:
return(x)
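# Example (sketch): the largest number wins under the default func_reduce.
# >>> strnum('12.5kg to 30kg')
# 30.0
# >>> strnum('n/a')
# nan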
def plyargs(func, argL, argname, f_concat=list, argcon={}, **kwargs):
'''
Apply function on a list of arguments.
Parameters
----------
func : function
To apply arguments.
argL : list (of lists)
Iterated argument values.
argname : list
Argument names, same length as argL.
f_concat : function
To concat iterated outputs of the function.
argcon : dict
        arguments of f_concat.
Additional keyword arguments will be passed as keywords to the function.
Returns
-------
op : list-like
iterated outputs of the function.
'''
op = f_concat(map(lambda arg: func(**dict(zip(*[argname, arg])), **kwargs), zip(*argL)), **argcon)
return(op)
def apply_df(df, func, axis=0, n_jobs=1, **kwargs):
'''
Apply function on each row/column of dataframe and return a Series.
Parameters
----------
df : DataFrame
To apply function.
    func : function
        Function applied to each column/row (each passed as a Series).
axis : {0, 1}, default 0
- 0 or 'index': apply function along rows.
- 1 or 'columns': apply function along columns.
n_jobs : int, optional (default=1)
The number of jobs to run in parallel.
Additional keyword arguments will be passed as keywords to the function.
Returns
-------
    S : Series
        outputs of func, indexed by column/row labels.
'''
if axis == 0:
pairs = df.iteritems()
index = df.columns
else:
pairs = df.iterrows()
index = df.index
S = pd.Series([func(i_val) for i, i_val in pairs], index, **kwargs)
return(S)
def cross_join(left, right, combine_index=True):
'''
Cross outer join of two DataFrames along column.
Parameters
----------
left : DataFrame
right : DataFrame
combine_index : bool
        Whether to combine the index labels of the two DataFrames.
Returns
-------
DataFrame
'''
out = pd.merge(left.assign(_key=1), right.assign(_key=1), on="_key").drop("_key", axis=1)
if combine_index:
out.index = ["{}_{}".format(i, j) for i in left.index for j in right.index]
return(out)
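# Example (sketch): every left row is paired with every right row.
# >>> a = pd.DataFrame({'x': [1, 2]}, index=['a', 'b'])
# >>> b = pd.DataFrame({'y': [10]}, index=['c'])
# >>> cross_join(a, b).index.tolist()
# ['a_c', 'b_c']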
def collinearvif(df):
'''
Compute variance inflation factor (VIF) of a DataFrame.
Parameters
----------
df : DataFrame
Returns
-------
op : Series
VIF values of each column.
'''
op = pd.Series(np.diag(np.linalg.inv(df.corr())), df.columns, name="CollinearVIF")
return(op)
def summary(df, n=5, pct=[0.1, 0.5, 0.9]):
'''
Summarize DataFrame along columns for data type, sample size, numerical statistics and frequency
Parameters
----------
df : DataFrame
To summary.
n : int
        The number of most frequent categories to keep in the frequency table.
pct : list
Percentiles for numerical statistics.
Returns
-------
op : DataFrame
summary of DataFrame along columns.
'''
def freq(s):
op = pd.value_counts(s)
op = pd.concat([pd.Series(op.index[:n]).rename(lambda x: "FreqCat{}".format(x+1)),
pd.Series(op.values[:n]).rename(lambda x: "FreqVal{}".format(x+1)).T,
pd.Series(op.iloc[n:].sum(), index=["Freq_Others"])])
return(op)
op = pd.concat([df.dtypes.rename("Type"),
df.notnull().sum().rename("N"),
df.describe(pct).iloc[1:].T,
df.apply(freq).T], axis=1).loc[df.columns]
return(op)
## Clean Data
def CLtimenum(s):
'''Convert datetime variable to numeric variable.'''
op = pd.DatetimeIndex(s)
op = pd.get_dummies(pd.DataFrame(dict(zip(*[["{}__{}".format(s.name, i) for i in ["Month", "Year"]], [op.month, op.year]])), index=s.index).astype("O"))
return(op)
def CLnormal(df):
'''
Normalize the distribution of numeric DataFrame to standard normal distribution by rank.
See also
--------
CLscale
'''
op = pd.DataFrame(stats.norm.ppf((df.rank(pct = True) - 0.5/df.shape[0]).fillna(0.5)), index=df.index, columns=df.columns)
return(op)
def CLscale(df):
'''
Standardize the distribution of numeric DataFrame to mean 0 and standard deviation 1.
See also
--------
CLnormal
'''
op = ((df - df.mean())/df.std()).fillna(0)
return(op)
def CLsparse_cat(s, sp=0.01):
'''
Clean a object Series with sparse values converted to "others".
'''
sfreq = pd.value_counts(s) > sp*s.shape[0]
op = s.where(s.isin(sfreq.index[sfreq]), "others")
return(op)
def CLsparse(s):
'''
    Compute the fraction of values in a Series that are neither null nor the single most frequent value.
'''
op = 1 - s.isnull().mean()
if op > 0:
op -= pd.value_counts(s).iloc[0]/s.shape[0]
return(op)
def CLdata(df, sp=0, cor=1, f_norm=CLscale, formula=None, **kwargs):
'''
Clean a pandas DataFrame according to dtype (numeric or object)
Parameters
----------
df : DataFrame
data to clean.
    sp : float
        in [0, 1); threshold on the informative-value ratio: sparser variables are dropped and sparse categories are merged into "others".
    cor : float
        in (0, 1]; absolute-correlation threshold above which later collinear variables are removed.
f_norm : function
use to standardize numeric variables.
formula : str
add transformed or interaction terms of variables.
Returns
-------
df_clean : DataFrame
cleaned data
'''
df_num = df.select_dtypes(["number"])
df_cat = df.select_dtypes(["object"])
if(df_cat.shape[1]):
df_cat = pd.get_dummies(df_cat.apply(CLsparse_cat, sp = sp))
if(df_num.shape[1] and f_norm):
df_num = f_norm(df_num)
df_clean = pd.concat([df_num, df_cat], axis=1)
if(sp):
df_clean = df_clean.loc[:, df_clean.apply(CLsparse)>sp]
if(cor < 1):
abscor = np.abs(np.tril(np.corrcoef(df_clean, rowvar=0), -1))
df_clean = df_clean.loc[:, np.all(abscor<cor, axis=1)]
if(formula):
df_clean = ps.dmatrix(("0"+"+{}"*df_clean.shape[1]+formula).format(*df_clean.columns), data=df_clean, return_type="dataframe")
return(df_clean)
## Machine Learning Procedures
def MDinit(f_model=lm.LogisticRegression, par_model={}, random_state=0, **kwargs):
'''
Initiate a model of scikit-learn form.
Parameters
----------
random_state : int or list
the seed used by the random number generator. If a list, create a list of models.
f_model : function
model to train.
par_model : dict
arguments to initiate a f_model.
Returns
-------
out: initiated model or model list
'''
if hasattr(random_state, "__iter__"):
return([MDinit(f_model, par_model, i) for i in random_state])
else:
        par = {'learning_rate': 0.05, 'n_jobs': -1, "class_weight": 'balanced'}
        par.update(par_model)
        return(f_model(**validpar(f_model, {**par, "random_state": random_state})))
def MDfit(model, xt, yt, xv=None, yv=None, par_fit={}, **kwargs):
'''
Train a model of scikit-learn form
Parameters
----------
xt : DataFrame
training X set.
xv : DataFrame
validation X set.
yt : DataFrame
training Y set.
yv : DataFrame
validation Y set.
random_state : int
the seed used by the random number generator.
model : model to train
par_fit : dict
arguments to train f_model.
Returns
-------
model : trained model
'''
par = {"eval_set": [(xv, yv.iloc[:,0])], 'verbose': False}
par.update(par_fit)
model.fit(xt, yt.iloc[:,0], **validpar(model.fit, par))
return(model)
def MDpred(model, xv, ic_offset=[], f_loss=met.roc_auc_score, logit=False, **kwargs):
'''
Use trained model and validation X to predict Y.
Parameters
----------
model : trained model
xv : DataFrame
validation X to predict Y.
ic_offset : str
offset column name of X for Y.
Returns
-------
yvp : DataFrame
predicted Y for validation set.
'''
xvo = xv
if len(ic_offset):
xv = xv.drop(ic_offset, axis=1)
if type(model).__name__ == 'GLMResultsWrapper':
xv = xv.assign(_Int_ = 1).rename(columns={"_Int_": "(Intercept)"})
    if (type(model).__name__ == 'LogisticRegression' or 'Classifier' in type(model).__name__) and f_loss.__name__ in ["roc_auc_score"]:
yvp = model.predict_proba(xv)[:,1]
    elif type(model).__name__ == 'GLMResultsWrapper' and len(ic_offset):
yvp = model.predict(xv, offset=xvo.loc[:,ic_offset])
else:
yvp = model.predict(xv)
if (f_loss.__name__ in ["roc_auc_score", "log_loss"]) and logit:
yvp = np.log(1/(1/(yvp+1e-7) - 1))
op = pd.DataFrame(yvp, index=xv.index)
return(op)
def MDweight(model, xt, ic_offset=[], **kwargs):
'''
Use trained model and X to get variable weights.
Parameters
----------
model : trained model
xt : DataFrame
get variable name from training X set.
Returns
-------
op : Series
variable weights of the model.
'''
if type(model).__name__ in ['Ridge', "Lasso"]:
op = pd.Series(model.coef_, index=xt.drop(ic_offset, axis=1).columns)
elif type(model).__name__ in ['LogisticRegression']:
op = pd.Series(model.coef_[0], index=xt.drop(ic_offset, axis=1).columns)
elif 'sklearn.ensemble' in type(model).__module__:
op = pd.Series(model.feature_importances_, index=xt.drop(ic_offset, axis=1).columns)
elif type(model).__name__ in ["GLMResultsWrapper"]:
op = pd.concat([model.params, model.t()], axis=1, keys=["coef", "t"])
else:
op = pd.Series()
return(op)
def Loss(yv, yp, f_loss=met.roc_auc_score, **kwargs):
'''
Get loss between true validation Y and predicted Y
Parameters
----------
yv : DataFrame
validation Y.
yp : DataFrame
predicted Y.
f_loss : function
use to calculate loss.
Returns
-------
op : float
loss
'''
op = f_loss(np.array(yv).flatten(), np.array(yp).flatten())
return(op)
def roc(yv, yp, plot=False, **kwargs):
'''
Get ROC curve between binary validation Y and predicted Y.
Parameters
----------
yv : DataFrame
binary validation Y.
yp : DataFrame
predicted Y.
plot : bool
whether plot a ROC curve.
Returns
-------
op : DataFrame
elements of an ROC curve including: true positive ratio, false positive ratio, and threshold.
'''
op = pd.DataFrame(dict(zip(*[["FPR", "TPR", "Threshold"], met.roc_curve(yv, yp)])))
if plot:
op.set_index("FPR").plot(ylim=[0, 1], title="AUC: {:.4f}".format(met.auc(op["FPR"], op["TPR"])))
return(op)
## Model Analysis
def MDweight_analysis(model, xt, **kwargs):
w = MDweight(model, xt, **kwargs)
if 'sklearn.ensemble' in type(model).__module__:
p0 = 1/len(w)
n_split = sum((i.tree_.feature != -2).sum() for i in np.array(model.estimators_).flatten())
op = w.to_frame("freq")
op["std"] = np.sqrt(w*(1-w)/n_split)
op["Z-score"] = (w - p0)/np.sqrt(p0*(1-p0)/n_split)
op["p-value"] = stats.norm.cdf(-op["Z-score"])
return(op)
def tree_set(tree_, max_depth = 5):
def recurse_set(node, node_set_parent, depth):
name = tree_.feature[node]
node_set = [{name}]
for i in node_set_parent:
node_set_i = i.copy()
node_set_i.add(name)
node_set.append(node_set_i)
if depth < max_depth:
node_lchild = recurse_set(tree_.children_left[node], node_set, depth+1) if tree_.feature[tree_.children_left[node]] != -2 else []
node_rchild = recurse_set(tree_.children_right[node], node_set, depth+1) if tree_.feature[tree_.children_right[node]] != -2 else []
node_set += node_lchild
node_set += node_rchild
return(node_set)
return(recurse_set(0, [], 1))
def MDforest_set(model, xt = None, max_depth = 5, alpha = 0.05):
asso_S = pd.value_counts(tuple(j) for i in np.array(model.estimators_).flatten() for j in tree_set(i.tree_, max_depth))
op = []
ic_name = range(model.n_features_) if xt is None else xt.columns.tolist()
for i, i_val in asso_S.groupby(asso_S.rename(len).index):
i_val = i_val.rename(lambda x: ic_name[x[0]]) if i == 1 else pd.Series(i_val.values, [tuple(ic_name[j] for j in x) for x in i_val.index])
        p0 = 1/special.comb(model.n_features_, i)
freq0 = i_val.sum()*p0
if alpha >= 0:
i_val = i_val[i_val > np.ceil(freq0-stats.norm.ppf(alpha*p0)*np.sqrt(freq0*(1-p0)))]
op.append(i_val)
return(op)
## Cross-Validation
def DMinit(mdset_df, mdpar_df, combine_index=True):
'''
Initiate a DataFrame-based CV model set.
Parameters
----------
mdset_df : DataFrame
for CV data.
mdpar_df : DataFrame
for model parameters.
Returns
-------
out: DataFrame
cross-joined CV data-model list.
'''
md_df = cross_join(mdset_df, mdpar_df, combine_index=combine_index)
md_df['random_state'] = md_df['irts'].apply(np.unique)
md_df["modelL"] = md_df.apply(lambda x: MDinit(**x.to_dict()), axis=1)
return(md_df.drop(["random_state", "f_model", "par_model"], axis=1))
def Kfolds(x, k=10, random_state=1):
'''
Use index to create a list of K-folds cross-validation indices.
Parameters
----------
x : list
sample index.
k : int
number of folds.
random_state : int
the seed used by the random number generator.
Returns
-------
op : array
'''
np.random.seed(random_state)
op = np.random.permutation(np.arange(len(x))) % k
return(op)
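# Example (sketch): a length-6 sample split into 3 folds, two indices each.
# >>> Kfolds(range(6), k=3, random_state=1)
# array with values in {0, 1, 2}, each occurring exactly twice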
def CVdata(df, ic_x=[], ic_y=[], ir=None, k=10, f_norm_y=lambda x: x, random_state=0, **kwargs):
'''
Create a dict of cross-validation dataset for models
Parameters
----------
df : DataFrame
data to clean
ic_x : list
column names used as X
ic_y : list
column names used as Y
ir : list
index of sub-sample
k : int
number of cross-validation folds
Returns
-------
op : dict, including following three keys:
X : DataFrame, independent variables
        Y : DataFrame, dependent variable(s)
irts : 1-d array, cross-validation group index
'''
if(ir is None):
ir = df.index
op = {"X": CLdata(df.loc[ir].reindex(columns=ic_x, fill_value=0), **kwargs),
"Y": f_norm_y(df.loc[ir, ic_y]),
"irts": Kfolds(x=ir, k=k, random_state=random_state)}
return(op)
def CVset(X, Y, irts, ig=0, ictL=None, **kwargs):
'''
Create a training-validation set from X, Y and cross-validation indices
Parameters
----------
X : DataFrame
        independent variables
Y : DataFrame
dependent variable(s)
irts : list-like
cross-validation indices
ig : int
fold to use as validation index, and others as training index
ictL : DataFrame
        boolean indicators of X columns for each CV fold
Returns
-------
op : dict
including following four keys:
xt : DataFrame
training X set
xv : DataFrame
validation X set
yt : DataFrame
training Y set
yv : DataFrame
validation Y set
'''
irv = irts == ig
op = {"yt": Y.loc[~irv], "yv": Y.loc[irv]}
if(ictL is None):
op.update({"xt": X.loc[~irv], "xv": X.loc[irv]})
else:
ict = ictL.iloc[:, ig]
op.update({"xt": X.loc[irt, ict], "xv": X.loc[irv, ict]})
return(op)
def CVset_df(X, Y, irts, ig=None, **kwargs):
'''
Create a DataFrame-form training-validation set from X, Y and cross-validation indices
Parameters
----------
X : DataFrame
        independent variables
Y : DataFrame
dependent variable(s)
irts : list-like
cross-validation indices
ig : int
fold to use as validation index, and others as training index. if None, use all folds
Returns
-------
op : DataFrame
columns including following four keys:
xt : DataFrame
training X set
xv : DataFrame
validation X set
yt : DataFrame
training Y set
yv : DataFrame
validation Y set
'''
if ig is None:
return(pd.DataFrame.from_dict([CVset(X=X, Y=Y, irts=irts, ig=i, **kwargs) for i in np.unique(irts)]))
else:
return(pd.DataFrame.from_dict([CVset(X=X, Y=Y, irts=irts, ig=ig, **kwargs)]))
def CVply(func, irts, parcv={}, f_con=list, argcon={}, **kwargs):
'''
Apply function on cross-validation data sets
Parameters
----------
func : function
to apply cross-validation arguments
irts : Series
cross-validation group index
    parcv : dict
        - keys: names of `func` arguments to vary across folds
        - values: names of entries in `kwargs` holding the per-fold values
f_con : function
to concat iterated outputs of the function
argcon : dict
arguments of f_con
Additional keyword arguments will be passed as keywords to the function.
Returns
-------
op : list
iterated outputs of the function
'''
parcv = dict([[x, kwargs.pop(parcv[x])] for x in parcv.keys()])
    op = f_con(map(lambda i: func(random_state=i,
                                  **CVset(ig=i, irts=irts, **kwargs),
                                  **dict([[x, parcv[x][i]] for x in parcv.keys()]),
                                  **kwargs),
                   np.unique(irts)),
               **argcon)
return(op)
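# A hedged sketch of CVply (the `fold_shapes` callback is illustrative): for
# each fold i, CVply builds the CVset split and calls the function with that
# fold's xt/xv/yt/yv plus random_state=i; extra keywords such as X and Y are
# also forwarded, so the callback should absorb them with **kw.
def _cvply_example(X, Y, irts):
    def fold_shapes(xt, xv, yt, yv, **kw):
        return((xt.shape, xv.shape))
    return(CVply(fold_shapes, irts=irts, X=X, Y=Y))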
def CVweight(wL, family="normal", **kwargs):
'''
    Analyze and test variable weights of cross-validation models
Parameters
----------
wL : DataFrame
variable weights of the cross-validation models
family : str
distribution types of variable weights
Returns
-------
op : DataFrame
the columns are Mean, Std, P-value, LowerCI and UpperCI of variable weights
'''
if(family == "normal"):
op = pd.concat([wL.mean(axis=1), wL.std(axis=1)], axis=1, keys=["Mean", "Std"])
        sd = op["Std"]/np.sqrt(wL.shape[1])  # standard error of the mean across folds
op["P-value"] = 2*stats.t.cdf(-np.abs(op["Mean"]/sd), wL.shape[1]-1)
elif(family == "binomial"):
op = wL.sum(axis=1)
sd = wL.std(axis=1)/op.mean()
op = pd.concat([op/op.mean(), sd], axis=1, keys=["Mean", "Std"])
op["P-value"] = stats.norm.cdf(-(op["Mean"] - 1)/op["Std"])
op = op.assign(LowerCI=op["Mean"]-1.96*sd, UpperCI=op["Mean"]+1.96*sd)
return(op)
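# A hedged sketch of CVweight on hypothetical fold weights: rows are
# variables, columns are CV folds, and family="normal" t-tests each mean
# weight against zero across folds.
def _cvweight_example():
    rng = np.random.default_rng(0)
    wL = pd.DataFrame(rng.normal(size=(4, 10)),
                      index=["v1", "v2", "v3", "v4"])
    return(CVweight(wL, family="normal"))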
class LinearClass(BaseEstimator, RegressorMixin):
def __init__(self):
pass
def summary(self):
model_t = stats.t(df=self.dof)
out_S = pd.Series([self.sigma2, len(self.w), self.dof], ["Total Sigma2", "Parameters", "Degree of freedom"])
out_df = pd.DataFrame({"coef": self.w, "se": self.w_se}, self.columns)
out_df["t"] = out_df["coef"]/out_df["se"]
out_df["p-value"] = 2*model_t.cdf(-np.abs(out_df["t"]))
out_df["0.025 CI"] = out_df["coef"]+model_t.ppf(0.025)*out_df["se"]
out_df["0.975 CI"] = out_df["coef"]+model_t.ppf(0.975)*out_df["se"]
return(out_S, out_df)
class LinearSingleModel(LinearClass):
def fit(self, X, Y, X_offset=None):
self.columns = X.columns
if X_offset is not None:
X0 = np.hstack([np.ones([Y.shape[0], 1], dtype="bool"), X_offset.values])
else:
X0 = np.ones([Y.shape[0], 1], dtype="bool")
Y = Y.values
X = X.values
XX_A = X0.T @ X0
XX_B = X0.T @ X
XX_c = (X ** 2).sum(axis=0)
XY_a = X0.T @ Y
XY_b = X.T @ Y
XX_Ainv = np.linalg.inv(XX_A)
XX_AinvB = XX_Ainv @ XX_B
self.w0 = XX_Ainv @ XY_a
self.dof = len(X) - len(self.w0) - 1
self.sigma2 = ((Y - X0 @ self.w0)**2).sum()/self.dof
XXinv_c = 1/(XX_c - (XX_B * XX_AinvB).sum(axis=0))
XXinv_B = - XXinv_c * XX_AinvB
# XXinv_Adiag = np.diag(XX_Ainv)[:, np.newaxis] + XXinv_c * (XX_AinvB ** 2)
# self.W0 = self.w0 + XXinv_c * XX_AinvB * (XX_AinvB.T @ XY_a)[:,0] + XXinv_B * XY_b.T
self.w = (XY_a.T @ XXinv_B + XXinv_c * XY_b.T)[0]
self.w_se = np.sqrt(XXinv_c * self.sigma2)
return(self)
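# A hedged sketch of LinearSingleModel on simulated data: every column of X
# gets its own univariate regression (intercept plus optional offsets), all
# fitted in one vectorized pass via the block-matrix inverse above.
def _linear_single_example():
    rng = np.random.default_rng(0)
    X = pd.DataFrame(rng.normal(size=(50, 3)), columns=["a", "b", "c"])
    Y = pd.DataFrame({"y": 0.5 * X["a"].to_numpy() + rng.normal(size=50)})
    out_S, out_df = LinearSingleModel().fit(X, Y).summary()
    return(out_df)  # per-column coef, se, t and p-value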
class LinearMixedModel(LinearClass):
def __init__(self):
self.G = None
def fit_kernel(self, X_g, w_g=None, scale=False):
'''
Create standardized kernel for linear mixed model
Parameters
----------
X_g : DataFrame
of shape (n, p), random effects matrix
w_g : 1d array
of shape (p,), scale of columns.
scale : bool
            whether to scale columns to mean 0 and std 1
Attributes
----------
G : 2d array
of shape (n, n), kernel of random effects in linear mixed model
'''
if scale:
X_g = CLscale(X_g) / np.sqrt(X_g.shape[1])
if w_g is None:
self.G = X_g.dot(X_g.T)
else:
X_g *= w_g
self.G = X_g.dot(X_g.T) / (w_g ** 2).mean()
return(self)
def fit_coef(self, h2):
D = 1 + h2 * (self.S - 1)
XDX = (self.UX.T / D) @ self.UX
XDY = (self.UX.T / D) @ self.UY
self.w = np.linalg.solve(XDX, XDY)
self.sigma2 = ((self.UY - self.UX @ self.w).T ** 2 / D).mean()
return((np.log(D*self.sigma2).sum() + np.linalg.slogdet(XDX / self.sigma2)[1] - np.linalg.slogdet(self.XX)[1]) / len(self.S))
def fit(self, X, Y, **kwargs):
'''
Factored spectrally transformed linear mixed models
Parameters
----------
X : DataFrame
of shape (n, p), covariate matrix
Y : DataFrame
of shape (n, 1), response variable
Attributes
----------
w : coefficients for X
sigma2 : total variance of residuals
h2 : heritability of random effects
loss : -2 RE log-likelihood divided by n
'''
self.columns = np.insert(X.columns, 0, "(Intercept)")
X_c = np.hstack([np.ones([X.shape[0], 1], dtype="bool"), X.values])
if self.G is None:
self.S, U = np.ones(len(Y)), np.diag(np.ones(len(Y), dtype="bool"))
else:
self.S, U = np.linalg.eigh(self.G.loc[Y.index].values)
self.dof = X.shape[0] - X.shape[1] - 1
self.UX = U.T @ X_c
self.UY = U.T @ Y.values
self.XX = X_c.T @ X_c
if self.G is None:
self.h2 = 0
else:
model = optimize.differential_evolution(self.fit_coef, bounds=[(0, 1)], tol=1e-7, **kwargs)
self.h2 = model.x[0]
self.dof -= 1
self.loss = self.fit_coef(self.h2)
self.w = self.w.flatten()
self.w_se = np.sqrt(np.diag(np.linalg.inv(self.XX)) * (1 - self.h2) * self.sigma2)
return(self)
def predict(self, X):
        return(self.w[0] + X.dot(self.w[1:]))
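# A hedged sketch of LinearMixedModel on simulated data (fit_kernel is called
# without scaling so the sketch does not depend on the CLscale helper defined
# elsewhere in this file): fit_kernel builds the kernel G from the
# random-effect matrix, then fit() profiles h2 over (0, 1) on the
# eigen-rotated data.
def _lmm_example():
    rng = np.random.default_rng(0)
    X = pd.DataFrame(rng.normal(size=(40, 2)), columns=["a", "b"])
    X_g = pd.DataFrame(rng.normal(size=(40, 30)), index=X.index)
    Y = pd.DataFrame(rng.normal(size=(40, 1)), index=X.index, columns=["y"])
    model = LinearMixedModel().fit_kernel(X_g).fit(X, Y)
    return(model.h2, model.summary())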
class LinearEquation(LinearClass):
def fit(self, X, **kwargs):
self.columns = X.columns
self.w_scale = X.shape[1]
self.w_se = 1
X_scale = X.values / np.sqrt(X.shape[0] * self.w_scale)
def loss(w):
y = X_scale @ w
return(y.dot(y), 2 * X_scale.T @ y)
def constraint(w):
out = w.dot(w)
return(out - self.w_scale)
def constraint_grad(w):
return(2 * w)
        self.model = optimize.minimize(loss, np.repeat(1, X.shape[1]), jac=True,
                                       constraints={"type": "eq", "fun": constraint,
                                                    "jac": constraint_grad},
                                       **kwargs)
self.w = self.model["x"]
self.dof = X.shape[0] - X.shape[1] - 1
self.sigma2 = self.model["fun"]
self.success = self.model["success"]
def predict(self, X):
return(X @ self.w)
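# A hedged sketch of LinearEquation on simulated data: it searches for a
# fixed-norm weight vector w making X @ w as close to zero as possible,
# i.e. an approximate linear dependency among the columns of X.
def _linear_equation_example():
    rng = np.random.default_rng(0)
    a = rng.normal(size=100)
    X = pd.DataFrame({"a": a, "b": -a + 0.01 * rng.normal(size=100),
                      "c": rng.normal(size=100)})
    model = LinearEquation()
    model.fit(X)
    return(model.w)  # large, opposite-signed weights on "a" and "b"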
def LinearEquationNet(X, min_rsq=0.2, alpha=0, verbose=0, **kwargs):
S_out_L = []
df_out_L = []
ic_L = []
X_iter = X.copy()
model = LinearEquation()
    model.fit(X_iter, **kwargs)  # `alpha` is accepted but unused: optimize.minimize has no such keyword
S_out, df_out = model.summary()
    while S_out["Total Sigma2"] < 1 - min_rsq:
ic = df_out.sort_values("p-value").index[0]
ic_L.append(ic)
S_out["w_l1"] = np.abs(df_out.loc[ic, "coef"]) / np.abs(df_out["coef"]).sum()
S_out["w_l2"] = df_out.loc[ic, "coef"] ** 2 / (df_out["coef"] ** 2).sum()
S_out["success"] = model.success
S_out_L.append(S_out)
df_out_L.append(df_out)
X_iter = X_iter.drop(ic, axis=1)
model.fit(X_iter, **kwargs)
S_out, df_out = model.summary()
if verbose > 0:
print("Iteration {}: {}, Rsq: {:.3f}, success: {}".format(len(ic_L), ic, 1 - S_out["Total Sigma2"], model.model["success"]))
df_out = pd.concat(S_out_L, axis=1, keys=ic_L).T
graph_out = pd.concat(map(lambda x: x["coef"], df_out_L), axis=1, keys=ic_L)
return(df_out, graph_out)
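# A hedged sketch of LinearEquationNet: it repeatedly fits LinearEquation,
# records the most significant column, drops it, and refits until the
# residual variance rises above 1 - min_rsq.
def _linear_equation_net_example(X):
    df_out, graph_out = LinearEquationNet(X, min_rsq=0.2, verbose=1)
    # df_out: one summary row per dropped column; graph_out: their coefs.
    return(df_out, graph_out)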
def detect_peaks(x, mph=None, mpd=1, threshold=0, edge='rising', kpsh=False, valley=False):
"""Detect peaks in data based on their amplitude and other features.
Parameters
----------
x : 1D array_like
data.
mph : {None, number}, optional (default = None)
detect peaks that are greater than minimum peak height.
mpd : positive integer, optional (default = 1)
detect peaks that are at least separated by minimum peak distance (in
number of data).
threshold : positive number, optional (default = 0)
detect peaks (valleys) that are greater (smaller) than `threshold`
in relation to their immediate neighbors.
edge : {None, 'rising', 'falling', 'both'}, optional (default = 'rising')
for a flat peak, keep only the rising edge ('rising'), only the
falling edge ('falling'), both edges ('both'), or don't detect a
flat peak (None).
kpsh : bool, optional (default = False)
keep peaks with same height even if they are closer than `mpd`.
valley : bool, optional (default = False)
if True (1), detect valleys (local minima) instead of peaks.
Returns
-------
ind : 1D array_like
        indices of the peaks in `x`.
Notes
-----
The detection of valleys instead of peaks is performed internally by simply
negating the data: `ind_valleys = detect_peaks(-x)`
The function can handle NaN's
See this IPython Notebook [1]_.
References
----------
.. [1] http://nbviewer.ipython.org/github/demotu/BMC/blob/master/notebooks/DetectPeaks.ipynb
Examples
--------
    >>> from detect_peaks import detect_peaks
    >>> x = np.random.randn(100)
    >>> x[60:81] = np.nan
    >>> # detect all peaks
    >>> ind = detect_peaks(x)
    >>> print(ind)
    >>> x = np.sin(2*np.pi*5*np.linspace(0, 1, 200)) + np.random.randn(200)/5
    >>> # set minimum peak height = 0 and minimum peak distance = 20
    >>> detect_peaks(x, mph=0, mpd=20)
    >>> x = [0, 1, 0, 2, 0, 3, 0, 2, 0, 1, 0]
    >>> # set minimum peak distance = 2
    >>> detect_peaks(x, mpd=2)
    >>> x = np.sin(2*np.pi*5*np.linspace(0, 1, 200)) + np.random.randn(200)/5
    >>> # detection of valleys instead of peaks
    >>> detect_peaks(x, mph=0, mpd=20, valley=True)
    >>> x = [0, 1, 1, 0, 1, 1, 0]
    >>> # detect both edges
    >>> detect_peaks(x, edge='both')
    >>> x = [-2, 1, -2, 2, 1, 1, 3, 0]
    >>> # set threshold = 2
    >>> detect_peaks(x, threshold=2)
"""
__author__ = "Marcos Duarte, https://github.com/demotu/BMC"
__version__ = "1.0.4"
__license__ = "MIT"
x = np.atleast_1d(x).astype('float64')
if x.size < 3:
return np.array([], dtype=int)
if valley:
x = -x
# find indexes of all peaks
dx = x[1:] - x[:-1]
# handle NaN's
indnan = np.where(np.isnan(x))[0]
if indnan.size:
x[indnan] = np.inf
dx[np.where(np.isnan(dx))[0]] = np.inf
ine, ire, ife = np.array([[], [], []], dtype=int)
if not edge:
ine = np.where((np.hstack((dx, 0)) < 0) & (np.hstack((0, dx)) > 0))[0]
else:
if edge.lower() in ['rising', 'both']:
ire = np.where((np.hstack((dx, 0)) <= 0) & (np.hstack((0, dx)) > 0))[0]
if edge.lower() in ['falling', 'both']:
ife = np.where((np.hstack((dx, 0)) < 0) & (np.hstack((0, dx)) >= 0))[0]
ind = np.unique(np.hstack((ine, ire, ife)))
# handle NaN's
if ind.size and indnan.size:
# NaN's and values close to NaN's cannot be peaks
ind = ind[np.in1d(ind, np.unique(np.hstack((indnan, indnan-1, indnan+1))), invert=True)]
# first and last values of x cannot be peaks
if ind.size and ind[0] == 0:
ind = ind[1:]
if ind.size and ind[-1] == x.size-1:
ind = ind[:-1]
# remove peaks < minimum peak height
if ind.size and mph is not None:
ind = ind[x[ind] >= mph]
# remove peaks - neighbors < threshold
if ind.size and threshold > 0:
dx = np.min(np.vstack([x[ind]-x[ind-1], x[ind]-x[ind+1]]), axis=0)
ind = np.delete(ind, np.where(dx < threshold)[0])
# detect small peaks closer than minimum peak distance
if ind.size and mpd > 1:
ind = ind[np.argsort(x[ind])][::-1] # sort ind by peak height
idel = np.zeros(ind.size, dtype=bool)
for i in range(ind.size):
if not idel[i]:
# keep peaks with the same height if kpsh is True
idel = idel | (ind >= ind[i] - mpd) & (ind <= ind[i] + mpd) & (x[ind[i]] > x[ind] if kpsh else True)
                idel[i] = False  # keep the current peak
# remove the small peaks and sort back the indexes by their occurrence
ind = np.sort(ind[~idel])
return(ind)
"""Template platform that aggregates meteorological data."""
from __future__ import annotations
import voluptuous as vol
from homeassistant.components.weather import (
ATTR_CONDITION_CLEAR_NIGHT,
ATTR_CONDITION_CLOUDY,
ATTR_CONDITION_EXCEPTIONAL,
ATTR_CONDITION_FOG,
ATTR_CONDITION_HAIL,
ATTR_CONDITION_LIGHTNING,
ATTR_CONDITION_LIGHTNING_RAINY,
ATTR_CONDITION_PARTLYCLOUDY,
ATTR_CONDITION_POURING,
ATTR_CONDITION_RAINY,
ATTR_CONDITION_SNOWY,
ATTR_CONDITION_SNOWY_RAINY,
ATTR_CONDITION_SUNNY,
ATTR_CONDITION_WINDY,
ATTR_CONDITION_WINDY_VARIANT,
ENTITY_ID_FORMAT,
WeatherEntity,
)
from homeassistant.const import CONF_NAME, CONF_UNIQUE_ID
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.config_validation import PLATFORM_SCHEMA
from homeassistant.helpers.entity import async_generate_entity_id
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
from .template_entity import TemplateEntity, rewrite_common_legacy_to_modern_conf
CONDITION_CLASSES = {
ATTR_CONDITION_CLEAR_NIGHT,
ATTR_CONDITION_CLOUDY,
ATTR_CONDITION_FOG,
ATTR_CONDITION_HAIL,
ATTR_CONDITION_LIGHTNING,
ATTR_CONDITION_LIGHTNING_RAINY,
ATTR_CONDITION_PARTLYCLOUDY,
ATTR_CONDITION_POURING,
ATTR_CONDITION_RAINY,
ATTR_CONDITION_SNOWY,
ATTR_CONDITION_SNOWY_RAINY,
ATTR_CONDITION_SUNNY,
ATTR_CONDITION_WINDY,
ATTR_CONDITION_WINDY_VARIANT,
ATTR_CONDITION_EXCEPTIONAL,
}
CONF_WEATHER = "weather"
CONF_TEMPERATURE_TEMPLATE = "temperature_template"
CONF_HUMIDITY_TEMPLATE = "humidity_template"
CONF_CONDITION_TEMPLATE = "condition_template"
CONF_ATTRIBUTION_TEMPLATE = "attribution_template"
CONF_PRESSURE_TEMPLATE = "pressure_template"
CONF_WIND_SPEED_TEMPLATE = "wind_speed_template"
CONF_WIND_BEARING_TEMPLATE = "wind_bearing_template"
CONF_OZONE_TEMPLATE = "ozone_template"
CONF_VISIBILITY_TEMPLATE = "visibility_template"
CONF_FORECAST_TEMPLATE = "forecast_template"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_CONDITION_TEMPLATE): cv.template,
vol.Required(CONF_TEMPERATURE_TEMPLATE): cv.template,
vol.Required(CONF_HUMIDITY_TEMPLATE): cv.template,
vol.Optional(CONF_ATTRIBUTION_TEMPLATE): cv.template,
vol.Optional(CONF_PRESSURE_TEMPLATE): cv.template,
vol.Optional(CONF_WIND_SPEED_TEMPLATE): cv.template,
vol.Optional(CONF_WIND_BEARING_TEMPLATE): cv.template,
vol.Optional(CONF_OZONE_TEMPLATE): cv.template,
vol.Optional(CONF_VISIBILITY_TEMPLATE): cv.template,
vol.Optional(CONF_FORECAST_TEMPLATE): cv.template,
vol.Optional(CONF_UNIQUE_ID): cv.string,
}
)
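# A hedged configuration sketch matching the schema above (the entity ids and
# values below are illustrative only, not from the original source):
#
#   weather:
#     - platform: template
#       name: "My Template Weather"
#       condition_template: "{{ states('weather.some_source') }}"
#       temperature_template: "{{ states('sensor.outside_temperature') | float }}"
#       humidity_template: "{{ states('sensor.outside_humidity') | float }}"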
async def async_setup_platform(
hass: HomeAssistant,
config: ConfigType,
async_add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Set up the Template weather."""
config = rewrite_common_legacy_to_modern_conf(config)
unique_id = config.get(CONF_UNIQUE_ID)
async_add_entities(
[
WeatherTemplate(
hass,
config,
unique_id,
)
]
)
class WeatherTemplate(TemplateEntity, WeatherEntity):
"""Representation of a weather condition."""
def __init__(
self,
hass,
config,
unique_id,
):
"""Initialize the Template weather."""
super().__init__(hass, config=config, unique_id=unique_id)
name = self._attr_name
self._condition_template = config[CONF_CONDITION_TEMPLATE]
self._temperature_template = config[CONF_TEMPERATURE_TEMPLATE]
self._humidity_template = config[CONF_HUMIDITY_TEMPLATE]
self._attribution_template = config.get(CONF_ATTRIBUTION_TEMPLATE)
self._pressure_template = config.get(CONF_PRESSURE_TEMPLATE)
self._wind_speed_template = config.get(CONF_WIND_SPEED_TEMPLATE)
self._wind_bearing_template = config.get(CONF_WIND_BEARING_TEMPLATE)
self._ozone_template = config.get(CONF_OZONE_TEMPLATE)
self._visibility_template = config.get(CONF_VISIBILITY_TEMPLATE)
self._forecast_template = config.get(CONF_FORECAST_TEMPLATE)
self.entity_id = async_generate_entity_id(ENTITY_ID_FORMAT, name, hass=hass)
self._condition = None
self._temperature = None
self._humidity = None
self._attribution = None
self._pressure = None
self._wind_speed = None
self._wind_bearing = None
self._ozone = None
self._visibility = None
self._forecast = []
@property
def condition(self):
"""Return the current condition."""
return self._condition
@property
def temperature(self):
"""Return the temperature."""
return self._temperature
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return self.hass.config.units.temperature_unit
@property
def humidity(self):
"""Return the humidity."""
return self._humidity
@property
def wind_speed(self):
"""Return the wind speed."""
return self._wind_speed
@property
def wind_bearing(self):
"""Return the wind bearing."""
return self._wind_bearing
@property
def ozone(self):
"""Return the ozone level."""
return self._ozone
@property
def visibility(self):
"""Return the visibility."""
return self._visibility
@property
def pressure(self):
"""Return the air pressure."""
return self._pressure
@property
def forecast(self):
"""Return the forecast."""
return self._forecast
@property
def attribution(self):
"""Return the attribution."""
if self._attribution is None:
return "Powered by Home Assistant"
return self._attribution
async def async_added_to_hass(self):
"""Register callbacks."""
if self._condition_template:
self.add_template_attribute(
"_condition",
self._condition_template,
lambda condition: condition if condition in CONDITION_CLASSES else None,
)
if self._temperature_template:
self.add_template_attribute(
"_temperature",
self._temperature_template,
)
if self._humidity_template:
self.add_template_attribute(
"_humidity",
self._humidity_template,
)
if self._attribution_template:
self.add_template_attribute(
"_attribution",
self._attribution_template,
)
if self._pressure_template:
self.add_template_attribute(
"_pressure",
self._pressure_template,
)
if self._wind_speed_template:
self.add_template_attribute(
"_wind_speed",
self._wind_speed_template,
)
if self._wind_bearing_template:
self.add_template_attribute(
"_wind_bearing",
self._wind_bearing_template,
)
if self._ozone_template:
self.add_template_attribute(
"_ozone",
self._ozone_template,
)
if self._visibility_template:
self.add_template_attribute(
"_visibility",
self._visibility_template,
)
if self._forecast_template:
self.add_template_attribute(
"_forecast",
self._forecast_template,
)
await super().async_added_to_hass()
from datetime import datetime, timedelta
import logging
from django.core.cache import cache
from django.db import models
import bleach
import caching.base as caching
from tower import ugettext_lazy as _
import amo.models
from amo import helpers
from amo.celery import task
from translations.fields import save_signal, TranslatedField
from users.models import UserProfile
log = logging.getLogger('z.review')
class ReviewManager(amo.models.ManagerBase):
def __init__(self, include_deleted=False):
# DO NOT change the default value of include_deleted unless you've read
# through the comment just above the Addon managers
        # declaration/instantiation and understand the consequences.
super(ReviewManager, self).__init__()
self.include_deleted = include_deleted
def get_queryset(self):
qs = super(ReviewManager, self).get_queryset()
qs = qs._clone(klass=ReviewQuerySet)
if not self.include_deleted:
qs = qs.exclude(deleted=True).exclude(reply_to__deleted=True)
return qs
def valid(self):
"""Get all reviews that aren't replies."""
return self.filter(reply_to__isnull=True)
class ReviewQuerySet(caching.CachingQuerySet):
"""
A queryset modified for soft deletion.
"""
def delete(self):
for review in self:
review.delete()
class Review(amo.models.ModelBase):
addon = models.ForeignKey('addons.Addon', related_name='_reviews')
version = models.ForeignKey('versions.Version', related_name='reviews',
null=True)
user = models.ForeignKey('users.UserProfile', related_name='_reviews_all')
reply_to = models.ForeignKey('self', null=True, unique=True,
related_name='replies', db_column='reply_to')
rating = models.PositiveSmallIntegerField(null=True)
title = TranslatedField(require_locale=False)
body = TranslatedField(require_locale=False)
ip_address = models.CharField(max_length=255, default='0.0.0.0')
editorreview = models.BooleanField(default=False)
flag = models.BooleanField(default=False)
sandbox = models.BooleanField(default=False)
client_data = models.ForeignKey('stats.ClientData', null=True, blank=True)
deleted = models.BooleanField(default=False)
# Denormalized fields for easy lookup queries.
# TODO: index on addon, user, latest
is_latest = models.BooleanField(
default=True, editable=False,
help_text="Is this the user's latest review for the add-on?")
previous_count = models.PositiveIntegerField(
default=0, editable=False,
help_text="How many previous reviews by the user for this add-on?")
# The order of those managers is very important: please read the lengthy
    # comment above the Addon managers declaration/instantiation.
unfiltered = ReviewManager(include_deleted=True)
objects = ReviewManager()
class Meta:
db_table = 'reviews'
ordering = ('-created',)
def get_url_path(self):
return helpers.url('addons.reviews.detail', self.addon.slug, self.id)
def flush_urls(self):
urls = ['*/addon/%d/' % self.addon_id,
'*/addon/%d/reviews/' % self.addon_id,
'*/addon/%d/reviews/format:rss' % self.addon_id,
'*/addon/%d/reviews/%d/' % (self.addon_id, self.id),
'*/user/%d/' % self.user_id, ]
return urls
def delete(self):
self.update(deleted=True)
# This should happen in the `post_save` hook.
# self.refresh(update_denorm=True)
def undelete(self):
self.update(deleted=False)
# This should happen in the `post_save` hook.
# self.refresh(update_denorm=True)
@classmethod
def get_replies(cls, reviews):
reviews = [r.id for r in reviews]
qs = Review.objects.filter(reply_to__in=reviews)
return dict((r.reply_to_id, r) for r in qs)
@staticmethod
def post_save(sender, instance, created, **kwargs):
if kwargs.get('raw'):
return
instance.refresh(update_denorm=created)
if created:
# Avoid slave lag with the delay.
check_spam.apply_async(args=[instance.id], countdown=600)
def refresh(self, update_denorm=False):
from addons.models import update_search_index
from . import tasks
if update_denorm:
pair = self.addon_id, self.user_id
# Do this immediately so is_latest is correct. Use default
# to avoid slave lag.
tasks.update_denorm(pair, using='default')
# Review counts have changed, so run the task and trigger a reindex.
tasks.addon_review_aggregates.delay(self.addon_id, using='default')
update_search_index(self.addon.__class__, self.addon)
@staticmethod
def transformer(reviews):
user_ids = dict((r.user_id, r) for r in reviews)
for user in UserProfile.objects.no_cache().filter(id__in=user_ids):
user_ids[user.id].user = user
models.signals.post_save.connect(Review.post_save, sender=Review,
dispatch_uid='review_post_save')
models.signals.pre_save.connect(save_signal, sender=Review,
dispatch_uid='review_translations')
# TODO: translate old flags.
class ReviewFlag(amo.models.ModelBase):
SPAM = 'review_flag_reason_spam'
LANGUAGE = 'review_flag_reason_language'
SUPPORT = 'review_flag_reason_bug_support'
OTHER = 'review_flag_reason_other'
FLAGS = (
(SPAM, _(u'Spam or otherwise non-review content')),
(LANGUAGE, _(u'Inappropriate language/dialog')),
(SUPPORT, _(u'Misplaced bug report or support request')),
(OTHER, _(u'Other (please specify)')),
)
review = models.ForeignKey(Review)
user = models.ForeignKey('users.UserProfile', null=True)
flag = models.CharField(max_length=64, default=OTHER,
choices=FLAGS, db_column='flag_name')
note = models.CharField(max_length=100, db_column='flag_notes', blank=True,
default='')
class Meta:
db_table = 'reviews_moderation_flags'
unique_together = (('review', 'user'),)
def flush_urls(self):
return self.review.flush_urls()
class GroupedRating(object):
"""
Group an add-on's ratings so we can have a graph of rating counts.
SELECT rating, COUNT(rating) FROM reviews where addon=:id
"""
# Non-critical data, so we always leave it in memcache. Numbers are updated
# when a new review comes in.
prefix = 'addons:grouped:rating'
@classmethod
def key(cls, addon):
return '%s:%s' % (cls.prefix, addon)
@classmethod
def get(cls, addon, update_none=True):
try:
grouped_ratings = cache.get(cls.key(addon))
if update_none and grouped_ratings is None:
return cls.set(addon)
return grouped_ratings
except Exception:
# Don't worry about failures, especially timeouts.
return
@classmethod
def set(cls, addon, using=None):
q = (Review.objects.valid().using(using)
.filter(addon=addon, is_latest=True)
.values_list('rating')
.annotate(models.Count('rating')))
counts = dict(q)
ratings = [(rating, counts.get(rating, 0)) for rating in range(1, 6)]
cache.set(cls.key(addon), ratings)
return ratings
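# A hedged usage sketch: GroupedRating.get(addon_id) returns the cached
# [(rating, count), ...] pairs for ratings 1 through 5, recomputing them via
# set() on a cache miss when update_none is True, e.g.
#
#     grouped = GroupedRating.get(addon.id)
#     # -> [(1, 0), (2, 3), (3, 10), (4, 25), (5, 40)]  (illustrative values)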
class Spam(object):
def add(self, review, reason):
reason = 'amo:review:spam:%s' % reason
        # cache.get returns the supplied default on a miss, so no exception
        # handling is needed here.
        reasonset = cache.get('amo:review:spam:reasons', set())
        idset = cache.get(reason, set())
reasonset.add(reason)
cache.set('amo:review:spam:reasons', reasonset)
idset.add(review.id)
cache.set(reason, idset)
return True
def reasons(self):
return cache.get('amo:review:spam:reasons')
@task
def check_spam(review_id, **kw):
spam = Spam()
try:
review = Review.objects.using('default').get(id=review_id)
except Review.DoesNotExist:
log.error('Review does not exist, check spam for review_id: %s'
% review_id)
return
thirty_days = datetime.now() - timedelta(days=30)
others = (Review.objects.no_cache().exclude(id=review.id)
.filter(user=review.user, created__gte=thirty_days))
if len(others) > 10:
spam.add(review, 'numbers')
if (review.body is not None and
bleach.url_re.search(review.body.localized_string)):
spam.add(review, 'urls')
for other in others:
if ((review.title and review.title == other.title) or
review.body == other.body):
spam.add(review, 'matches')
break