import os
import time
from ethereum import utils
from ethereum import pruning_trie as trie
from ethereum.refcount_db import RefcountDB
import ethereum.db as db
from ethereum.utils import to_string, is_string
import rlp
from rlp.utils import encode_hex
from ethereum import blocks
from ethereum import processblock
from ethereum.slogging import get_logger
import sys
log = get_logger('eth.chain')
class Index(object):
""""
Collection of indexes
children:
- needed to get the uncles of a block
blocknumbers:
- needed to mark the longest chain (path to top)
transactions:
- optional to resolve txhash to block:tx
"""
def __init__(self, db, index_transactions=True):
self.db = db
self._index_transactions = index_transactions
def add_block(self, blk):
self.add_child(blk.prevhash, blk.hash)
if self._index_transactions:
self._add_transactions(blk)
# block by number #########
def _block_by_number_key(self, number):
return 'blocknumber:%d' % number
def update_blocknumbers(self, blk):
"start from head and update until the existing indices match the block"
while True:
if blk.number > 0:
self.db.put_temporarily(self._block_by_number_key(blk.number), blk.hash)
else:
self.db.put(self._block_by_number_key(blk.number), blk.hash)
self.db.commit_refcount_changes(blk.number)
if blk.number == 0:
break
blk = blk.get_parent()
if self.has_block_by_number(blk.number) and \
self.get_block_by_number(blk.number) == blk.hash:
break
def has_block_by_number(self, number):
return self._block_by_number_key(number) in self.db
def get_block_by_number(self, number):
"returns block hash"
return self.db.get(self._block_by_number_key(number))
# transactions #############
def _add_transactions(self, blk):
"'tx_hash' -> 'rlp([blockhash,tx_number])"
for i, tx in enumerate(blk.get_transactions()):
self.db.put_temporarily(tx.hash, rlp.encode([blk.hash, i]))
self.db.commit_refcount_changes(blk.number)
def get_transaction(self, txhash):
"return (tx, block, index)"
blockhash, tx_num_enc = rlp.decode(self.db.get(txhash))
blk = rlp.decode(self.db.get(blockhash), blocks.Block, db=self.db)
num = utils.decode_int(tx_num_enc)
tx_data = blk.get_transaction(num)
return tx_data, blk, num
# children ##############
def _child_db_key(self, blk_hash):
return b'ci:' + blk_hash
def add_child(self, parent_hash, child_hash):
# only efficient for few children per block
children = list(set(self.get_children(parent_hash) + [child_hash]))
assert children.count(child_hash) == 1
self.db.put_temporarily(self._child_db_key(parent_hash), rlp.encode(children))
def get_children(self, blk_hash):
"returns block hashes"
key = self._child_db_key(blk_hash)
if key in self.db:
return rlp.decode(self.db.get(key))
return []
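# Illustrative sketch, not part of the original module: the typical way the
# Index is driven by the chain. Assumes a RefcountDB-style database and a
# block object exposing `prevhash`, `hash`, `number`, `get_parent()` and
# `get_transactions()`; the names below are hypothetical.
def _example_index_usage(database, blk):
    index = Index(database, index_transactions=True)
    index.add_block(blk)            # records child and transaction indices
    index.update_blocknumbers(blk)  # marks blk's chain as the canonical one
    assert index.get_block_by_number(blk.number) == blk.hash
    return index.get_children(blk.prevhash)  # contains blk.hash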
class Chain(object):
"""
Manages the chain and requests to it.
:ivar head_candidate: the block which if mined by our miner would become
the new head
"""
head_candidate = None
def __init__(self, db, genesis=None, new_head_cb=None, coinbase='\x00' * 20):
self.db = self.blockchain = db
self.new_head_cb = new_head_cb
self.index = Index(db)
self._coinbase = coinbase
if genesis and 'HEAD' not in self.db:
self._initialize_blockchain(genesis)
log.debug('chain @', head_hash=self.head)
self.genesis = self.get(self.index.get_block_by_number(0))
log.debug('got genesis', nonce=self.genesis.nonce.encode('hex'),
difficulty=self.genesis.difficulty)
self._update_head_candidate()
def _initialize_blockchain(self, genesis=None):
log.info('Initializing new chain')
if not genesis:
genesis = blocks.genesis(self.blockchain, difficulty=blocks.GENESIS_DIFFICULTY)
log.info('new genesis', genesis_hash=genesis, difficulty=genesis.difficulty)
self.index.add_block(genesis)
self._store_block(genesis)
assert genesis == blocks.get_block(self.blockchain, genesis.hash)
self._update_head(genesis)
assert genesis.hash in self
self.commit()
@property
def coinbase(self):
assert self.head_candidate.coinbase == self._coinbase
return self._coinbase
@coinbase.setter
def coinbase(self, value):
self._coinbase = value
# block reward goes to different address => redo finalization of head candidate
self._update_head(self.head)
@property
def head(self):
if self.blockchain is None or 'HEAD' not in self.blockchain:
self._initialize_blockchain()
ptr = self.blockchain.get('HEAD')
return blocks.get_block(self.blockchain, ptr)
def _update_head(self, block, forward_pending_transactions=True):
log.debug('updating head')
if not block.is_genesis():
#assert self.head.chain_difficulty() < block.chain_difficulty()
if block.get_parent() != self.head:
log.debug('New Head is on a different branch',
head_hash=block, old_head_hash=self.head)
# Some temporary auditing to make sure pruning is working well
if block.number > 0 and block.number % 500 == 0 and isinstance(self.db, RefcountDB):
trie.proof.push(trie.RECORDING)
block.to_dict(with_state=True)
n = trie.proof.get_nodelist()
trie.proof.pop()
sys.stderr.write('State size: %d\n' % sum([(len(rlp.encode(a)) + 32) for a in n]))
# Fork detected, revert death row and change logs
if block.number > 0:
b = block.get_parent()
h = self.head
b_children = []
if b.hash != h.hash:
log.warn('reverting')
while h.number > b.number:
h.state.db.revert_refcount_changes(h.number)
h = h.get_parent()
while b.number > h.number:
b_children.append(b)
b = b.get_parent()
while b.hash != h.hash:
h.state.db.revert_refcount_changes(h.number)
h = h.get_parent()
b_children.append(b)
b = b.get_parent()
for bc in b_children:
processblock.verify(bc, bc.get_parent())
self.blockchain.put('HEAD', block.hash)
assert self.blockchain.get('HEAD') == block.hash
sys.stderr.write('New head: %s %d\n' % (utils.encode_hex(block.hash), block.number))
self.index.update_blocknumbers(self.head)
self._update_head_candidate(forward_pending_transactions)
if self.new_head_cb and not block.is_genesis():
self.new_head_cb(block)
def _update_head_candidate(self, forward_pending_transactions=True):
"after new head is set"
log.debug('updating head candidate')
# collect uncles
blk = self.head # parent of the block we are collecting uncles for
uncles = set(u.header for u in self.get_brothers(blk))
for i in range(blocks.MAX_UNCLE_DEPTH + 2):
for u in blk.uncles:
assert isinstance(u, blocks.BlockHeader)
uncles.discard(u)
if blk.has_parent():
blk = blk.get_parent()
assert not uncles or max(u.number for u in uncles) <= self.head.number
uncles = list(uncles)[:blocks.MAX_UNCLES]
# create block
ts = max(int(time.time()), self.head.timestamp + 1)
d = db.OverlayDB(self.head.db)
head_candidate = blocks.Block.init_from_parent(self.head, coinbase=self._coinbase,
timestamp=ts, uncles=uncles, db=d)
assert head_candidate.validate_uncles()
self.pre_finalize_state_root = head_candidate.state_root
head_candidate.finalize()
# add transactions from previous head candidate
old_head_candidate = self.head_candidate
self.head_candidate = head_candidate
if old_head_candidate is not None and forward_pending_transactions:
log.debug('forwarding pending transactions')
for tx in old_head_candidate.get_transactions():
self.add_transaction(tx)
else:
log.debug('discarding pending transactions')
def get_uncles(self, block):
"""Return the uncles of `block`."""
if not block.has_parent():
return []
else:
return self.get_brothers(block.get_parent())
def get_brothers(self, block):
"""Return the uncles of the hypothetical child of `block`."""
o = []
i = 0
while block.has_parent() and i < blocks.MAX_UNCLE_DEPTH:
parent = block.get_parent()
o.extend([u for u in self.get_children(parent) if u != block])
block = block.get_parent()
i += 1
return o
def get(self, blockhash):
assert is_string(blockhash)
assert len(blockhash) == 32
return blocks.get_block(self.blockchain, blockhash)
def has_block(self, blockhash):
assert is_string(blockhash)
assert len(blockhash) == 32
return blockhash in self.blockchain
def __contains__(self, blockhash):
return self.has_block(blockhash)
def _store_block(self, block):
if block.number > 0:
self.blockchain.put_temporarily(block.hash, rlp.encode(block))
else:
self.blockchain.put(block.hash, rlp.encode(block))
def commit(self):
self.blockchain.commit()
def add_block(self, block, forward_pending_transactions=True):
"returns True if block was added sucessfully"
_log = log.bind(block_hash=block)
# make sure we know the parent
if not block.has_parent() and not block.is_genesis():
_log.debug('missing parent')
return False
if not block.validate_uncles():
_log.debug('invalid uncles')
return False
if not len(block.nonce) == 8:
_log.debug('nonce not set')
return False
elif not block.header.check_pow(nonce=block.nonce) and\
not block.is_genesis():
_log.debug('invalid nonce')
return False
if block.has_parent():
try:
processblock.verify(block, block.get_parent())
except processblock.VerificationFailed as e:
_log.critical('VERIFICATION FAILED', error=e)
f = os.path.join(utils.data_dir, 'badblock.log')
open(f, 'w').write(to_string(block.hex_serialize()))
return False
if block.number < self.head.number:
_log.debug("older than head", head_hash=self.head)
# Q: Should we have any limitations on adding blocks?
self.index.add_block(block)
self._store_block(block)
# set to head if this makes the longest chain w/ most work for that number
if block.chain_difficulty() > self.head.chain_difficulty():
_log.debug('new head')
self._update_head(block, forward_pending_transactions)
elif block.number > self.head.number:
_log.warn('has higher blk number than head but lower chain_difficulty',
head_hash=self.head, block_difficulty=block.chain_difficulty(),
head_difficulty=self.head.chain_difficulty())
block.transactions.clear_all()
block.receipts.clear_all()
block.state.db.commit_refcount_changes(block.number)
block.state.db.cleanup(block.number)
self.commit() # batch commits all changes that came with the new block
return True
def get_children(self, block):
return [self.get(c) for c in self.index.get_children(block.hash)]
def add_transaction(self, transaction):
"""Add a transaction to the :attr:`head_candidate` block.
If the transaction is invalid, the block will not be changed.
:returns: `True` if the transaction was successfully added or `False`
if the transaction was invalid
"""
assert self.head_candidate is not None
head_candidate = self.head_candidate
log.debug('add tx', num_txs=self.num_transactions(), tx=transaction, on=head_candidate)
if self.head_candidate.includes_transaction(transaction.hash):
log.debug('known tx')
return
old_state_root = head_candidate.state_root
# revert finalization
head_candidate.state_root = self.pre_finalize_state_root
try:
success, output = processblock.apply_transaction(head_candidate, transaction)
except processblock.InvalidTransaction as e:
# if unsuccessful the prerequisites were not fulfilled
# and the tx is invalid, state must not have changed
log.debug('invalid tx', error=e)
head_candidate.state_root = old_state_root # reset
return False
log.debug('valid tx')
# we might have a new head_candidate (due to ctx switches in pyethapp)
if self.head_candidate != head_candidate:
log.debug('head_candidate changed during validation, trying again')
self.add_transaction(transaction)
return
self.pre_finalize_state_root = head_candidate.state_root
head_candidate.finalize()
log.debug('tx applied', result=output)
assert old_state_root != head_candidate.state_root
return True
def get_transactions(self):
"""Get a list of new transactions not yet included in a mined block
but known to the chain.
"""
if self.head_candidate:
log.debug('get_transactions called', on=self.head_candidate)
return self.head_candidate.get_transactions()
else:
return []
def num_transactions(self):
if self.head_candidate:
return self.head_candidate.transaction_count
else:
return 0
def get_chain(self, start='', count=10):
"return 'count' blocks starting from head or start"
log.debug("get_chain", start=encode_hex(start), count=count)
blocks = []
block = self.head
if start:
if start not in self.index.db:
return []
block = self.get(start)
if not self.in_main_branch(block):
return []
for i in range(count):
blocks.append(block)
if block.is_genesis():
break
block = block.get_parent()
return blocks
def in_main_branch(self, block):
try:
return block.hash == self.index.get_block_by_number(block.number)
except KeyError:
return False
def get_descendants(self, block, count=1):
log.debug("get_descendants", block_hash=block)
assert block.hash in self
block_numbers = list(range(block.number + 1, min(self.head.number + 1,
block.number + count + 1)))
return [self.get(self.index.get_block_by_number(n)) for n in block_numbers]
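# Illustrative sketch, not part of the original module: the usual
# import-and-extend cycle against a Chain. `database` and `mined_block` are
# hypothetical stand-ins for an ethereum.db database and a sealed block.
def _example_chain_usage(database, mined_block):
    chain = Chain(database, new_head_cb=lambda blk: log.info('new head', block=blk))
    if chain.add_block(mined_block):  # checks PoW, uncles and the state transition
        assert mined_block.hash in chain
    return chain.get_chain(count=10)  # up to ten blocks walking back from the head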
# Software License Agreement (BSD License)
#
# Copyright (c) 2009-2011, Eucalyptus Systems, Inc.
# All rights reserved.
#
# Redistribution and use of this software in source and binary forms, with or
# without modification, are permitted provided that the following conditions
# are met:
#
# Redistributions of source code must retain the above
# copyright notice, this list of conditions and the
# following disclaimer.
#
# Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# author: clarkmatthew
# modified by: Trevor Hodde
'''
Example:
import eulogger
self.logger = eulogger.Eulogger(name='euca')
self.log = self.logger.log
self.log.debug("This is a debug message")
self.log.critical("this is a critical message")
'''
import os
import sys
import logging
import time
class Eulogger(object):
#constructor for the Eulogger
def __init__(self,
parent_logger_name = 'eutester',
identifier="eulogger",
stdout_level="debug",
stdout_format = None,
logfile = "",
logfile_level="debug",
make_log_file_global=True,
use_global_log_files=True,
file_format = None,
clear_file = False):
"""
This class basically sets up a child logger for testing purposes.
It allows the user to set up a new logger object and pass different logging formats and levels so different
objects and modules can log with unique identifiers and logging levels.
:param parent_logger_name: Name of root/parent logger
:param identifier: identifier used for log formatting and child logger name
:param stdout_level: log level (see 'logging' class) for std out handler under this child logger
:param stdout_format: logging format used by this child logger's stdout handler
:param logfile: file path to use for this child logger's logging file handler
:param logfile_level: log level (see 'logging' class) for file handler under this child logger
:param file_format: logging format used by this child logger's file handler
:param clear_file: will attempt to remove 'logfile' before creating handler. Will not remove parent's files.
:param make_log_file_global: boolean, will add this logfile to parent so other child loggers create afterward
will attempt to create a handler that writes to this file as well.
:param use_global_log_files: boolean, will query the parent logger for any file handlers and will attempt to
create a handler for this child logger using the same file
#Debug for init...
print ( "-----------------------------------------------" \
+ "\nparent_logger_name:" + str(parent_logger_name) \
+ "\neulogger init:" \
+ "\nidentifier:" + str(identifier) \
+ "\nstdout_level:" + str(stdout_level) \
+ "\nstdout_format:" + str(stdout_format) \
+ "\nlogfile:" + str(logfile) \
+ "\nlogfile_level:" + str(logfile_level) \
+ "\nfile_format:" + str(file_format) \
+ "\nclear_file:" + str(clear_file) \
+ "\n-----------------------------------------------" )
"""
self.logfile = os.path.join(logfile)
self.clear_file = clear_file
#Create or fetch existing logger of name 'logger_name'
self.parent_logger_name = parent_logger_name
self.identifier = identifier
self.name = identifier + str(time.time())
self.parent_logger = logging.getLogger(self.parent_logger_name)
self.log = self.getChild(self.parent_logger, self.name)
self.file_info_list = []
#map string for log level to 'logging' class type or default to logging.DEBUG if string isn't found
self.stdout_level = logging.__dict__.get(stdout_level.upper(),logging.DEBUG)
self.logfile_level = logging.__dict__.get(logfile_level.upper(),logging.DEBUG)
#set the parent and child logger levels to the lowest of the two handler levels
if self.stdout_level < self.logfile_level:
self.logger_level = self.stdout_level
else:
self.logger_level = self.logfile_level
if self.log.level > self.logger_level or self.log.level == 0:
self.log.setLevel(self.logger_level)
if self.parent_logger.level > self.logger_level or self.parent_logger.level == 0:
self.parent_logger.setLevel(self.logger_level)
#set some default and canned formatters for logging output
self.default_format = stdout_format or logging.Formatter('[%(asctime)s] [' + self.identifier + '] [%(levelname)s]: %(message)s')
self.file_format = file_format or self.default_format
#Add a few canned formatters for reference/convenience
self.formatter2 = logging.Formatter('[%(asctime)s] [' + self.identifier + '] [%(levelname)s] [%(filename)s:%(funcName)s():%(lineno)d]: %(message)s')
self.formatter3 = logging.Formatter( self.identifier +':%(funcName)s():%(lineno)d: %(message)s')
self.formatter4 = logging.Formatter('%(message)s')
self.stdout_handler = logging.StreamHandler(sys.stdout)
self.stdout_handler.setFormatter(self.default_format)
self.stdout_handler.setLevel(self.stdout_level)
#Add filter so only log records from this child logger are handled
self.stdout_handler.addFilter(Allow_Logger_By_Name(self.log.name))
if self.stdout_handler not in self.log.handlers:
self.log.addHandler(self.stdout_handler)
else:
print "Not adding stdout handler for this eulogger:" +str(self.identifier)
#Now add the file handlers...
if use_global_log_files:
self.file_info_list = self.get_parent_logger_files()
if (self.logfile):
self.file_info_list.append(File_Handler_Info(self.logfile,self.logfile_level))
#If the clear flag is set remove the file first...
if (self.clear_file):
try:
os.remove(self.logfile)
except Exception, e:
print "Error while attempting to remove log file '" + self.logfile + "', err:" + str(e)
if make_log_file_global:
self.add_muted_file_handler_to_parent_logger(self.logfile,self.logfile_level)
for fileinfo in self.file_info_list:
file_hdlr = logging.FileHandler(fileinfo.filepath)
file_hdlr.setFormatter(self.file_format)
file_hdlr.setLevel(fileinfo.level)
#Add filter so only log records from this child logger are handled
file_hdlr.addFilter(Allow_Logger_By_Name(self.log.name))
#Make sure this is not a duplicate handler or this file is a dup of another handler
if file_hdlr not in self.log.handlers:
add = True
for h in self.log.handlers:
if h.stream.name == file_hdlr.stream.name:
add = False
self.log.debug('File already has log handler:' + str(fileinfo.filepath))
break
if add:
self.log.addHandler(file_hdlr)
else:
print "Not adding logfile handler for this eulogger:" +str(self.identifier)
def add_muted_file_handler_to_parent_logger(self,filepath, level):
file_handler = logging.FileHandler(filepath)
file_handler.setLevel(level)
file_handler.addFilter(Mute_Filter())
self.parent_logger.addHandler(file_handler)
def get_parent_logger_files(self):
files = []
for h in self.parent_logger.handlers:
if isinstance(h, logging.FileHandler):
files.append(File_Handler_Info(h.stream.name, h.level))
return files
def getChild(self, logger, suffix):
"""
## Fallback for Python 2.6 support; logging.Logger.getChild() was added in 2.7.
"""
if hasattr(logger,'getChild'):
return logger.getChild(suffix)
else:
if logger.root is not logger:
suffix = '.'.join((logger.name, suffix))
return logger.manager.getLogger(suffix)
class File_Handler_Info():
def __init__(self, filepath, level):
if not filepath or not level:
raise Exception("File_Handler_Info None option not allowed, filepath:"+str(filepath)+",level:"+str(level))
self.filepath = filepath
self.level = level
class Allow_Logger_By_Name(logging.Filter):
"""
Only messages from this logger are allowed through, to prevent duplicates from other loggers of the same level, etc.
"""
def __init__(self, name=""):
logging.Filter.__init__(self, name)
def filter(self, record):
return record.name == self.name
class Mute_Filter(logging.Filter):
def filter(self, record):
return False
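# Illustrative sketch, not part of the original module: two Euloggers sharing
# one parent. Because each handler carries an Allow_Logger_By_Name filter,
# every handler emits only its own child logger's records, so the two loggers
# can use different formats without duplicating each other's output. The log
# file path is hypothetical.
def _example_eulogger_usage():
    a = Eulogger(identifier='node-a', logfile='/tmp/eutester.log')
    b = Eulogger(identifier='node-b')  # inherits /tmp/eutester.log via the parent
    a.log.debug("handled only by node-a's handlers")
    b.log.critical("handled only by node-b's handlers")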
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2013 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Plug-in to format the Services and Drivers key with Start and Type values."""
from plaso.lib import event
from plaso.parsers.winreg_plugins import interface
class ServicesPlugin(interface.ValuePlugin):
"""Plug-in to format the Services and Drivers keys having Type and Start."""
NAME = 'winreg_services'
REG_VALUES = frozenset(['Type', 'Start'])
REG_TYPE = 'SYSTEM'
URLS = ['http://support.microsoft.com/kb/103000']
SERVICE_START = {
0: 'Boot (0)',
1: 'System (1)',
2: 'Auto Start (2)',
3: 'Manual (3)',
4: 'Disabled (4)'
}
SERVICE_TYPE = {
1: 'Kernel Device Driver (0x1)',
2: 'File System Driver (0x2)',
4: 'Adapter (0x4)',
16: 'Service - Own Process (0x10)',
32: 'Service - Share Process (0x20)'
}
SERVICE_ERROR = {
0: 'Ignore (0)',
1: 'Normal (1)',
2: 'Severe (2)',
3: 'Critical (3)'
}
OBJECT_NAMES = [
'localsystem', 'nt authority\\localservice',
'nt authority\\networkservice']
def GetImagePath(self, subkey, service_type):
"""Returns the Image Path String with alerts for unusual settings.
Returns Image Path with alerts for:
Drivers having ImagePath outside system32/drivers.
Services NOT having ImagePath.
Args:
subkey: Services subkey to format.
service_type: Integer expressing the type of service or driver.
Returns:
A Unicode string when the ImagePath value is set in service.
A driver does not have an ImagePath so None is returned.
A REGALERT Unicode string is returned when an anomaly is detected.
"""
image_path = subkey.GetValue('ImagePath')
if service_type > 0 and service_type < 15:
if not image_path:
return None
if not image_path.data or not image_path.DataIsString():
return 'REGALERT: Driver does not have a valid ImagePath.'
image_path_str = image_path.data
if 'system32\\drivers' not in image_path_str.lower():
return u'REGALERT Driver not in system32: {}'.format(image_path_str)
return image_path_str
elif service_type > 15 and service_type < 257:
if not image_path:
return 'REGALERT: Service does not have ImagePath.'
if not image_path.data or not image_path.DataIsString():
return 'REGALERT: Service does not have a valid ImagePath.'
return u'\'{}\''.format(image_path.data)
return None
def GetObjectName(self, subkey, service_type):
"""Returns the ObjectName for Service with alerts for unusual settings.
Alerts are for:
Service with NO ObjectName.
Service with unusual ObjectName.
Driver with ObjectName.
Args:
subkey: The Services subkey to format.
service_type: Integer expressing the type of service or driver.
Returns:
None when Driver does not have an ObjectName.
String with ObjectName and alerts.
"""
# Only Services should have ObjectName -- the "user" who started it.
object_name = subkey.GetValue('ObjectName')
# Handle Drivers first. Alert if Driver has an ObjectName.
if service_type > 0 and service_type < 15:
if not object_name:
return None
if object_name.data and object_name.DataIsString():
object_name_str = object_name.data
else:
object_name_str = u'UNKNOWN'
return u'REGALERT Driver has ObjectName: {}'.format(
object_name_str)
elif service_type > 15 and service_type < 257:
if not object_name:
return u'REGALERT Service does not have ObjectName'
if not object_name.data or not object_name.DataIsString():
return u'REGALERT Service does not have a valid ObjectName'
object_name_str = object_name.data
if object_name_str.lower() not in self.OBJECT_NAMES:
# There are 3 primary owners, all others are noteworthy.
return u'REGALERT Unusual Owner: {}'.format(
object_name_str)
return None
def GetEntries(self, key, **unused_kwargs):
"""Create one event for each subkey under Services that has Type and Start.
Adds descriptions of the ErrorControl, Type and Start values.
Alerts on unusual settings such as Start/Type mismatches or drivers outside
of C:/Windows/system32/drivers.
Args:
key: A Windows Registry key (instance of WinRegKey).
Yields:
Event objects extracted from the Windows service values.
"""
text_dict = {}
service_type_value = key.GetValue('Type')
service_start_value = key.GetValue('Start')
if service_type_value and service_start_value:
service_type = service_type_value.data
text_dict['Type'] = self.SERVICE_TYPE.get(service_type, service_type)
service_start = service_start_value.data
service_start_str = self.SERVICE_START.get(service_start, service_start)
# Check for unusual Type/Start pairs.
if service_type > 0 and service_type < 15 and service_start == 2:
service_start_str = 'REGALERT Unusual Start for Driver: {}'.format(
self.SERVICE_START[service_start])
if service_type > 15 and service_type < 257 and service_start in [0, 1]:
service_start_str = 'REGALERT Unusual Start for Service: {}'.format(
self.SERVICE_START[service_start])
text_dict['Start'] = service_start_str
# Convert ErrorControl to Human Readable.
if key.GetValue('ErrorControl'):
error_control = key.GetValue('ErrorControl').data
text_dict['ErrorControl'] = self.SERVICE_ERROR.get(
error_control, error_control)
object_name = self.GetObjectName(key, service_type)
if object_name:
text_dict['ObjectName'] = object_name
image_path = self.GetImagePath(key, service_type)
if image_path:
text_dict['ImagePath'] = image_path
# Gather all the other string and integer values and insert as they are.
for value in key.GetValues():
if not value.name:
continue
if value.name not in text_dict:
if value.DataIsString() or value.DataIsInteger():
text_dict[value.name] = value.data
elif value.DataIsMultiString():
text_dict[value.name] = u', '.join(value.data)
event_object = event.WinRegistryEvent(
key.path, text_dict, timestamp=key.last_written_timestamp)
yield event_object
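# Illustrative sketch, not part of the original module: how the plug-in's
# lookup tables translate raw registry values. A service subkey with Type=16
# (0x10) and Start=2 maps to 'Service - Own Process (0x10)' and
# 'Auto Start (2)', while a driver (0 < Type < 15) with Start=2 takes the
# REGALERT branch in GetEntries. The key object here is hypothetical.
def _ExampleDecodeServiceValues(plugin, key):
  service_type = key.GetValue('Type').data    # e.g. 16
  service_start = key.GetValue('Start').data  # e.g. 2
  return (plugin.SERVICE_TYPE.get(service_type, service_type),
          plugin.SERVICE_START.get(service_start, service_start))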
from desispec.workflow.exptable import get_exposure_table_name,get_exposure_table_path, \
get_exposure_flags, get_last_step_options, get_exposure_table_column_defs, \
keyval_change_reporting, deconstruct_keyval_reporting
from desispec.workflow.tableio import load_table, write_table
from desispec.workflow.utils import pathjoin
from desispec.io.util import parse_cameras, decode_camword, create_camword, parse_badamps, validate_badamps
import os
import numpy as np
from astropy.table import Table
def process_int_range_inclusive(input_string):
"""
Given a str indicating a range of integers, this auto-detects the symbol used and returns that range as an INCLUSIVE
numpy array of ints. Symbol can be ':', '-', or '..'.
Args:
input_string, str. String with integer range with the upper value being included in the output. E.g. 100:102
returns 100,101,102.
Returns:
np.array. Array of ints for the range specified in the input_string.
"""
for symbol in [':','-','..']:
if symbol in input_string:
first,last = input_string.split(symbol)
return np.arange(int(first),int(last)+1)
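# Illustrative sketch, not part of the original module: all three accepted
# separators yield the same inclusive array.
def _example_int_range():
    for spec in ('100:102', '100-102', '100..102'):
        assert list(process_int_range_inclusive(spec)) == [100, 101, 102]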
def parse_int_list_term(input_string, allints=None):
"""
Given a str, this determines what integer values it represents: either "all", indicating all ints in the
table column; a range of integers specified with ':', '-', or '..'; or a single integer. This should not be a list.
Args:
input_string, str. String with either integer range, single integer, or 'all'. 'all' requires allints
allints, np.array. One dimensional array of all integers. Returns if 'all' is specified.
Returns:
out_array, np.array. Array of ints for the string specified.
"""
if input_string.lower() == 'all' and allints is not None:
out_array = np.asarray(allints)
elif input_string.isnumeric():
out_array = np.atleast_1d(int(input_string))
elif np.any([symb in input_string for symb in [':','-','..']]):
out_array = process_int_range_inclusive(input_string)
else:
raise ValueError(f"Couldn't understand input {input_string}")
return out_array
def parse_int_list(input_string, allints=None, only_unique=True):
"""
Given a str, this determines what integer values it represents: either "all", indicating all ints in the
table column; a range of integers specified with ':', '-', or '..'; a single integer; or any number of
these in a comma-separated list.
Args:
input_string, str. String with either integer range, single integer, or 'all'. It can have a combination of
multiple of these separated by a comma. 'all' requires allints.
allints, np.array. One dimensional array of all integers. Returns if 'all' is specified.
only_unique, bool. True if you want a unique set returned. Otherwise repeated entries in the input string
are kept.
Returns:
out_array, np.array. Array of ints for the string specified.
"""
input_string = input_string.strip(' \t,')
out_array = np.atleast_1d()
for substr in input_string.split(","):
out_array = np.append(out_array, parse_int_list_term(substr, allints=allints))
if only_unique:
out_array = np.unique(out_array)
return out_array.astype(int)
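# Illustrative sketch, not part of the original module: mixed terms in one
# comma-separated string, with 'all' expanding to the supplied IDs.
def _example_parse_int_list():
    assert list(parse_int_list('1,5:7,12')) == [1, 5, 6, 7, 12]
    assert list(parse_int_list('all', allints=[3, 1, 2])) == [1, 2, 3]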
def columns_not_to_report():
"""
Returns list of column names that shouldn't have reporting information saved because they are user-defined values.
"""
return ['COMMENTS', 'HEADERERR', 'BADCAMWORD', 'BADAMPS', 'LASTSTEP', 'EXPFLAG']
def columns_not_to_edit():
"""
Defines column names that shouldn't be edited.
"""
## Occasionally unchanging things like NIGHT or TILEID have been missing in the headers, so we won't restrict
## that even though it typically shouldn't be edited if the data is there
return ['EXPID', 'CAMWORD', 'OBSTYPE']
def columns_not_to_append():
"""
Defines column names that shouldn't be appended to.
"""
return ['LASTSTEP', 'SURVEY', 'FA_SURV', 'FAPRGRM', 'GOALTYPE']
def validate_value(colname, value, joinsymb):
"""
Checks that the value provided matches the syntax of the colname given. If the syntax is incorrect
an error is raised.
Warning: may modify "value"; the (possibly corrected) value is returned.
Args:
colname, str. The name of the column that is being edited.
value, any scalar type. The value that the column's current value should be changed to.
Returns:
value, any scalar type. The value that the column's current value should be changed to. This is verified to
have the proper syntax for the colname given.
"""
## Match data type and convert where necessary
if colname == 'EXPFLAG':
## Make sure the exposure flag is a valid one
expflags = get_exposure_flags()
value = value.lower().replace(' ','_')
if value not in expflags:
raise ValueError(f"Couldn't understand exposure flag: '{value}'. Available options are: {expflags}.")
elif colname == 'BADAMPS':
## Make sure we can decode the badamp value (or easily correct it so we can decode it)
## This raises an error if it can't be converted to a viable list
value = validate_badamps(value, joinsymb=joinsymb)
elif colname == 'BADCAMWORD':
## Make sure we can understand the cameras given
## This raises an error if it can't be parsed
value = parse_cameras(value)
elif colname == 'LASTSTEP':
options = get_last_step_options()
value = value.lower()
if value not in options:
raise ValueError(f"Couldn't understand laststep: '{value}'. Available options are: {options}.")
elif joinsymb in value:
print(f"WARNING: For colname {colname} you provided a value '{value}' that contains the default"+
f" joinsymbol='{joinsymb}'. This is allowed, but use at your own caution. Continuing...")
elif '|' in value:
print(f"WARNING: For colname {colname} you provided a value '{value}' that contains the default"+
" indicator of an array string in the exposure tables (the 'pipe' i.e. '|'."+
" This is allowed, but use at your own caution. Continuing...")
else:
## Otherwise we don't have a strict syntax, so pass it
pass
return value
def document_in_comments(tablerow,colname,value,comment_col='HEADERERR'):
"""
Places "reporting" string in the appropriate comment column of the exposure table to document the edits being
made.
Note: This alters and returns the input tablerow. How astropy handles this may vary. As of Jan 2021, I believe a copy
is made in memory upon altering of a tablerow object. So the output here should be returned and assigned to
overwrite the old value in the input table.
Args:
tablerow, astropy.table.Row. A table row with columns colname and comment_col. Comment_col must be a numpy array.
colname, str. The name of the column that is being edited.
value, any scalar type. The value that the column's current value should be changed to.
comment_col, str. The name of the comment column where the change reporting should be placed. Default is HEADERERR.
Returns:
tablerow, astropy.table.Row. A table row with columns colname and comment_col. Comment_col is a numpy array
with the new reporting string included.
"""
colname = colname.upper()
if colname in columns_not_to_report():
return tablerow
existing_entries = [colname in term for term in tablerow[comment_col]]
if np.any(existing_entries):
loc = np.where(existing_entries)[0][0]
entry = tablerow[comment_col][loc]
key, origval, oldval = deconstruct_keyval_reporting(entry)
if key != colname:
raise ValueError(f"Key '{key}' didn't match colname '{colname}' in document_in_comments")
new_entry = keyval_change_reporting(colname, origval, value)
tablerow[comment_col][loc] = new_entry
else:
reporting = keyval_change_reporting(colname, tablerow[colname], value)
tablerow[comment_col] = np.append(tablerow[comment_col], reporting)
return tablerow
def change_exposure_table_rows(exptable, exp_str, colname, value, include_comment='', append_string=False,
overwrite_value=False, joinsymb=','):
"""
Changes the column named colname to the given value for the rows of exptable that correspond to the
exposures defined in exp_str.
Note: This edits and returns the exptable given in the inputs.
Args:
exptable, astropy.table.Table. An exposure table defined in desispec.workflow.exptable. Each row is an exposure.
exp_str, str. A string representing the exposure ID's for which you want to edit the column to a new value.
The string can be any combination of integer ranges, single integers, or 'all'. Each range or integer
is separated by a comma. 'all' implies all exposures. Ranges can be given using ':', '-', or '..'.
All ranges are assumed to be inclusive.
colname, str. The column name in the exptable where you want to change values.
value, any scalar type. The value you want to change the column value of each exp_str exposure row to.
include_comment, str. A user specified comment to be added to the COMMENTS column after setting colname to
value for the given exp_str exposures.
append_string, bool. True if you want to append your input value to the end of an existing string.
overwrite_value, bool. Default is False. Must be set to True if you want to overwrite a non-default value.
If current value is a default value for that column for that row,
this doesn't need to be set.
joinsymb, str. The symbol used to separate string elements that are being appended. Shouldn't be '|'.
Default is ','.
Returns:
exptable, astropy.table.Table. The exposure table given in the input, with edits made to the column colname
for the rows corresponding to the exposure ID's in exp_str.
"""
## Make sure colname exists before proceeding
## Don't edit fixed columns
colname = colname.upper()
if colname in columns_not_to_edit():
raise ValueError(f"Not allowed to edit colname={colname}.")
if colname not in exptable.colnames:
raise ValueError(f"Colname {colname} not in exposure table")
if append_string and colname in columns_not_to_append():
raise ValueError(f"Cannot append_string to {colname}")
if append_string and overwrite_value:
raise ValueError("Cannot append_string and overwrite_value.")
## Parse the exposure numbers
exposure_list = parse_int_list(exp_str, allints=exptable['EXPID'].data, only_unique=True)
## Match exposures to row numbers
row_numbers = []
for exp in exposure_list:
rownum = np.where(exptable['EXPID'] == exp)[0]
if rownum.size > 0:
row_numbers.append(rownum[0])
row_numbers = np.asarray(row_numbers)
## Make sure the value will work
## (returns as is if fine, corrects syntax if it can, or raises an error if it can't)
value = validate_value(colname, value, joinsymb)
## If appending camwords, let's convert to camera list only once to save computation
if colname == 'BADCAMWORD' and append_string:
value_as_camlist = decode_camword(value)
## Inform user on whether reporting will be done
if colname in columns_not_to_report():
print("Won't do comment reporting for user defined column.")
## Get column names and definitions
colnames,coldtypes,coldeflts = get_exposure_table_column_defs(return_default_values=True)
colnames,coldtypes,coldeflts = np.array(colnames),np.array(coldtypes),np.array(coldeflts,dtype=object)
cur_dtype = coldtypes[colnames==colname][0]
cur_default = coldeflts[colnames==colname][0]
if include_comment != '' and 'COMMENTS' not in colnames:
print("Given a comment to append to the exposure tables, but COMMENTS isn't in column names. "+
"Not including comment")
## Assign new value
isstr = (cur_dtype in [str, np.str_] or type(cur_dtype) is str)
isarr = (cur_dtype in [list, np.array, np.ndarray])
appendable = (colname not in columns_not_to_append())
if append_string and not isstr:
raise ValueError(f"Told to append_string but {colname} isn't a string: {cur_dtype}")
elif overwrite_value:
print(f"Overwriting values in column: {colname} to '{value}' for exposures: {exposure_list}.")
elif append_string:
print(f"Appending '{value}' to existing entries in column: {colname} for exposures: {exposure_list}.")
elif isarr:
print(f"Appending {value} to arrays in column: {colname} for exposures: {exposure_list}.")
else:
print(f"Changing default values in column: {colname} to '{value}' for exposures: {exposure_list}.")
orig_exptable = exptable.copy()[row_numbers]
for rownum in row_numbers:
exp = exptable['EXPID'][rownum]
if colname == 'BADCAMWORD' and exptable[colname][rownum] != cur_default and append_string:
curcams = decode_camword(exptable[colname][rownum])
if len(set(value_as_camlist).difference(set(curcams))) == 0:
print(f"For exposure: {exp}. Asked to append '{value}' to '{exptable[colname][rownum]}'" +
" but all bad cameras are already present. Skipping and not commenting.")
continue
else:
curcams.extend(value_as_camlist)
combinedcams = list(set(curcams))
exptable[colname][rownum] = create_camword(combinedcams)
elif colname == 'BADAMPS' and append_string and exptable[colname][rownum] != cur_default:
curamps = exptable[colname][rownum].split(joinsymb)
value_as_amplist = value.split(joinsymb)
newvals = list(set(value_as_amplist).difference(set(curamps)))
if len(newvals) == 0:
print(f"For exposure: {exp}. Asked to append '{value}' to '{exptable[colname][rownum]}'"+
" but all badamps are already present. Skipping and not commenting.")
continue
else:
curamps.extend(newvals)
exptable[colname][rownum] = joinsymb.join(curamps)
elif isstr and append_string and exptable[colname][rownum] != cur_default:
exptable[colname][rownum] += f'{joinsymb}{value}'
elif isarr:
if overwrite_value and len(exptable[colname][rownum])>0:
exptable[rownum] = document_in_comments(exptable[rownum],colname,value)
exptable[colname][rownum] = np.append(cur_default, value)
else:
exptable[colname][rownum] = np.append(exptable[colname][rownum], value)
else:
if overwrite_value or exptable[colname][rownum] == cur_default:
exptable[rownum] = document_in_comments(exptable[rownum],colname,value)
exptable[colname][rownum] = value
else:
exp = exptable[rownum]['EXPID']
err = f"In exposure {exp} for column {colname}: asked to fill non-default " + \
f"entry '{exptable[colname][rownum]}' with '{value}'.\n" + \
f"\t\tTo overwrite, use --overwrite-value.\n"
if appendable:
err += "\t\tTo append to the existing, use --append-string.\n"
err += f"\t\tOriginal column entries for requested exposures were:\n"
for exp,val in zip(list(orig_exptable['EXPID']), list(orig_exptable[colname])):
err += f"\t\t\t{exp}: {val}\n"
err += "\n\t\tNo entries updated. Exiting."
raise ValueError(err)
if include_comment != '' and 'COMMENTS' in colnames:
exptable['COMMENTS'][rownum] = np.append(exptable['COMMENTS'][rownum], include_comment)
meaningful_comments = (exptable['COMMENTS'][rownum] != '')
exptable['COMMENTS'][rownum] = exptable['COMMENTS'][rownum][meaningful_comments]
return exptable
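# Illustrative sketch, not part of the original module: flagging a range of
# exposures in an in-memory table. `exptable` is assumed to come from
# desispec.workflow.tableio.load_table, and 'skysub' is assumed to be one of
# the get_last_step_options() values.
def _example_change_rows(exptable):
    return change_exposure_table_rows(exptable, exp_str='104:106,110',
                                      colname='LASTSTEP', value='skysub',
                                      include_comment='stop at sky subtraction',
                                      overwrite_value=True)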
def edit_exposure_table(exp_str, colname, value, night=None, include_comment='', tablepath=None,
append_string=False, overwrite_value=False, use_spec_prod=True,
read_user_version=False, write_user_version=False, overwrite_file=True, joinsymb=','):
"""
Edits the exposure table on disk, changing the column named colname to the given value for the rows that
correspond to the exposures defined in exp_str. The table on disk can be selected by night, or given directly
with tablepath.
Note: This overwrites an exposure table file on disk by default.
Args:
exp_str, str. A string representing the exposure ID's for which you want to edit the column to a new value.
The string can be any combination of integer ranges, single integers, or 'all'. Each range or integer
is separated by a comma. 'all' implies all exposures. Ranges can be given using ':', '-', or '..'.
All ranges are assumed to be inclusive.
colname, str. The column name in the exptable where you want to change values.
value, any scalar type. The value you want to change the column value of each exp_str exposure row to.
night, str or int. The night the exposures were acquired on. This uniquely defines the exposure table.
include_comment, str. A user specified comment to be added to the COMMENTS column after setting colname to
value for the given exp_str exposures.
tablepath, str. A relative or absolute path to the exposure table file, if named differently from the default
in desispec.workflow.exptable.
append_string, bool. True if you want to append your input value to the end of an existing string.
overwrite_value, bool. Default is False. Must be set to True if you want to overwrite a non-default value.
If current value is a default value for that column for that row,
this doesn't need to be set.
use_spec_prod, bool. True if you want to read in the exposure table defined by night from the currently
defined SPECPROD as opposed to the exposure table repository location. Default is True.
read_user_version, bool. True if you want to read in an exposure table saved including the current user's
USER name. Meant for test editing of a file multiple times. If the file doesn't exist,
the non-user value is loaded. Default is False.
write_user_version, bool. True if you want to write in an exposure table saved including the current user's
USER name. Meant for test editing of a file without overwriting the true exposure table.
Default is False.
overwrite_file, bool. True if you want to overwrite the file on disk. Default is True.
joinsymb, str. The symbol used to separate string elements that are being appended. Shouldn't be '|'.
Default is ','.
"""
## Don't edit fixed columns
colname = colname.upper()
if tablepath is None and night is None:
raise ValueError("Must specify night or the path to the table.")
if colname in columns_not_to_edit():
raise ValueError(f"Not allowed to edit colname={colname}.")
if append_string and colname in columns_not_to_append():
raise ValueError(f"Cannot append_string to {colname}")
if append_string and overwrite_value:
raise ValueError("Cannot append_string and overwrite_value.")
## Get the file locations
if tablepath is not None:
path, name = os.path.split(tablepath)
else:
path = get_exposure_table_path(night=night, usespecprod=use_spec_prod)
name = get_exposure_table_name(night=night)#, extension='.csv')
pathname = pathjoin(path, name)
user_pathname = os.path.join(path, name.replace('.csv', '_' + str(os.environ['USER']) + '.csv'))
## Read in the table
if read_user_version:
if os.path.isfile(user_pathname):
exptable = load_table(tablename=user_pathname, tabletype='exptable')
else:
print("Couldn't locate a user version of the exposure table, loading the default version of the table.")
exptable = load_table(tablename=pathname, tabletype='exptable')
else:
exptable = load_table(tablename=pathname, tabletype='exptable')
if exptable is None:
print("There was a problem loading the exposure table... Exiting.")
return
## Do the modification
outtable = change_exposure_table_rows(exptable, exp_str, colname, value, include_comment,
append_string, overwrite_value, joinsymb)
## Write out the table
if write_user_version:
write_table(outtable, tablename=user_pathname, tabletype='exptable', overwrite=overwrite_file)
print(f"Wrote edited table to: {user_pathname}")
else:
write_table(outtable, tablename=pathname, tabletype='exptable', overwrite=overwrite_file)
print(f"Wrote edited table to: {pathname}")
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Manage a single benchmark and, when run from the commandline, report
its runtime to a file.
"""
# !!!!!!!!!!!!!!!!!!!! NOTE !!!!!!!!!!!!!!!!!!!!
# This file, unlike most others, must be compatible with as many
# versions of Python as possible and have no dependencies outside of
# the Python standard library. This is the only bit of code from asv
# that is imported into the benchmarking process.
# Remove asv package directory from sys.path. This script file resides
# there although it's not part of the package, and Python puts it to
# sys.path[0] on start which can shadow other modules
import sys
sys.path.pop(0)
import copy
try:
import cProfile as profile
except ImportError:
profile = None
import ctypes
from ctypes.util import find_library
import errno
import imp
import inspect
import itertools
import json
import os
import pickle
import re
import textwrap
import timeit
# The best timer we can use is time.process_time, but it is not
# available in the Python stdlib until Python 3.3. This is a ctypes
# backport for Pythons that don't have it.
try:
from time import process_time
except ImportError: # Python <3.3
if sys.platform.startswith("linux"):
CLOCK_PROCESS_CPUTIME_ID = 2 # time.h
clockid_t = ctypes.c_int
time_t = ctypes.c_long
class timespec(ctypes.Structure):
_fields_ = [
('tv_sec', time_t), # seconds
('tv_nsec', ctypes.c_long) # nanoseconds
]
_clock_gettime = ctypes.CDLL(
find_library('rt'), use_errno=True).clock_gettime
_clock_gettime.argtypes = [clockid_t, ctypes.POINTER(timespec)]
def process_time():
tp = timespec()
if _clock_gettime(CLOCK_PROCESS_CPUTIME_ID, ctypes.byref(tp)) < 0:
err = ctypes.get_errno()
msg = errno.errorcode[err]
if err == errno.EINVAL:
msg += (
"The clk_id (4) specified is not supported on this system")
raise OSError(err, msg)
return tp.tv_sec + tp.tv_nsec * 1e-9
elif sys.platform == 'darwin':
RUSAGE_SELF = 0 # sys/resources.h
time_t = ctypes.c_long
suseconds_t = ctypes.c_int32
class timeval(ctypes.Structure):
_fields_ = [
('tv_sec', time_t),
('tv_usec', suseconds_t)
]
class rusage(ctypes.Structure):
_fields_ = [
('ru_utime', timeval),
('ru_stime', timeval),
('ru_maxrss', ctypes.c_long),
('ru_ixrss', ctypes.c_long),
('ru_idrss', ctypes.c_long),
('ru_isrss', ctypes.c_long),
('ru_minflt', ctypes.c_long),
('ru_majflt', ctypes.c_long),
('ru_nswap', ctypes.c_long),
('ru_inblock', ctypes.c_long),
('ru_oublock', ctypes.c_long),
('ru_msgsnd', ctypes.c_long),
('ru_msgrcv', ctypes.c_long),
('ru_nsignals', ctypes.c_long),
('ru_nvcsw', ctypes.c_long),
('ru_nivcsw', ctypes.c_long)
]
_getrusage = ctypes.CDLL(find_library('c'), use_errno=True).getrusage
_getrusage.argtypes = [ctypes.c_int, ctypes.POINTER(rusage)]
def process_time():
ru = rusage()
if _getrusage(RUSAGE_SELF, ctypes.byref(ru)) < 0:
err = ctypes.get_errno()
msg = errno.errorcode[err]
if err == errno.EINVAL:
msg += (
"The clk_id (0) specified is not supported on this system")
raise OSError(err, msg)
return float(ru.ru_utime.tv_sec + ru.ru_utime.tv_usec * 1e-6 +
ru.ru_stime.tv_sec + ru.ru_stime.tv_usec * 1e-6)
else:
# Fallback to default timer
process_time = timeit.default_timer
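# Illustrative sketch, not part of the original file: whichever branch above
# defined process_time, the non-fallback paths report CPU time rather than
# wall-clock time, so time spent sleeping is invisible to them.
def _example_process_time():
    import time
    start = process_time()
    time.sleep(0.1)                # burns wall time but almost no CPU time
    return process_time() - start  # ~0 on the CPU-time paths, ~0.1 with the fallback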
def get_maxrss():
# Fallback function, in case we don't have one that works on the
# current platform
return None
if sys.platform.startswith('win'):
import ctypes
import ctypes.wintypes
SIZE_T = ctypes.c_size_t
class PROCESS_MEMORY_COUNTERS(ctypes.Structure):
_fields_ = [
('cb', ctypes.wintypes.DWORD),
('PageFaultCount', ctypes.wintypes.DWORD),
('PeakWorkingSetSize', SIZE_T),
('WorkingSetSize', SIZE_T),
('QuotaPeakPagedPoolUsage', SIZE_T),
('QuotaPagedPoolUsage', SIZE_T),
('QuotaPeakNonPagedPoolUsage', SIZE_T),
('QuotaNonPagedPoolUsage', SIZE_T),
('PagefileUsage', SIZE_T),
('PeakPagefileUsage', SIZE_T),
]
GetCurrentProcess = ctypes.windll.kernel32.GetCurrentProcess
GetCurrentProcess.argtypes = []
GetCurrentProcess.restype = ctypes.wintypes.HANDLE
GetProcessMemoryInfo = ctypes.windll.psapi.GetProcessMemoryInfo
GetProcessMemoryInfo.argtypes = (ctypes.wintypes.HANDLE,
ctypes.POINTER(PROCESS_MEMORY_COUNTERS),
ctypes.wintypes.DWORD)
GetProcessMemoryInfo.restype = ctypes.wintypes.BOOL
def get_maxrss():
proc_hnd = GetCurrentProcess()
counters = PROCESS_MEMORY_COUNTERS()
info = GetProcessMemoryInfo(proc_hnd, ctypes.byref(counters), ctypes.sizeof(counters))
if info == 0:
raise ctypes.WinError()
return counters.PeakWorkingSetSize
else:
try:
import resource
# POSIX
if sys.platform == 'darwin':
def get_maxrss():
# OSX getrusage returns maxrss in bytes
# https://developer.apple.com/library/mac/documentation/Darwin/Reference/ManPages/man2/getrusage.2.html
return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
else:
def get_maxrss():
# Linux, *BSD return maxrss in kilobytes
return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss * 1024
except ImportError:
pass
try:
from importlib import import_module
except ImportError: # For Python 2.6
def _resolve_name(name, package, level):
if not hasattr(package, 'rindex'):
raise ValueError("'package' not set to a string")
dot = len(package)
for x in xrange(level, 1, -1):
try:
dot = package.rindex('.', 0, dot)
except ValueError:
raise ValueError("attempted relative import beyond top-level "
"package")
return "%s.%s" % (package[:dot], name)
def import_module(name, package=None):
if name.startswith('.'):
if not package:
raise TypeError("relative imports require the 'package' argument")
level = 0
for character in name:
if character != '.':
break
level += 1
name = _resolve_name(name[level:], package, level)
__import__(name)
return sys.modules[name]
def _get_attr(source, name, ignore_case=False):
if ignore_case:
attrs = [getattr(source, key) for key in dir(source)
if key.lower() == name.lower()]
if len(attrs) > 1:
raise ValueError(
"{0} contains multiple {1} functions.".format(
source.__name__, name))
elif len(attrs) == 1:
return attrs[0]
else:
return None
else:
return getattr(source, name, None)
def _get_all_attrs(sources, name, ignore_case=False):
for source in sources:
val = _get_attr(source, name, ignore_case=ignore_case)
if val is not None:
yield val
def _get_first_attr(sources, name, default, ignore_case=False):
for val in _get_all_attrs(sources, name, ignore_case=ignore_case):
return val
return default
def get_benchmark_type_from_name(name):
for bm_type in benchmark_types:
if bm_type.name_regex.match(name):
return bm_type
return None
def get_setup_cache_key(func):
if func is None:
return None
return '{0}:{1}'.format(inspect.getsourcefile(func),
inspect.getsourcelines(func)[1])
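# Illustrative sketch, not part of the original file: the cache key combines
# the source file and line of the setup_cache definition (e.g. something like
# 'benchmarks/bench_io.py:12'), so same-named setup_cache functions in
# different modules still get distinct caches.
def _example_setup_cache_key():
    def setup_cache():
        return {}
    return get_setup_cache_key(setup_cache)  # '<this source file>:<line of def>'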
class Benchmark(object):
"""
Represents a single benchmark.
"""
# The regex of the name of function or method to be considered as
# this type of benchmark. The default in the base class will
# match nothing.
name_regex = re.compile('^$')
def __init__(self, name, func, attr_sources):
name = name.split('.', 1)[1]
self.name = name
self.func = func
self._attr_sources = attr_sources
self._setups = list(_get_all_attrs(attr_sources, 'setup', True))[::-1]
self._teardowns = list(_get_all_attrs(attr_sources, 'teardown', True))
self._setup_cache = _get_first_attr(attr_sources, 'setup_cache', None)
self.setup_cache_key = get_setup_cache_key(self._setup_cache)
self.timeout = _get_first_attr(attr_sources, "timeout", 60.0)
self.code = textwrap.dedent(inspect.getsource(self.func))
self.type = "base"
self.unit = "unit"
self.__redo_setup_first = True
self._params = _get_first_attr(attr_sources, "params", [])
self.param_names = _get_first_attr(attr_sources, "param_names", [])
self._current_params = ()
# Enforce params format
try:
self.param_names = [str(x) for x in list(self.param_names)]
except ValueError:
raise ValueError("%s.param_names is not a list of strings" % (name,))
try:
self._params = list(self._params)
except ValueError:
raise ValueError("%s.params is not a list" % (name,))
if self._params and not isinstance(self._params[0], (tuple, list)):
# Accept a single list for one parameter only
self._params = [self._params]
else:
self._params = [[item for item in entry] for entry in self._params]
if len(self.param_names) != len(self._params):
self.param_names = self.param_names[:len(self._params)]
self.param_names += ['param%d' % (k+1,) for k in range(len(self.param_names),
len(self._params))]
# Exported parameter representations
self.params = [[repr(item) for item in entry] for entry in self._params]
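# Illustrative note, not part of the original file: with params = [1, 2, 3]
# the normalization above produces _params = [[1, 2, 3]] and param_names =
# ['param1'], so itertools.product(*self._params) in set_param_idx yields the
# single-element tuples (1,), (2,) and (3,).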
def set_param_idx(self, param_idx):
try:
self._current_params, = itertools.islice(
itertools.product(*self._params),
param_idx, param_idx + 1)
except ValueError:
raise ValueError(
"Invalid benchmark parameter permutation index: %r" % (param_idx,))
def insert_param(self, param):
"""
Insert a parameter at the front of the parameter list.
"""
self._current_params = tuple([param] + list(self._current_params))
def __repr__(self):
return '<{0} {1}>'.format(self.__class__.__name__, self.name)
@classmethod
def from_function(cls, func):
"""
Create a benchmark object from a free function.
"""
module = inspect.getmodule(func)
name = '.'.join(
[module.__name__, func.__name__])
return cls(name, func, [func, inspect.getmodule(func)])
@classmethod
def from_class_method(cls, klass, method_name):
"""
Create a benchmark object from a method.
Parameters
----------
klass : type
The class containing the method.
method_name : str
The name of the method.
"""
module = inspect.getmodule(klass)
instance = klass()
func = getattr(instance, method_name)
name = '.'.join(
[module.__name__, klass.__name__, method_name])
return cls(name, func, [func, instance, module])
@classmethod
def from_name(cls, root, name, quick=False):
"""
Create a benchmark from a fully-qualified benchmark name.
Parameters
----------
root : str
Path to the root of a benchmark suite.
name : str
Fully-qualified name to a specific benchmark.
"""
update_sys_path(root)
def find_on_filesystem(root, parts, package):
path = os.path.join(root, parts[0])
if package:
new_package = package + '.' + parts[0]
else:
new_package = parts[0]
if os.path.isfile(path + '.py'):
module = import_module(new_package)
return find_in_module(module, parts[1:])
elif os.path.isdir(path):
return find_on_filesystem(
path, parts[1:], new_package)
def find_in_module(module, parts):
attr = getattr(module, parts[0], None)
if attr is not None:
if inspect.isfunction(attr):
if len(parts) == 1:
bm_type = get_benchmark_type_from_name(parts[0])
if bm_type is not None:
return bm_type.from_function(attr)
elif inspect.isclass(attr):
if len(parts) == 2:
bm_type = get_benchmark_type_from_name(parts[1])
if bm_type is not None:
return bm_type.from_class_method(attr, parts[1])
raise ValueError(
"Could not find benchmark '{0}'".format(name))
if '-' in name:
try:
name, param_idx = name.split('-', 1)
param_idx = int(param_idx)
except ValueError:
raise ValueError("Benchmark id %r is invalid" % (name,))
else:
param_idx = None
parts = name.split('.')
benchmark = find_on_filesystem(
root, parts, os.path.basename(root))
if param_idx is not None:
benchmark.set_param_idx(param_idx)
if quick:
benchmark.repeat = 1
benchmark.number = 1
return benchmark
def do_setup(self):
try:
for setup in self._setups:
setup(*self._current_params)
except NotImplementedError:
# allow skipping test
return True
return False
def redo_setup(self):
if self.__redo_setup_first:
self.__redo_setup_first = False
return
self.do_teardown()
self.do_setup()
def do_teardown(self):
for teardown in self._teardowns:
teardown(*self._current_params)
def do_setup_cache(self):
if self._setup_cache is not None:
return self._setup_cache()
def do_run(self):
return self.run(*self._current_params)
def do_profile(self, filename=None):
def method_caller():
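            # 'run' and 'params' are resolved from the globals mapping
            # passed to profile.runctx() below.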
run(*params)
if profile is None:
raise RuntimeError("cProfile could not be imported")
if filename is not None:
if hasattr(method_caller, 'func_code'):
code = method_caller.func_code
else:
code = method_caller.__code__
self.redo_setup()
profile.runctx(
code, {'run': self.func, 'params': self._current_params},
{}, filename)
class TimeBenchmark(Benchmark):
"""
Represents a single benchmark for timing.
"""
name_regex = re.compile(
'^(Time[A-Z_].+)|(time_.+)$')
def __init__(self, name, func, attr_sources):
Benchmark.__init__(self, name, func, attr_sources)
self.type = "time"
self.unit = "seconds"
self._attr_sources = attr_sources
self._load_vars()
def _load_vars(self):
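        # A repeat or number of 0 means "determine automatically"; see run().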
self.repeat = _get_first_attr(self._attr_sources, 'repeat', 0)
self.number = int(_get_first_attr(self._attr_sources, 'number', 0))
self.goal_time = _get_first_attr(self._attr_sources, 'goal_time', 2.0)
self.timer = _get_first_attr(self._attr_sources, 'timer', process_time)
def do_setup(self):
result = Benchmark.do_setup(self)
# For parameterized tests, setup() is allowed to change these
self._load_vars()
return result
def run(self, *param):
number = self.number
repeat = self.repeat
if repeat == 0:
repeat = timeit.default_repeat
if param:
func = lambda: self.func(*param)
else:
func = self.func
timer = timeit.Timer(
stmt=func,
setup=self.redo_setup,
timer=self.timer)
if number == 0:
# determine number automatically so that
# goal_time / 10 <= total time < goal_time
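            # e.g. with goal_time = 2.0 and a ~5 ms statement, number grows
            # 1 -> 10 -> 100 until a single trial takes at least 0.2 s.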
number = 1
for i in range(1, 10):
timing = timer.timeit(number)
if timing >= 5*self.goal_time and number == 1 and self.repeat == 0:
# very slow benchmark: use a default repeat value of 1
self.repeat = repeat = 1
break
elif timing >= self.goal_time / 10.0:
break
number *= 10
self.number = number
# keep the timing from the run we already made
repeat -= 1
all_runs = [timing]
else:
all_runs = []
if repeat > 0:
all_runs.extend(timer.repeat(repeat, number))
best = min(all_runs) / number
return best
class MemBenchmark(Benchmark):
"""
Represents a single benchmark for tracking the memory consumption
of an object.
"""
name_regex = re.compile(
'^(Mem[A-Z_].+)|(mem_.+)$')
def __init__(self, name, func, attr_sources):
Benchmark.__init__(self, name, func, attr_sources)
self.type = "memory"
self.unit = "bytes"
def run(self, *param):
# We can't import asizeof directly, because we haven't loaded
# the asv package in the benchmarking process.
path = os.path.join(
os.path.dirname(__file__), 'extern', 'asizeof.py')
asizeof = imp.load_source('asizeof', path)
obj = self.func(*param)
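        # asizeof([obj, obj]) counts the object once; with a shallow copy the
        # second element's unshared footprint is counted too, so the
        # difference approximates the size of one instance.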
sizeof2 = asizeof.asizeof([obj, obj])
sizeofcopy = asizeof.asizeof([obj, copy.copy(obj)])
return sizeofcopy - sizeof2
class PeakMemBenchmark(Benchmark):
"""
Represents a single benchmark for tracking the peak memory consumption
of the whole program.
"""
name_regex = re.compile(
'^(PeakMem[A-Z_].+)|(peakmem_.+)$')
def __init__(self, name, func, attr_sources):
Benchmark.__init__(self, name, func, attr_sources)
self.type = "peakmemory"
self.unit = "bytes"
def run(self, *param):
self.func(*param)
return get_maxrss()
class TrackBenchmark(Benchmark):
"""
Represents a single benchmark for tracking an arbitrary value.
"""
name_regex = re.compile(
'^(Track[A-Z_].+)|(track_.+)$')
def __init__(self, name, func, attr_sources):
Benchmark.__init__(self, name, func, attr_sources)
self.type = _get_first_attr(attr_sources, "type", "track")
self.unit = _get_first_attr(attr_sources, "unit", "unit")
def run(self, *param):
return self.func(*param)
# TODO: Support the creation of custom benchmark types
benchmark_types = [
TimeBenchmark, MemBenchmark, PeakMemBenchmark, TrackBenchmark
]
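# Discovery matches attribute names against each type's name_regex,
# e.g. time_foo / TimeSuite, mem_foo, peakmem_foo and track_foo.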
class SpecificImporter(object):
"""
Module importer that only allows loading a given module from the
given path.
Using this enables importing the asv benchmark suite without
adding its parent directory to sys.path. The parent directory can
in principle contain anything, including some version of the
project module (common situtation if asv.conf.json is on project
repository top level).
"""
def __init__(self, name, root):
self._name = name
self._root = root
def find_module(self, fullname, path=None):
if fullname == self._name:
return self
return None
def load_module(self, fullname):
file, pathname, desc = imp.find_module(fullname, [self._root])
return imp.load_module(fullname, file, pathname, desc)
def update_sys_path(root):
sys.meta_path.insert(0, SpecificImporter(os.path.basename(root),
os.path.dirname(root)))
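# e.g. update_sys_path('/repo/benchmarks') makes 'import benchmarks' resolve
# against /repo without exposing anything else in /repo on sys.path.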
def disc_class(klass):
"""
Iterate over all benchmarks in a given class.
For each method with a special name, yields a Benchmark
object.
"""
for key, val in inspect.getmembers(klass):
bm_type = get_benchmark_type_from_name(key)
if bm_type is not None and (inspect.isfunction(val) or inspect.ismethod(val)):
yield bm_type.from_class_method(klass, key)
def disc_objects(module):
"""
Iterate over all benchmarks in a given module, returning
Benchmark objects.
For each class definition, looks for any methods with a
special name.
For each free function, yields all functions with a special
name.
"""
for key, val in module.__dict__.items():
if key.startswith('_'):
continue
if inspect.isclass(val):
for benchmark in disc_class(val):
yield benchmark
elif inspect.isfunction(val):
bm_type = get_benchmark_type_from_name(key)
if bm_type is not None:
yield bm_type.from_function(val)
def disc_files(root, package=''):
"""
Iterate over all .py files in a given directory tree.
"""
for filename in os.listdir(root):
path = os.path.join(root, filename)
if os.path.isfile(path):
filename, ext = os.path.splitext(filename)
if ext == '.py':
module = import_module(package + filename)
yield module
elif os.path.isdir(path):
for x in disc_files(path, package + filename + "."):
yield x
def disc_benchmarks(root):
"""
Discover all benchmarks in a given directory tree.
"""
for module in disc_files(root, os.path.basename(root) + '.'):
for benchmark in disc_objects(module):
yield benchmark
def list_benchmarks(root, fp):
"""
List all of the discovered benchmarks to fp as JSON.
"""
update_sys_path(root)
# Streaming of JSON back out to the master process
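    # e.g. fp ends up containing something like:
    # [{"name": "benchmarks.time_foo", "type": "time", "unit": "seconds", ...}]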
fp.write('[')
first = True
for benchmark in disc_benchmarks(root):
if not first:
fp.write(', ')
clean = dict(
(k, v) for (k, v) in benchmark.__dict__.items()
if isinstance(v, (str, int, float, list, dict, bool)) and not
k.startswith('_'))
json.dump(clean, fp, skipkeys=True)
first = False
fp.write(']')
def main_discover(args):
benchmark_dir, result_file = args
with open(result_file, 'w') as fp:
list_benchmarks(benchmark_dir, fp)
def main_setup_cache(args):
(benchmark_dir, benchmark_id) = args
benchmark = Benchmark.from_name(benchmark_dir, benchmark_id)
cache = benchmark.do_setup_cache()
with open("cache.pickle", "wb") as fd:
pickle.dump(cache, fd)
def main_run(args):
(benchmark_dir, benchmark_id, quick, profile_path, result_file) = args
quick = (quick == 'True')
if profile_path == 'None':
profile_path = None
benchmark = Benchmark.from_name(
benchmark_dir, benchmark_id, quick=quick)
if benchmark.setup_cache_key is not None:
with open("cache.pickle", "rb") as fd:
cache = pickle.load(fd)
if cache is not None:
benchmark.insert_param(cache)
skip = benchmark.do_setup()
try:
if skip:
result = float('nan')
else:
result = benchmark.do_run()
if profile_path is not None:
benchmark.do_profile(profile_path)
finally:
benchmark.do_teardown()
# Write the output value
with open(result_file, 'w') as fp:
json.dump(result, fp)
commands = {
'discover': main_discover,
'setup_cache': main_setup_cache,
'run': main_run
}
if __name__ == '__main__':
mode = sys.argv[1]
args = sys.argv[2:]
if mode in commands:
commands[mode](args)
sys.exit(0)
else:
sys.stderr.write("Unknown mode {0}\n".format(mode))
sys.exit(1)
|
|
import os.path
from datetime import datetime
from django.contrib.auth.models import User
from django.contrib.auth.views import logout as logout_view
from django.core.urlresolvers import reverse
from django.test.client import Client
from django.test.utils import override_settings, override_script_prefix
from mock import patch
from nose.tools import eq_, ok_
from waffle.models import Flag
from mozillians.common.tests import TestCase, requires_login, requires_vouch
from mozillians.phonebook.models import Invite
from mozillians.phonebook.tests import InviteFactory, _get_privacy_fields
from mozillians.users.managers import MOZILLIANS, PRIVATE, PUBLIC
from mozillians.users.models import UserProfilePrivacyModel
from mozillians.users.tests import UserFactory
class SearchTests(TestCase):
def test_search_plugin_anonymous(self):
client = Client()
response = client.get(reverse('dino_park:search_plugin'), follow=True)
eq_(response.status_code, 200)
eq_(response.get('content-type'),
'application/opensearchdescription+xml')
def test_search_plugin_unvouched(self):
user = UserFactory.create(vouched=False)
with self.login(user) as client:
response = client.get(reverse('dino_park:search_plugin'),
follow=True)
eq_(response.status_code, 200)
eq_(response.get('content-type'),
'application/opensearchdescription+xml')
def test_search_plugin_vouched(self):
user = UserFactory.create()
with self.login(user) as client:
response = client.get(reverse('dino_park:search_plugin'),
follow=True)
eq_(response.status_code, 200)
eq_(response.get('content-type'),
'application/opensearchdescription+xml')
class InviteTests(TestCase):
@requires_login()
def test_invite_anonymous(self):
client = Client()
client.get(reverse('phonebook:invite'), follow=True)
@requires_vouch()
def test_invite_unvouched(self):
user = UserFactory.create(vouched=False)
with self.login(user) as client:
client.get(reverse('phonebook:invite'), follow=True)
def test_invite_get_vouched(self):
user = UserFactory.create()
with self.login(user) as client:
response = client.get(reverse('phonebook:invite'), follow=True)
self.assertTemplateUsed(response, 'phonebook/invite.html')
@override_settings(CAN_VOUCH_THRESHOLD=1)
@patch('mozillians.phonebook.views.messages.success')
def test_invite_post_vouched(self, success_mock):
user = UserFactory.create()
with override_script_prefix('/en-US/'):
url = reverse('phonebook:invite')
data = {
'message': 'Join us foo!',
'recipient': 'foo@example.com',
'description': 'A test reason'
}
with self.login(user) as client:
response = client.post(url, data, follow=True)
self.assertTemplateUsed(response, 'phonebook/invite.html')
ok_(Invite.objects
.filter(recipient='foo@example.com', inviter=user.userprofile)
.exists())
ok_(success_mock.called)
@override_settings(CAN_VOUCH_THRESHOLD=1)
def test_invite_already_vouched(self):
vouched_user = UserFactory.create()
user = UserFactory.create()
with override_script_prefix('/en-US/'):
url = reverse('phonebook:invite')
data = {'recipient': vouched_user.email}
with self.login(user) as client:
response = client.post(url, data, follow=True)
self.assertTemplateUsed(response, 'phonebook/invite.html')
ok_('recipient' in response.context['invite_form'].errors)
eq_(Invite.objects.all().count(), 0)
def test_invite_delete(self):
user = UserFactory.create(userprofile={'is_vouched': True})
invite = InviteFactory.create(inviter=user.userprofile)
with override_script_prefix('/en-US/'):
url = reverse('phonebook:delete_invite', kwargs={'invite_pk': invite.pk})
with self.login(user) as client:
response = client.post(url, follow=True)
eq_(Invite.objects.all().count(), 0)
eq_(response.status_code, 200)
def test_invite_delete_invalid_requester(self):
user = UserFactory.create(userprofile={'is_vouched': True})
invite = InviteFactory.create(inviter=user.userprofile)
with override_script_prefix('/en-US/'):
url = reverse('phonebook:delete_invite', kwargs={'invite_pk': invite.pk})
invalid_requester = UserFactory.create(userprofile={'is_vouched': True})
with self.login(invalid_requester) as client:
response = client.post(url)
eq_(Invite.objects.all().count(), 1)
eq_(response.status_code, 404)
def test_invite_delete_redeemed(self):
user = UserFactory.create(userprofile={'is_vouched': True})
invite = InviteFactory.create(inviter=user.userprofile, redeemed=datetime.now())
with override_script_prefix('/en-US/'):
url = reverse('phonebook:delete_invite', kwargs={'invite_pk': invite.pk})
with self.login(user) as client:
response = client.post(url)
eq_(Invite.objects.all().count(), 1)
eq_(response.status_code, 404)
def test_invite_delete_invalid_invite(self):
user = UserFactory.create(userprofile={'is_vouched': True})
with override_script_prefix('/en-US/'):
url = reverse('phonebook:delete_invite', kwargs={'invite_pk': '1'})
with self.login(user) as client:
response = client.post(url)
eq_(response.status_code, 404)
class VouchFormTests(TestCase):
def test_vouch_not_vouched(self):
user = UserFactory.create(vouched=False, userprofile={'privacy_full_name': PUBLIC})
voucher = UserFactory.create(vouched=False)
with override_script_prefix('/en-US/'):
url = reverse('phonebook:profile_view', args=[user.username])
data = {'vouchee': user.userprofile.id,
'description': 'a reason'}
with self.login(voucher) as client:
client.post(url, data)
unvouched_user = User.objects.get(id=user.id)
ok_(not unvouched_user.userprofile.is_vouched)
def test_vouch_no_description(self):
user = UserFactory.create(vouched=False)
voucher = UserFactory.create()
with override_script_prefix('/en-US/'):
url = reverse('phonebook:profile_view', args=[user.username])
data = {'vouchee': user.userprofile.id,
'description': ''}
with self.login(voucher) as client:
client.post(url, data)
unvouched_user = User.objects.get(id=user.id)
ok_(not unvouched_user.userprofile.is_vouched)
@override_settings(CAN_VOUCH_THRESHOLD=1)
@patch('mozillians.phonebook.views.messages.info')
def test_vouch_unvouched(self, info_mock):
user = UserFactory.create(vouched=False)
user.userprofile.vouch(None)
unvouched_user = UserFactory.create(vouched=False)
with override_script_prefix('/en-US/'):
url = reverse('phonebook:profile_view', args=[unvouched_user.username])
data = {'vouchee': unvouched_user.userprofile.id,
'description': 'a reason'}
with self.login(user) as client:
response = client.post(url, data, follow=True)
unvouched_user = User.objects.get(id=unvouched_user.id)
self.assertTemplateUsed(response, 'phonebook/profile.html')
eq_(response.context['profile'], unvouched_user.userprofile)
ok_(unvouched_user.userprofile.is_vouched)
ok_(info_mock.called)
self.assertRedirects(response, url)
class LogoutTests(TestCase):
@requires_login()
def test_logout_anonymous(self):
client = Client()
client.get(reverse('phonebook:logout'), follow=True)
@patch('mozillians.phonebook.views.auth_logout', wraps=logout_view)
def test_logout_unvouched(self, logout_mock):
user = UserFactory.create(vouched=False)
with self.login(user) as client:
response = client.get(reverse('phonebook:logout'), follow=True)
eq_(response.status_code, 200)
self.assertTemplateUsed(response, 'phonebook/home.html')
ok_(logout_mock.called)
@patch('mozillians.phonebook.views.auth_logout', wraps=logout_view)
def test_logout_vouched(self, logout_mock):
user = UserFactory.create()
with self.login(user) as client:
response = client.get(reverse('phonebook:logout'), follow=True)
eq_(response.status_code, 200)
self.assertTemplateUsed(response, 'phonebook/home.html')
ok_(logout_mock.called)
class ImageTests(TestCase):
def _upload_photo(self, user, file_path):
"""Helper for the next methods."""
data = {
'full_name': user.userprofile.full_name,
'email': user.email,
'username': user.username,
'lat': 40.005814,
'lng': -3.42071,
'photo': open(file_path, 'rb'),
'externalaccount_set-MAX_NUM_FORMS': '1000',
'externalaccount_set-INITIAL_FORMS': '0',
'externalaccount_set-TOTAL_FORMS': '0',
'language_set-MAX_NUM_FORMS': '1000',
'language_set-INITIAL_FORMS': '0',
'language_set-TOTAL_FORMS': '0',
'basic_section': ''
}
data.update(_get_privacy_fields(MOZILLIANS))
with override_script_prefix('/en-US/'):
url = reverse('phonebook:profile_edit')
with self.login(user) as client:
response = client.post(url, data=data, follow=True)
eq_(response.status_code, 200)
def test_exif_broken(self):
"""Test image with broken EXIF data."""
user = UserFactory.create()
file_path = os.path.join(os.path.dirname(__file__), 'broken_exif.jpg')
self._upload_photo(user, file_path)
def test_no_rgb_colorspace(self):
"""Test with image not in RGB colorspace.
Related bug 928959.
"""
user = UserFactory.create()
file_path = os.path.join(os.path.dirname(__file__),
'broken_colorspace.gif')
self._upload_photo(user, file_path)
def test_converted_larger_image(self):
"""Test image which gets cleaned in forms.py.
        Bug 921243 was caused by a valid image without EXIF
        data. That made image._get_exif() in
        phonebook.forms.ProfileForm.clean_photo raise an
        AttributeError, so the image was cleaned (re-saved).
Cleaning the image (by re-saving) did not set the new file
size in the `photo` variable. If the cleaned image was larger
than the original image, this behavior resulted in corrupted
images being fed into PIL, which raises IOErrors.
This test reproduces that behavior and should fail if we don't
update the size of `photo` with the new cleaned image size.
"""
user = UserFactory.create()
file_path = os.path.join(os.path.dirname(__file__), 'broken_marshal.jpg')
self._upload_photo(user, file_path)
def test_save_profile_with_existing_photo(self):
"""Test profiles saves when keep the existing photo.
Related bug 925256.
"""
# Set a user with a photo
user = UserFactory.create()
file_path = os.path.join(os.path.dirname(__file__), 'normal_photo.jpg')
self._upload_photo(user, file_path)
# Re-save profile without uploading a new photo.
data = {
'full_name': user.userprofile.full_name,
'email': user.email,
'username': user.username,
'lat': 40.005814,
'lng': -3.42071,
'externalaccount_set-MAX_NUM_FORMS': '1000',
'externalaccount_set-INITIAL_FORMS': '0',
'externalaccount_set-TOTAL_FORMS': '0',
'language_set-MAX_NUM_FORMS': '1000',
'language_set-INITIAL_FORMS': '0',
'language_set-TOTAL_FORMS': '0',
'basic_section': ''
}
for field in UserProfilePrivacyModel._meta.fields:
data[field.name] = MOZILLIANS
data['privacy_tshirt'] = PRIVATE
with override_script_prefix('/en-US/'):
url = reverse('phonebook:profile_edit')
with self.login(user) as client:
response = client.post(url, data=data, follow=True)
eq_(response.status_code, 200)
class DateValidationTests(TestCase):
def test_date_mozillian_validates_in_different_locales(self):
"""Tests if date_mozillian validates when profile language is e.g. 'es'.
Related bug 914448.
"""
user = UserFactory.create(email='es@example.com')
data = {
'full_name': user.userprofile.full_name,
'email': user.email,
'username': user.username,
'lat': 40.005814,
'lng': -3.42071,
'date_mozillian_year': '2013',
'date_mozillian_month': '1',
'externalaccount_set-MAX_NUM_FORMS': '1000',
'externalaccount_set-INITIAL_FORMS': '0',
'externalaccount_set-TOTAL_FORMS': '0',
'language_set-MAX_NUM_FORMS': '1000',
'language_set-INITIAL_FORMS': '0',
'language_set-TOTAL_FORMS': '0',
'contribution_section': ''
}
data.update(_get_privacy_fields(MOZILLIANS))
with override_script_prefix('/es/'):
url = reverse('phonebook:profile_edit')
with self.login(user) as client:
response = client.post(url, data=data, follow=True)
eq_(response.status_code, 200)
class AboutTests(TestCase):
def test_base(self):
url = reverse('phonebook:about')
client = Client()
response = client.get(url, follow=True)
eq_(response.status_code, 200)
self.assertTemplateUsed(response, 'phonebook/about.html')
class AboutDinoMcVouchTests(TestCase):
def test_base(self):
url = reverse('phonebook:about-dinomcvouch')
client = Client()
response = client.get(url, follow=True)
eq_(response.status_code, 200)
self.assertTemplateUsed(response, 'phonebook/about-dinomcvouch.html')
class VouchTests(TestCase):
@patch('mozillians.phonebook.views.flag_is_active')
def test_vouch_disabled(self, mocked_flag):
# Test that 'vouched' view is not active by default.
mocked_flag.return_value = False
user = UserFactory.create(vouched=False)
url = reverse('phonebook:profile_vouch', args=[user.username])
with self.login(user) as client:
response = client.get(url, follow=True)
eq_(response.status_code, 404)
user = User.objects.get(id=user.id)
ok_(not user.userprofile.is_vouched)
@patch('mozillians.phonebook.views.flag_is_active')
def test_unvouch_disabled(self, mocked_flag):
# Test that 'unvouched' view is not active by default.
mocked_flag.return_value = False
user = UserFactory.create(vouched=False)
url = reverse('phonebook:profile_unvouch', args=[user.username])
with self.login(user) as client:
response = client.get(url, follow=True)
eq_(response.status_code, 404)
user = User.objects.get(id=user.id)
ok_(not user.userprofile.is_vouched)
def test_vouch(self):
Flag.objects.create(name='testing-autovouch-views', everyone=True)
user = UserFactory.create(vouched=False)
ok_(not user.userprofile.is_vouched)
url = reverse('phonebook:profile_vouch', args=[user.username])
with self.login(user) as client:
client.get(url, follow=True)
user = User.objects.get(id=user.id)
eq_(user.userprofile.vouches_received.all().count(), 1)
eq_(user.userprofile.vouches_received.all()[0].autovouch, True)
def test_unvouch(self):
Flag.objects.create(name='testing-autovouch-views', everyone=True)
user = UserFactory.create()
ok_(user.userprofile.is_vouched)
url = reverse('phonebook:profile_unvouch', args=[user.username])
with self.login(user) as client:
client.get(url, follow=True)
user = User.objects.get(id=user.id)
ok_(not user.userprofile.vouches_received.all().exists())
|
|
"""
Oracle database backend for Django.
Requires cx_Oracle: https://oracle.github.io/python-cx_Oracle/
"""
import datetime
import decimal
import os
import platform
from contextlib import contextmanager
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db import IntegrityError
from django.db.backends.base.base import BaseDatabaseWrapper
from django.utils.asyncio import async_unsafe
from django.utils.encoding import force_bytes, force_str
from django.utils.functional import cached_property
def _setup_environment(environ):
# Cygwin requires some special voodoo to set the environment variables
# properly so that Oracle will see them.
if platform.system().upper().startswith('CYGWIN'):
try:
import ctypes
except ImportError as e:
raise ImproperlyConfigured("Error loading ctypes: %s; "
"the Oracle backend requires ctypes to "
"operate correctly under Cygwin." % e)
kernel32 = ctypes.CDLL('kernel32')
for name, value in environ:
kernel32.SetEnvironmentVariableA(name, value)
else:
os.environ.update(environ)
_setup_environment([
# Oracle takes client-side character set encoding from the environment.
('NLS_LANG', '.AL32UTF8'),
# This prevents Unicode from getting mangled by getting encoded into the
# potentially non-Unicode database character set.
('ORA_NCHAR_LITERAL_REPLACE', 'TRUE'),
])
try:
import cx_Oracle as Database
except ImportError as e:
raise ImproperlyConfigured("Error loading cx_Oracle module: %s" % e)
# Some of these import cx_Oracle, so import them after checking if it's installed.
from .client import DatabaseClient # NOQA isort:skip
from .creation import DatabaseCreation # NOQA isort:skip
from .features import DatabaseFeatures # NOQA isort:skip
from .introspection import DatabaseIntrospection # NOQA isort:skip
from .operations import DatabaseOperations # NOQA isort:skip
from .schema import DatabaseSchemaEditor # NOQA isort:skip
from .utils import Oracle_datetime # NOQA isort:skip
from .validation import DatabaseValidation # NOQA isort:skip
@contextmanager
def wrap_oracle_errors():
try:
yield
except Database.DatabaseError as e:
# cx_Oracle raises a cx_Oracle.DatabaseError exception with the
# following attributes and values:
# code = 2091
# message = 'ORA-02091: transaction rolled back
# 'ORA-02291: integrity constraint (TEST_DJANGOTEST.SYS
# _C00102056) violated - parent key not found'
# or:
# 'ORA-00001: unique constraint (DJANGOTEST.DEFERRABLE_
# PINK_CONSTRAINT) violated
# Convert that case to Django's IntegrityError exception.
x = e.args[0]
if (
hasattr(x, 'code') and
hasattr(x, 'message') and
x.code == 2091 and
('ORA-02291' in x.message or 'ORA-00001' in x.message)
):
raise IntegrityError(*tuple(e.args))
raise
class _UninitializedOperatorsDescriptor:
def __get__(self, instance, cls=None):
# If connection.operators is looked up before a connection has been
# created, transparently initialize connection.operators to avert an
# AttributeError.
if instance is None:
raise AttributeError("operators not available as class attribute")
# Creating a cursor will initialize the operators.
instance.cursor().close()
return instance.__dict__['operators']
class DatabaseWrapper(BaseDatabaseWrapper):
vendor = 'oracle'
display_name = 'Oracle'
# This dictionary maps Field objects to their associated Oracle column
# types, as strings. Column-type strings can contain format strings; they'll
# be interpolated against the values of Field.__dict__ before being output.
# If a column type is set to None, it won't be included in the output.
#
# Any format strings starting with "qn_" are quoted before being used in the
# output (the "qn_" prefix is stripped before the lookup is performed.
data_types = {
'AutoField': 'NUMBER(11) GENERATED BY DEFAULT ON NULL AS IDENTITY',
'BigAutoField': 'NUMBER(19) GENERATED BY DEFAULT ON NULL AS IDENTITY',
'BinaryField': 'BLOB',
'BooleanField': 'NUMBER(1)',
'CharField': 'NVARCHAR2(%(max_length)s)',
'DateField': 'DATE',
'DateTimeField': 'TIMESTAMP',
'DecimalField': 'NUMBER(%(max_digits)s, %(decimal_places)s)',
'DurationField': 'INTERVAL DAY(9) TO SECOND(6)',
'FileField': 'NVARCHAR2(%(max_length)s)',
'FilePathField': 'NVARCHAR2(%(max_length)s)',
'FloatField': 'DOUBLE PRECISION',
'IntegerField': 'NUMBER(11)',
'JSONField': 'NCLOB',
'BigIntegerField': 'NUMBER(19)',
'IPAddressField': 'VARCHAR2(15)',
'GenericIPAddressField': 'VARCHAR2(39)',
'NullBooleanField': 'NUMBER(1)',
'OneToOneField': 'NUMBER(11)',
'PositiveBigIntegerField': 'NUMBER(19)',
'PositiveIntegerField': 'NUMBER(11)',
'PositiveSmallIntegerField': 'NUMBER(11)',
'SlugField': 'NVARCHAR2(%(max_length)s)',
'SmallAutoField': 'NUMBER(5) GENERATED BY DEFAULT ON NULL AS IDENTITY',
'SmallIntegerField': 'NUMBER(11)',
'TextField': 'NCLOB',
'TimeField': 'TIMESTAMP',
'URLField': 'VARCHAR2(%(max_length)s)',
'UUIDField': 'VARCHAR2(32)',
}
data_type_check_constraints = {
'BooleanField': '%(qn_column)s IN (0,1)',
'JSONField': '%(qn_column)s IS JSON',
'NullBooleanField': '%(qn_column)s IN (0,1)',
'PositiveBigIntegerField': '%(qn_column)s >= 0',
'PositiveIntegerField': '%(qn_column)s >= 0',
'PositiveSmallIntegerField': '%(qn_column)s >= 0',
}
# Oracle doesn't support a database index on these columns.
_limited_data_types = ('clob', 'nclob', 'blob')
operators = _UninitializedOperatorsDescriptor()
_standard_operators = {
'exact': '= %s',
'iexact': '= UPPER(%s)',
'contains': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
'icontains': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
'endswith': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
'istartswith': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
'iendswith': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
}
_likec_operators = {
**_standard_operators,
'contains': "LIKEC %s ESCAPE '\\'",
'icontains': "LIKEC UPPER(%s) ESCAPE '\\'",
'startswith': "LIKEC %s ESCAPE '\\'",
'endswith': "LIKEC %s ESCAPE '\\'",
'istartswith': "LIKEC UPPER(%s) ESCAPE '\\'",
'iendswith': "LIKEC UPPER(%s) ESCAPE '\\'",
}
# The patterns below are used to generate SQL pattern lookup clauses when
# the right-hand side of the lookup isn't a raw string (it might be an expression
# or the result of a bilateral transformation).
# In those cases, special characters for LIKE operators (e.g. \, %, _)
# should be escaped on the database side.
#
# Note: we use str.format() here for readability as '%' is used as a wildcard for
# the LIKE operator.
pattern_esc = r"REPLACE(REPLACE(REPLACE({}, '\', '\\'), '%%', '\%%'), '_', '\_')"
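    # e.g. a right-hand side evaluating to 50%_off is escaped to 50\%\_off,
    # so LIKE matches the % and _ literally.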
_pattern_ops = {
'contains': "'%%' || {} || '%%'",
'icontains': "'%%' || UPPER({}) || '%%'",
'startswith': "{} || '%%'",
'istartswith': "UPPER({}) || '%%'",
'endswith': "'%%' || {}",
'iendswith': "'%%' || UPPER({})",
}
_standard_pattern_ops = {k: "LIKE TRANSLATE( " + v + " USING NCHAR_CS)"
" ESCAPE TRANSLATE('\\' USING NCHAR_CS)"
for k, v in _pattern_ops.items()}
_likec_pattern_ops = {k: "LIKEC " + v + " ESCAPE '\\'"
for k, v in _pattern_ops.items()}
Database = Database
SchemaEditorClass = DatabaseSchemaEditor
# Classes instantiated in __init__().
client_class = DatabaseClient
creation_class = DatabaseCreation
features_class = DatabaseFeatures
introspection_class = DatabaseIntrospection
ops_class = DatabaseOperations
validation_class = DatabaseValidation
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
use_returning_into = self.settings_dict["OPTIONS"].get('use_returning_into', True)
self.features.can_return_columns_from_insert = use_returning_into
def _dsn(self):
settings_dict = self.settings_dict
if not settings_dict['HOST'].strip():
settings_dict['HOST'] = 'localhost'
if settings_dict['PORT']:
return Database.makedsn(settings_dict['HOST'], int(settings_dict['PORT']), settings_dict['NAME'])
return settings_dict['NAME']
def _connect_string(self):
return '%s/"%s"@%s' % (self.settings_dict['USER'], self.settings_dict['PASSWORD'], self._dsn())
def get_connection_params(self):
conn_params = self.settings_dict['OPTIONS'].copy()
if 'use_returning_into' in conn_params:
del conn_params['use_returning_into']
return conn_params
@async_unsafe
def get_new_connection(self, conn_params):
return Database.connect(
user=self.settings_dict['USER'],
password=self.settings_dict['PASSWORD'],
dsn=self._dsn(),
**conn_params,
)
def init_connection_state(self):
cursor = self.create_cursor()
# Set the territory first. The territory overrides NLS_DATE_FORMAT
# and NLS_TIMESTAMP_FORMAT to the territory default. When all of
        # these are set in a single statement it isn't clear what is supposed
# to happen.
cursor.execute("ALTER SESSION SET NLS_TERRITORY = 'AMERICA'")
# Set Oracle date to ANSI date format. This only needs to execute
# once when we create a new connection. We also set the Territory
# to 'AMERICA' which forces Sunday to evaluate to a '1' in
# TO_CHAR().
cursor.execute(
"ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD HH24:MI:SS'"
" NLS_TIMESTAMP_FORMAT = 'YYYY-MM-DD HH24:MI:SS.FF'" +
(" TIME_ZONE = 'UTC'" if settings.USE_TZ else '')
)
cursor.close()
if 'operators' not in self.__dict__:
# Ticket #14149: Check whether our LIKE implementation will
# work for this connection or we need to fall back on LIKEC.
# This check is performed only once per DatabaseWrapper
# instance per thread, since subsequent connections will use
# the same settings.
cursor = self.create_cursor()
try:
cursor.execute("SELECT 1 FROM DUAL WHERE DUMMY %s"
% self._standard_operators['contains'],
['X'])
except Database.DatabaseError:
self.operators = self._likec_operators
self.pattern_ops = self._likec_pattern_ops
else:
self.operators = self._standard_operators
self.pattern_ops = self._standard_pattern_ops
cursor.close()
self.connection.stmtcachesize = 20
# Ensure all changes are preserved even when AUTOCOMMIT is False.
if not self.get_autocommit():
self.commit()
@async_unsafe
def create_cursor(self, name=None):
return FormatStylePlaceholderCursor(self.connection)
def _commit(self):
if self.connection is not None:
with wrap_oracle_errors():
return self.connection.commit()
# Oracle doesn't support releasing savepoints. But we fake them when query
# logging is enabled to keep query counts consistent with other backends.
def _savepoint_commit(self, sid):
if self.queries_logged:
self.queries_log.append({
'sql': '-- RELEASE SAVEPOINT %s (faked)' % self.ops.quote_name(sid),
'time': '0.000',
})
def _set_autocommit(self, autocommit):
with self.wrap_database_errors:
self.connection.autocommit = autocommit
def check_constraints(self, table_names=None):
"""
Check constraints by setting them to immediate. Return them to deferred
afterward.
"""
with self.cursor() as cursor:
cursor.execute('SET CONSTRAINTS ALL IMMEDIATE')
cursor.execute('SET CONSTRAINTS ALL DEFERRED')
def is_usable(self):
try:
self.connection.ping()
except Database.Error:
return False
else:
return True
@cached_property
def oracle_version(self):
with self.temporary_connection():
return tuple(int(x) for x in self.connection.version.split('.'))
class OracleParam:
"""
Wrapper object for formatting parameters for Oracle. If the string
representation of the value is large enough (greater than 4000 characters)
the input size needs to be set as CLOB. Alternatively, if the parameter
has an `input_size` attribute, then the value of the `input_size` attribute
will be used instead. Otherwise, no input size will be set for the
parameter when executing the query.
"""
def __init__(self, param, cursor, strings_only=False):
# With raw SQL queries, datetimes can reach this function
# without being converted by DateTimeField.get_db_prep_value.
if settings.USE_TZ and (isinstance(param, datetime.datetime) and
not isinstance(param, Oracle_datetime)):
param = Oracle_datetime.from_datetime(param)
string_size = 0
# Oracle doesn't recognize True and False correctly.
if param is True:
param = 1
elif param is False:
param = 0
if hasattr(param, 'bind_parameter'):
self.force_bytes = param.bind_parameter(cursor)
elif isinstance(param, (Database.Binary, datetime.timedelta)):
self.force_bytes = param
else:
            # To transmit to the database, we need Unicode if supported.
# To get size right, we must consider bytes.
self.force_bytes = force_str(param, cursor.charset, strings_only)
if isinstance(self.force_bytes, str):
# We could optimize by only converting up to 4000 bytes here
string_size = len(force_bytes(param, cursor.charset, strings_only))
if hasattr(param, 'input_size'):
# If parameter has `input_size` attribute, use that.
self.input_size = param.input_size
elif string_size > 4000:
# Mark any string param greater than 4000 characters as a CLOB.
self.input_size = Database.CLOB
elif isinstance(param, datetime.datetime):
self.input_size = Database.TIMESTAMP
else:
self.input_size = None
class VariableWrapper:
"""
An adapter class for cursor variables that prevents the wrapped object
from being converted into a string when used to instantiate an OracleParam.
This can be used generally for any other object that should be passed into
Cursor.execute as-is.
"""
def __init__(self, var):
self.var = var
def bind_parameter(self, cursor):
return self.var
def __getattr__(self, key):
return getattr(self.var, key)
def __setattr__(self, key, value):
if key == 'var':
self.__dict__[key] = value
else:
setattr(self.var, key, value)
class FormatStylePlaceholderCursor:
"""
Django uses "format" (e.g. '%s') style placeholders, but Oracle uses ":var"
style. This fixes it -- but note that if you want to use a literal "%s" in
a query, you'll need to use "%%s".
"""
charset = 'utf-8'
def __init__(self, connection):
self.cursor = connection.cursor()
self.cursor.outputtypehandler = self._output_type_handler
@staticmethod
def _output_number_converter(value):
return decimal.Decimal(value) if '.' in value else int(value)
@staticmethod
def _get_decimal_converter(precision, scale):
if scale == 0:
return int
context = decimal.Context(prec=precision)
quantize_value = decimal.Decimal(1).scaleb(-scale)
return lambda v: decimal.Decimal(v).quantize(quantize_value, context=context)
@staticmethod
def _output_type_handler(cursor, name, defaultType, length, precision, scale):
"""
Called for each db column fetched from cursors. Return numbers as the
appropriate Python type.
"""
if defaultType == Database.NUMBER:
if scale == -127:
if precision == 0:
# NUMBER column: decimal-precision floating point.
# This will normally be an integer from a sequence,
# but it could be a decimal value.
outconverter = FormatStylePlaceholderCursor._output_number_converter
else:
# FLOAT column: binary-precision floating point.
# This comes from FloatField columns.
outconverter = float
elif precision > 0:
# NUMBER(p,s) column: decimal-precision fixed point.
# This comes from IntegerField and DecimalField columns.
outconverter = FormatStylePlaceholderCursor._get_decimal_converter(precision, scale)
else:
# No type information. This normally comes from a
# mathematical expression in the SELECT list. Guess int
# or Decimal based on whether it has a decimal point.
outconverter = FormatStylePlaceholderCursor._output_number_converter
return cursor.var(
Database.STRING,
size=255,
arraysize=cursor.arraysize,
outconverter=outconverter,
)
def _format_params(self, params):
try:
return {k: OracleParam(v, self, True) for k, v in params.items()}
except AttributeError:
return tuple(OracleParam(p, self, True) for p in params)
def _guess_input_sizes(self, params_list):
        # Params may be a mapping (set input sizes by name) or a sequence
        # (set input sizes by position).
if hasattr(params_list[0], 'keys'):
sizes = {}
for params in params_list:
for k, value in params.items():
if value.input_size:
sizes[k] = value.input_size
if sizes:
self.setinputsizes(**sizes)
else:
# It's not a list of dicts; it's a list of sequences
sizes = [None] * len(params_list[0])
for params in params_list:
for i, value in enumerate(params):
if value.input_size:
sizes[i] = value.input_size
if sizes:
self.setinputsizes(*sizes)
def _param_generator(self, params):
        # Params may be a mapping (return values by name) or a sequence
        # (return them positionally).
if hasattr(params, 'items'):
return {k: v.force_bytes for k, v in params.items()}
else:
return [p.force_bytes for p in params]
def _fix_for_params(self, query, params, unify_by_values=False):
# cx_Oracle wants no trailing ';' for SQL statements. For PL/SQL, it
        # does want a trailing ';' but not a trailing '/'. However, these
# characters must be included in the original query in case the query
# is being passed to SQL*Plus.
if query.endswith(';') or query.endswith('/'):
query = query[:-1]
if params is None:
params = []
elif hasattr(params, 'keys'):
# Handle params as dict
args = {k: ":%s" % k for k in params}
query = query % args
elif unify_by_values and params:
# Handle params as a dict with unified query parameters by their
# values. It can be used only in single query execute() because
# executemany() shares the formatted query with each of the params
# list. e.g. for input params = [0.75, 2, 0.75, 'sth', 0.75]
# params_dict = {0.75: ':arg0', 2: ':arg1', 'sth': ':arg2'}
# args = [':arg0', ':arg1', ':arg0', ':arg2', ':arg0']
# params = {':arg0': 0.75, ':arg1': 2, ':arg2': 'sth'}
params_dict = {
param: ':arg%d' % i
for i, param in enumerate(dict.fromkeys(params))
}
args = [params_dict[param] for param in params]
params = {value: key for key, value in params_dict.items()}
query = query % tuple(args)
else:
# Handle params as sequence
args = [(':arg%d' % i) for i in range(len(params))]
query = query % tuple(args)
return query, self._format_params(params)
def execute(self, query, params=None):
query, params = self._fix_for_params(query, params, unify_by_values=True)
self._guess_input_sizes([params])
with wrap_oracle_errors():
return self.cursor.execute(query, self._param_generator(params))
def executemany(self, query, params=None):
if not params:
# No params given, nothing to do
return None
# uniform treatment for sequences and iterables
params_iter = iter(params)
query, firstparams = self._fix_for_params(query, next(params_iter))
# we build a list of formatted params; as we're going to traverse it
# more than once, we can't make it lazy by using a generator
formatted = [firstparams] + [self._format_params(p) for p in params_iter]
self._guess_input_sizes(formatted)
with wrap_oracle_errors():
return self.cursor.executemany(query, [self._param_generator(p) for p in formatted])
def close(self):
try:
self.cursor.close()
except Database.InterfaceError:
# already closed
pass
def var(self, *args):
return VariableWrapper(self.cursor.var(*args))
def arrayvar(self, *args):
return VariableWrapper(self.cursor.arrayvar(*args))
def __getattr__(self, attr):
return getattr(self.cursor, attr)
def __iter__(self):
return iter(self.cursor)
|
|
from optparse import OptionParser, make_option
from dateutil.relativedelta import relativedelta
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand, CommandError
from django.db.models import Q
from django.utils import timezone
from timepiece import utils
from timepiece.entries.models import Entry
class Command(BaseCommand):
"""
Management command to check entries for overlapping times.
Use ./manage.py check_entries --help for more details
"""
    #Boilerplate for console programs using optparse
args = '<user\'s first or last name or user.id> <user\'s first...>...'
help = """Check the database for entries that overlap.
Use --help for options"""
parser = OptionParser()
parser.usage += """
./manage.py check_entries [<first or last name1> <name2>...<name n>] [OPTIONS]
For options type:
./manage.py check_entries --help
"""
    def make_options(*args, **kwargs):
"""
Define the arguments that can be used with this command
"""
return (
#Jenkins arguments to ignore
make_option('--pep8-exclude',
dest='ignore_pep8',
type='str',
default='',
help='Jenkins only'),
) + (
make_option('--coverage-exclude',
dest='ignore_coverage',
type='str',
default='',
help='Jenkins only'),
) + (
make_option('--thisweek',
action='store_true',
dest='week',
default=False,
help='Show entries from this week only'),
) + (
make_option('--thismonth',
action='store_true',
dest='month',
default=False,
help='Show entries from this month only'),
) + (
make_option('-y', '--thisyear',
action='store_true',
dest='year',
default=False,
help='Show entries from this year only'),
) + (
make_option('-a', '--all', '--forever',
action='store_true',
dest='all',
default=False,
help='Show entries from all recorded history'),
) + (
make_option('-d', '--days',
dest='days',
type='int',
default=0,
help='Show entries for the last n days only'),
)
    option_list = BaseCommand.option_list + make_options()
parser.add_options(option_list)
(options, args) = parser.parse_args()
def handle(self, *args, **kwargs):
"""
main()
"""
verbosity = kwargs.get('verbosity', 1)
start = self.find_start(**kwargs)
users = self.find_users(*args)
self.show_init(start, *args, **kwargs)
all_entries = self.find_entries(users, start, *args, **kwargs)
all_overlaps = self.check_all(all_entries, *args, **kwargs)
if verbosity >= 1:
print 'Total overlapping entries: %d' % all_overlaps
def check_all(self, all_entries, *args, **kwargs):
"""
Go through lists of entries, find overlaps among each, return the total
"""
        all_overlaps = 0
        for user_entries in all_entries:
            user_total_overlaps = self.check_entry(
                user_entries, *args, **kwargs)
            all_overlaps += user_total_overlaps
        return all_overlaps
def check_entry(self, entries, *args, **kwargs):
"""
With a list of entries, check each entry against every other
"""
verbosity = kwargs.get('verbosity', 1)
user_total_overlaps = 0
user = ''
for index_a, entry_a in enumerate(entries):
#Show the name the first time through
if index_a == 0:
                if (args and verbosity >= 1) or verbosity >= 2:
self.show_name(entry_a.user)
user = entry_a.user
for index_b in range(index_a, len(entries)):
entry_b = entries[index_b]
if entry_a.check_overlap(entry_b):
user_total_overlaps += 1
self.show_overlap(entry_a, entry_b, verbosity=verbosity)
if user_total_overlaps and user and verbosity >= 1:
overlap_data = {
'first': user.first_name,
'last': user.last_name,
'total': user_total_overlaps,
}
print 'Total overlapping entries for user ' + \
'%(first)s %(last)s: %(total)d' % overlap_data
return user_total_overlaps
def find_start(self, **kwargs):
"""
Determine the starting point of the query using CLI keyword arguments
"""
week = kwargs.get('week', False)
month = kwargs.get('month', False)
year = kwargs.get('year', False)
days = kwargs.get('days', 0)
#If no flags are True, set to the beginning of last billing window
#to assure we catch all recent violations
start = timezone.now() - relativedelta(months=1, day=1)
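        #e.g. run on 2013-05-17, the default start is 2013-04-01 (the time
        #component is zeroed below)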
#Set the start date based on arguments provided through options
if week:
start = utils.get_week_start()
if month:
start = timezone.now() - relativedelta(day=1)
if year:
start = timezone.now() - relativedelta(day=1, month=1)
if days:
start = timezone.now() - relativedelta(days=days)
start -= relativedelta(hour=0, minute=0, second=0, microsecond=0)
return start
def find_users(self, *args):
"""
        Return the users to search, given names as args.
        Return all users if no args are provided.
"""
if args:
names = reduce(lambda query, arg: query |
(Q(first_name__icontains=arg) | Q(last_name__icontains=arg)),
args, Q())
users = User.objects.filter(names)
#If no args given, check every user
else:
users = User.objects.all()
#Display errors if no user was found
if not users.count() and args:
if len(args) == 1:
raise CommandError('No user was found with the name %s' \
% args[0])
else:
arg_list = ', '.join(args)
raise CommandError('No users found with the names: %s' \
% arg_list)
return users
def find_entries(self, users, start, *args, **kwargs):
"""
        Find all entries for the given users from a given starting point.
        With the --all option, every recorded entry is returned.
"""
forever = kwargs.get('all', False)
for user in users:
if forever:
entries = Entry.objects.filter(user=user).order_by('start_time')
else:
entries = Entry.objects.filter(
user=user, start_time__gte=start).order_by(
'start_time')
yield entries
#output methods
def show_init(self, start, *args, **kwargs):
forever = kwargs.get('all', False)
verbosity = kwargs.get('verbosity', 1)
if forever:
if verbosity >= 1:
print 'Checking overlaps from the beginning ' + \
'of time'
else:
if verbosity >= 1:
print 'Checking overlap starting on: ' + \
start.strftime('%m/%d/%Y')
def show_name(self, user):
print 'Checking %s %s...' % \
(user.first_name, user.last_name)
def show_overlap(self, entry_a, entry_b=None, **kwargs):
def make_output_data(entry):
            return {
'first_name': entry.user.first_name,
'last_name': entry.user.last_name,
'entry': entry.id,
'start': entry.start_time,
'end': entry.end_time,
'project': entry.project
}
data_a = make_output_data(entry_a)
if entry_b:
data_b = make_output_data(entry_b)
output = 'Entry %(entry)d for %(first_name)s %(last_name)s from ' \
% data_a + '%(start)s to %(end)s on %(project)s overlaps ' \
% data_a + 'entry %(entry)d from %(start)s to %(end)s on ' \
% data_b + '%(project)s.' % data_b
else:
output = 'Entry %(entry)d for %(first_name)s %(last_name)s from ' \
% data_a + '%(start)s to %(end)s on %(project)s overlaps ' \
% data_a + 'with another entry.'
if kwargs.get('verbosity', 1):
print output
|
|
# Copyright 2012 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import ddt
import iso8601
import mock
from oslo_config import cfg
from six.moves import http_client
import webob.exc
from cinder.api.contrib import services
from cinder.api import extensions
from cinder.api import microversions as mv
from cinder.common import constants
from cinder import context
from cinder import exception
from cinder import objects
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.tests.unit import fake_constants as fake
CONF = cfg.CONF
fake_services_list = [
{'binary': 'cinder-scheduler',
'host': 'host1',
'cluster_name': None,
'availability_zone': 'cinder',
'id': 1,
'disabled': True,
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2),
'created_at': datetime.datetime(2012, 9, 18, 2, 46, 27),
'disabled_reason': 'test1',
'modified_at': '',
'uuid': 'a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824'},
{'binary': 'cinder-volume',
'host': 'host1',
'cluster_name': None,
'availability_zone': 'cinder',
'id': 2,
'disabled': True,
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5),
'created_at': datetime.datetime(2012, 9, 18, 2, 46, 27),
'disabled_reason': 'test2',
'modified_at': '',
'uuid': '4200b32b-0bf9-436c-86b2-0675f6ac218e'},
{'binary': 'cinder-scheduler',
'host': 'host2',
'cluster_name': 'cluster1',
'availability_zone': 'cinder',
'id': 3,
'disabled': False,
'updated_at': datetime.datetime(2012, 9, 19, 6, 55, 34),
'created_at': datetime.datetime(2012, 9, 18, 2, 46, 28),
'disabled_reason': '',
'modified_at': '',
'uuid': '6d91e7f5-ca17-4e3b-bf4f-19ca77166dd7'},
{'binary': 'cinder-volume',
'host': 'host2',
'cluster_name': 'cluster1',
'availability_zone': 'cinder',
'id': 4,
'disabled': True,
'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38),
'created_at': datetime.datetime(2012, 9, 18, 2, 46, 28),
'disabled_reason': 'test4',
'modified_at': '',
'uuid': '18417850-2ca9-43d1-9619-ae16bfb0f655'},
{'binary': 'cinder-volume',
'host': 'host2',
'cluster_name': 'cluster2',
'availability_zone': 'cinder',
'id': 5,
'disabled': True,
'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38),
'created_at': datetime.datetime(2012, 9, 18, 2, 46, 28),
'disabled_reason': 'test5',
'modified_at': datetime.datetime(2012, 10, 29, 13, 42, 5),
'uuid': 'f838f35c-4035-464f-9792-ce60e390c13d'},
{'binary': 'cinder-volume',
'host': 'host2',
'cluster_name': 'cluster2',
'availability_zone': 'cinder',
'id': 6,
'disabled': False,
'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38),
'created_at': datetime.datetime(2012, 9, 18, 2, 46, 28),
'disabled_reason': '',
'modified_at': datetime.datetime(2012, 9, 18, 8, 1, 38),
'uuid': 'f2825a00-cc2f-493d-9635-003e01db8b3d'},
{'binary': 'cinder-scheduler',
'host': 'host2',
'cluster_name': None,
'availability_zone': 'cinder',
'id': 7,
'disabled': False,
'updated_at': None,
'created_at': datetime.datetime(2012, 9, 18, 2, 46, 28),
'disabled_reason': '',
'modified_at': None,
'uuid': '35fcf841-1974-4944-a798-1fb6d0a44972'},
]
class FakeRequest(object):
environ = {"cinder.context": context.get_admin_context()}
def __init__(self, version=mv.BASE_VERSION, **kwargs):
self.GET = kwargs
self.headers = mv.get_mv_header(version)
self.api_version_request = mv.get_api_version(version)
class FakeRequestWithBinary(FakeRequest):
def __init__(self, **kwargs):
kwargs.setdefault('binary', constants.VOLUME_BINARY)
super(FakeRequestWithBinary, self).__init__(**kwargs)
class FakeRequestWithHost(FakeRequest):
def __init__(self, **kwargs):
kwargs.setdefault('host', 'host1')
super(FakeRequestWithHost, self).__init__(**kwargs)
class FakeRequestWithHostBinary(FakeRequestWithBinary):
def __init__(self, **kwargs):
kwargs.setdefault('host', 'host1')
super(FakeRequestWithHostBinary, self).__init__(**kwargs)
def fake_service_get_all(context, **filters):
result = []
host = filters.pop('host', None)
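    # A host filter matches both exact hosts and backend-qualified ones,
    # e.g. 'host1' also matches a service reporting 'host1@lvm'.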
for service in fake_services_list:
if (host and service['host'] != host and
not service['host'].startswith(host + '@')):
continue
if all(v is None or service.get(k) == v for k, v in filters.items()):
result.append(service)
return result
def fake_service_get(context, service_id=None, **filters):
result = fake_service_get_all(context, id=service_id, **filters)
if not result:
raise exception.ServiceNotFound(service_id=service_id)
return result[0]
def fake_service_get_by_id(value):
for service in fake_services_list:
if service['id'] == value:
return service
return None
def fake_service_update(context, service_id, values):
    service = fake_service_get_by_id(service_id)
    if service is None:
        raise exception.ServiceNotFound(service_id=service_id)
    # Nothing in these tests inspects the result of the update, so only the
    # existence check (and the ServiceNotFound case) needs to be mimicked.
def fake_policy_authorize(context, action, target,
do_raise=True, exc=exception.PolicyNotAuthorized):
pass
def fake_utcnow(with_timezone=False):
tzinfo = iso8601.UTC if with_timezone else None
return datetime.datetime(2012, 10, 29, 13, 42, 11, tzinfo=tzinfo)
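# Freezing "now" keeps the computed up/down 'state' values in the expected
# responses below deterministic.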
def fake_get_pools(ctxt, filters=None):
return [{"name": "host1", "capabilities": {"backend_state": "up"}},
{"name": "host2", "capabilities": {"backend_state": "down"}}]
@ddt.ddt
@mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.get_pools', fake_get_pools)
@mock.patch('cinder.db.service_get_all', fake_service_get_all)
@mock.patch('cinder.db.service_get', fake_service_get)
@mock.patch('oslo_utils.timeutils.utcnow', fake_utcnow)
@mock.patch('cinder.db.sqlalchemy.api.service_update', fake_service_update)
@mock.patch('cinder.policy.authorize', fake_policy_authorize)
class ServicesTest(test.TestCase):
def setUp(self):
super(ServicesTest, self).setUp()
self.context = context.get_admin_context()
self.ext_mgr = extensions.ExtensionManager()
self.ext_mgr.extensions = {}
self.controller = services.ServiceController(self.ext_mgr)
def test_services_list(self):
req = FakeRequest()
res_dict = self.controller.index(req)
response = {'services': [{'binary': 'cinder-scheduler',
'host': 'host1', 'zone': 'cinder',
'status': 'disabled', 'state': 'up',
'updated_at': datetime.datetime(
2012, 10, 29, 13, 42, 2)},
{'binary': 'cinder-volume',
'host': 'host1', 'zone': 'cinder',
'status': 'disabled', 'state': 'up',
'updated_at': datetime.datetime(
2012, 10, 29, 13, 42, 5)},
{'binary': 'cinder-scheduler',
'host': 'host2',
'zone': 'cinder',
'status': 'enabled', 'state': 'down',
'updated_at': datetime.datetime(
2012, 9, 19, 6, 55, 34)},
{'binary': 'cinder-volume',
'host': 'host2',
'zone': 'cinder',
'status': 'disabled', 'state': 'down',
'updated_at': datetime.datetime(
2012, 9, 18, 8, 3, 38)},
{'binary': 'cinder-volume',
'host': 'host2',
'zone': 'cinder',
'status': 'disabled', 'state': 'down',
'updated_at': datetime.datetime(
2012, 10, 29, 13, 42, 5)},
{'binary': 'cinder-volume',
'host': 'host2',
'zone': 'cinder',
'status': 'enabled', 'state': 'down',
'updated_at': datetime.datetime(
2012, 9, 18, 8, 3, 38)},
{'binary': 'cinder-scheduler',
'host': 'host2',
'zone': 'cinder',
'status': 'enabled', 'state': 'down',
'updated_at': None},
]}
self.assertEqual(response, res_dict)
def test_failover_old_version(self):
req = FakeRequest(version=mv.BACKUP_PROJECT)
self.assertRaises(exception.InvalidInput, self.controller.update, req,
'failover', {'cluster': 'cluster1'})
def test_failover_no_values(self):
req = FakeRequest(version=mv.REPLICATION_CLUSTER)
self.assertRaises(exception.InvalidInput, self.controller.update, req,
'failover', {'backend_id': 'replica1'})
@ddt.data({'host': 'hostname'}, {'cluster': 'mycluster'})
@mock.patch('cinder.volume.api.API.failover')
def test_failover(self, body, failover_mock):
req = FakeRequest(version=mv.REPLICATION_CLUSTER)
body['backend_id'] = 'replica1'
res = self.controller.update(req, 'failover', body)
self.assertEqual(202, res.status_code)
failover_mock.assert_called_once_with(req.environ['cinder.context'],
body.get('host'),
body.get('cluster'), 'replica1')
@ddt.data({}, {'host': 'hostname', 'cluster': 'mycluster'})
@mock.patch('cinder.volume.api.API.failover')
def test_failover_invalid_input(self, body, failover_mock):
req = FakeRequest(version=mv.REPLICATION_CLUSTER)
body['backend_id'] = 'replica1'
self.assertRaises(exception.InvalidInput,
self.controller.update, req, 'failover', body)
failover_mock.assert_not_called()
def test_services_list_with_cluster_name(self):
req = FakeRequest(version=mv.CLUSTER_SUPPORT)
res_dict = self.controller.index(req)
response = {'services': [{'binary': 'cinder-scheduler',
'cluster': None,
'host': 'host1', 'zone': 'cinder',
'status': 'disabled', 'state': 'up',
'updated_at': datetime.datetime(
2012, 10, 29, 13, 42, 2)},
{'binary': 'cinder-volume',
'cluster': None,
'host': 'host1', 'zone': 'cinder',
'status': 'disabled', 'state': 'up',
'updated_at': datetime.datetime(
2012, 10, 29, 13, 42, 5)},
{'binary': 'cinder-scheduler',
'cluster': 'cluster1',
'host': 'host2',
'zone': 'cinder',
'status': 'enabled', 'state': 'down',
'updated_at': datetime.datetime(
2012, 9, 19, 6, 55, 34)},
{'binary': 'cinder-volume',
'cluster': 'cluster1',
'host': 'host2',
'zone': 'cinder',
'status': 'disabled', 'state': 'down',
'updated_at': datetime.datetime(
2012, 9, 18, 8, 3, 38)},
{'binary': 'cinder-volume',
'cluster': 'cluster2',
'host': 'host2',
'zone': 'cinder',
'status': 'disabled', 'state': 'down',
'updated_at': datetime.datetime(
2012, 10, 29, 13, 42, 5)},
{'binary': 'cinder-volume',
'cluster': 'cluster2',
'host': 'host2',
'zone': 'cinder',
'status': 'enabled', 'state': 'down',
'updated_at': datetime.datetime(
2012, 9, 18, 8, 3, 38)},
{'binary': 'cinder-scheduler',
'cluster': None,
'host': 'host2',
'zone': 'cinder',
'status': 'enabled', 'state': 'down',
'updated_at': None},
]}
self.assertEqual(response, res_dict)
def test_services_list_with_backend_state(self):
req = FakeRequest(version=mv.BACKEND_STATE_REPORT)
res_dict = self.controller.index(req)
response = {'services': [{'binary': 'cinder-scheduler',
'cluster': None,
'host': 'host1', 'zone': 'cinder',
'status': 'disabled', 'state': 'up',
'updated_at': datetime.datetime(
2012, 10, 29, 13, 42, 2)},
{'binary': 'cinder-volume',
'cluster': None,
'host': 'host1', 'zone': 'cinder',
'status': 'disabled', 'state': 'up',
'updated_at': datetime.datetime(
2012, 10, 29, 13, 42, 5),
'backend_state': 'up'},
{'binary': 'cinder-scheduler',
'cluster': 'cluster1',
'host': 'host2',
'zone': 'cinder',
'status': 'enabled', 'state': 'down',
'updated_at': datetime.datetime(
2012, 9, 19, 6, 55, 34)},
{'binary': 'cinder-volume',
'cluster': 'cluster1',
'host': 'host2',
'zone': 'cinder',
'status': 'disabled', 'state': 'down',
'updated_at': datetime.datetime(
2012, 9, 18, 8, 3, 38),
'backend_state': 'down'},
{'binary': 'cinder-volume',
'cluster': 'cluster2',
'host': 'host2',
'zone': 'cinder',
'status': 'disabled', 'state': 'down',
'updated_at': datetime.datetime(
2012, 10, 29, 13, 42, 5),
'backend_state': 'down'},
{'binary': 'cinder-volume',
'cluster': 'cluster2',
'host': 'host2',
'zone': 'cinder',
'status': 'enabled', 'state': 'down',
'updated_at': datetime.datetime(
2012, 9, 18, 8, 3, 38),
'backend_state': 'down'},
{'binary': 'cinder-scheduler',
'cluster': None,
'host': 'host2',
'zone': 'cinder',
'status': 'enabled', 'state': 'down',
'updated_at': None},
]}
self.assertEqual(response, res_dict)
def test_services_detail(self):
self.ext_mgr.extensions['os-extended-services'] = True
self.controller = services.ServiceController(self.ext_mgr)
req = FakeRequest()
res_dict = self.controller.index(req)
response = {'services': [{'binary': 'cinder-scheduler',
'host': 'host1', 'zone': 'cinder',
'status': 'disabled', 'state': 'up',
'updated_at': datetime.datetime(
2012, 10, 29, 13, 42, 2),
'disabled_reason': 'test1'},
{'binary': 'cinder-volume',
'replication_status': None,
'active_backend_id': None,
'frozen': False,
'host': 'host1', 'zone': 'cinder',
'status': 'disabled', 'state': 'up',
'updated_at': datetime.datetime(
2012, 10, 29, 13, 42, 5),
'disabled_reason': 'test2'},
{'binary': 'cinder-scheduler',
'host': 'host2',
'zone': 'cinder',
'status': 'enabled', 'state': 'down',
'updated_at': datetime.datetime(
2012, 9, 19, 6, 55, 34),
'disabled_reason': ''},
{'binary': 'cinder-volume',
'replication_status': None,
'active_backend_id': None,
'frozen': False,
'host': 'host2',
'zone': 'cinder',
'status': 'disabled', 'state': 'down',
'updated_at': datetime.datetime(
2012, 9, 18, 8, 3, 38),
'disabled_reason': 'test4'},
{'binary': 'cinder-volume',
'replication_status': None,
'active_backend_id': None,
'frozen': False,
'host': 'host2',
'zone': 'cinder',
'status': 'disabled', 'state': 'down',
'updated_at': datetime.datetime(
2012, 10, 29, 13, 42, 5),
'disabled_reason': 'test5'},
{'binary': 'cinder-volume',
'replication_status': None,
'active_backend_id': None,
'frozen': False,
'host': 'host2',
'zone': 'cinder',
'status': 'enabled', 'state': 'down',
'updated_at': datetime.datetime(
2012, 9, 18, 8, 3, 38),
'disabled_reason': ''},
{'binary': 'cinder-scheduler',
'host': 'host2',
'zone': 'cinder',
'status': 'enabled', 'state': 'down',
'updated_at': None,
'disabled_reason': ''},
]}
self.assertEqual(response, res_dict)
def test_services_list_with_host(self):
req = FakeRequestWithHost()
res_dict = self.controller.index(req)
response = {'services': [
{'binary': 'cinder-scheduler',
'host': 'host1',
'zone': 'cinder',
'status': 'disabled', 'state': 'up',
'updated_at': datetime.datetime(2012, 10,
29, 13, 42, 2)},
{'binary': 'cinder-volume',
'host': 'host1',
'zone': 'cinder',
'status': 'disabled', 'state': 'up',
'updated_at': datetime.datetime(2012, 10, 29,
13, 42, 5)}]}
self.assertEqual(response, res_dict)
def test_services_detail_with_host(self):
self.ext_mgr.extensions['os-extended-services'] = True
self.controller = services.ServiceController(self.ext_mgr)
req = FakeRequestWithHost()
res_dict = self.controller.index(req)
response = {'services': [
{'binary': 'cinder-scheduler',
'host': 'host1',
'zone': 'cinder',
'status': 'disabled', 'state': 'up',
'updated_at': datetime.datetime(2012, 10,
29, 13, 42, 2),
'disabled_reason': 'test1'},
{'binary': 'cinder-volume',
'frozen': False,
'replication_status': None,
'active_backend_id': None,
'host': 'host1',
'zone': 'cinder',
'status': 'disabled', 'state': 'up',
'updated_at': datetime.datetime(2012, 10, 29,
13, 42, 5),
'disabled_reason': 'test2'}]}
self.assertEqual(response, res_dict)
def test_services_list_with_binary(self):
req = FakeRequestWithBinary()
res_dict = self.controller.index(req)
response = {'services': [
{'binary': 'cinder-volume',
'host': 'host1',
'zone': 'cinder',
'status': 'disabled',
'state': 'up',
'updated_at': datetime.datetime(2012, 10, 29,
13, 42, 5)},
{'binary': 'cinder-volume',
'host': 'host2',
'zone': 'cinder',
'status': 'disabled',
'state': 'down',
'updated_at': datetime.datetime(2012, 9, 18,
8, 3, 38)},
{'binary': 'cinder-volume',
'host': 'host2',
'zone': 'cinder',
'status': 'disabled',
'state': 'down',
'updated_at': datetime.datetime(2012, 10, 29,
13, 42, 5)},
{'binary': 'cinder-volume',
'host': 'host2',
'zone': 'cinder',
'status': 'enabled',
'state': 'down',
'updated_at': datetime.datetime(2012, 9, 18,
8, 3, 38)}]}
self.assertEqual(response, res_dict)
def test_services_detail_with_binary(self):
self.ext_mgr.extensions['os-extended-services'] = True
self.controller = services.ServiceController(self.ext_mgr)
req = FakeRequestWithBinary()
res_dict = self.controller.index(req)
response = {'services': [
{'binary': 'cinder-volume',
'replication_status': None,
'active_backend_id': None,
'host': 'host1',
'zone': 'cinder',
'status': 'disabled',
'state': 'up',
'frozen': False,
'updated_at': datetime.datetime(2012, 10, 29,
13, 42, 5),
'disabled_reason': 'test2'},
{'binary': 'cinder-volume',
'replication_status': None,
'active_backend_id': None,
'host': 'host2',
'zone': 'cinder',
'status': 'disabled',
'state': 'down',
'frozen': False,
'updated_at': datetime.datetime(2012, 9, 18,
8, 3, 38),
'disabled_reason': 'test4'},
{'binary': 'cinder-volume',
'replication_status': None,
'active_backend_id': None,
'host': 'host2',
'zone': 'cinder',
'status': 'disabled',
'state': 'down',
'frozen': False,
'updated_at': datetime.datetime(2012, 10, 29,
13, 42, 5),
'disabled_reason': 'test5'},
{'binary': 'cinder-volume',
'replication_status': None,
'active_backend_id': None,
'host': 'host2',
'zone': 'cinder',
'status': 'enabled',
'state': 'down',
'frozen': False,
'updated_at': datetime.datetime(2012, 9, 18,
8, 3, 38),
'disabled_reason': ''}]}
self.assertEqual(response, res_dict)
def test_services_list_with_host_binary(self):
req = FakeRequestWithHostBinary()
res_dict = self.controller.index(req)
response = {'services': [
{'binary': 'cinder-volume',
'host': 'host1',
'zone': 'cinder',
'status': 'disabled',
'state': 'up',
'updated_at': datetime.datetime(2012, 10, 29,
13, 42, 5)}]}
self.assertEqual(response, res_dict)
def test_services_detail_with_host_binary(self):
self.ext_mgr.extensions['os-extended-services'] = True
self.controller = services.ServiceController(self.ext_mgr)
req = FakeRequestWithHostBinary()
res_dict = self.controller.index(req)
response = {'services': [
{'binary': 'cinder-volume',
'replication_status': None,
'active_backend_id': None,
'frozen': False,
'host': 'host1',
'zone': 'cinder',
'status': 'disabled',
'state': 'up',
'updated_at': datetime.datetime(2012, 10, 29,
13, 42, 5),
'disabled_reason': 'test2'}]}
self.assertEqual(response, res_dict)
def test_services_enable_with_service_key(self):
body = {'host': 'host1', 'service': constants.VOLUME_BINARY}
req = fakes.HTTPRequest.blank(
'/v2/%s/os-services/enable' % fake.PROJECT_ID)
res_dict = self.controller.update(req, "enable", body)
self.assertEqual('enabled', res_dict['status'])
def test_services_enable_with_binary_key(self):
body = {'host': 'host1', 'binary': constants.VOLUME_BINARY}
req = fakes.HTTPRequest.blank(
'/v2/%s/os-services/enable' % fake.PROJECT_ID)
res_dict = self.controller.update(req, "enable", body)
self.assertEqual('enabled', res_dict['status'])
def test_services_disable_with_service_key(self):
req = fakes.HTTPRequest.blank(
'/v2/%s/os-services/disable' % fake.PROJECT_ID)
body = {'host': 'host1', 'service': constants.VOLUME_BINARY}
res_dict = self.controller.update(req, "disable", body)
self.assertEqual('disabled', res_dict['status'])
def test_services_disable_with_binary_key(self):
req = fakes.HTTPRequest.blank(
'/v2/%s/os-services/disable' % fake.PROJECT_ID)
body = {'host': 'host1', 'binary': constants.VOLUME_BINARY}
res_dict = self.controller.update(req, "disable", body)
self.assertEqual('disabled', res_dict['status'])
def test_services_disable_log_reason(self):
self.ext_mgr.extensions['os-extended-services'] = True
self.controller = services.ServiceController(self.ext_mgr)
req = (
fakes.HTTPRequest.blank('v1/fake/os-services/disable-log-reason'))
body = {'host': 'host1',
'binary': 'cinder-scheduler',
'disabled_reason': 'test-reason',
}
res_dict = self.controller.update(req, "disable-log-reason", body)
self.assertEqual('disabled', res_dict['status'])
self.assertEqual('test-reason', res_dict['disabled_reason'])
def test_services_disable_log_reason_unicode(self):
self.ext_mgr.extensions['os-extended-services'] = True
self.controller = services.ServiceController(self.ext_mgr)
req = (
fakes.HTTPRequest.blank('v1/fake/os-services/disable-log-reason'))
body = {'host': 'host1',
'binary': 'cinder-scheduler',
'disabled_reason': u'test-reason',
}
res_dict = self.controller.update(req, "disable-log-reason", body)
self.assertEqual('disabled', res_dict['status'])
self.assertEqual('test-reason', res_dict['disabled_reason'])
def test_services_disable_log_reason_none(self):
self.ext_mgr.extensions['os-extended-services'] = True
self.controller = services.ServiceController(self.ext_mgr)
req = (
fakes.HTTPRequest.blank('v1/fake/os-services/disable-log-reason'))
body = {'host': 'host1',
'binary': 'cinder-scheduler',
'disabled_reason': None,
}
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update,
req, "disable-log-reason", body)
def test_invalid_reason_field(self):
        # Check that whitespace-only strings are not allowed
reason = ' ' * 10
self.assertFalse(self.controller._is_valid_as_reason(reason))
reason = 'a' * 256
self.assertFalse(self.controller._is_valid_as_reason(reason))
# Check that spaces at the end are also counted
reason = 'a' * 255 + ' '
self.assertFalse(self.controller._is_valid_as_reason(reason))
reason = 'it\'s a valid reason.'
self.assertTrue(self.controller._is_valid_as_reason(reason))
reason = None
self.assertFalse(self.controller._is_valid_as_reason(reason))
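    # A minimal sketch of the rule exercised above, assuming the validator in
    # cinder.api.contrib.services behaves roughly like this (illustrative
    # assumption, not the actual implementation):
    #   def _is_valid_as_reason(reason):
    #       return bool(reason) and bool(reason.strip()) and len(reason) <= 255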
def test_services_failover_host(self):
url = '/v2/%s/os-services/failover_host' % fake.PROJECT_ID
req = fakes.HTTPRequest.blank(url)
body = {'host': mock.sentinel.host,
'backend_id': mock.sentinel.backend_id}
with mock.patch.object(self.controller.volume_api, 'failover') \
as failover_mock:
res = self.controller.update(req, 'failover_host', body)
failover_mock.assert_called_once_with(req.environ['cinder.context'],
mock.sentinel.host,
None,
mock.sentinel.backend_id)
self.assertEqual(http_client.ACCEPTED, res.status_code)
@ddt.data(('failover_host', {'host': mock.sentinel.host,
'backend_id': mock.sentinel.backend_id}),
('freeze', {'host': mock.sentinel.host}),
('thaw', {'host': mock.sentinel.host}))
@ddt.unpack
@mock.patch('cinder.objects.ServiceList.get_all')
def test_services_action_host_not_found(self, method, body,
mock_get_all_services):
url = '/v2/%s/os-services/%s' % (fake.PROJECT_ID, method)
req = fakes.HTTPRequest.blank(url)
mock_get_all_services.return_value = []
msg = 'No service found with host=%s' % mock.sentinel.host
result = self.assertRaises(exception.InvalidInput,
self.controller.update,
req, method, body)
self.assertEqual(msg, result.msg)
@ddt.data(('failover', {'cluster': mock.sentinel.cluster,
'backend_id': mock.sentinel.backend_id}),
('freeze', {'cluster': mock.sentinel.cluster}),
('thaw', {'cluster': mock.sentinel.cluster}))
@ddt.unpack
@mock.patch('cinder.objects.ServiceList.get_all')
def test_services_action_cluster_not_found(self, method, body,
mock_get_all_services):
url = '/v3/%s/os-services/%s' % (fake.PROJECT_ID, method)
req = fakes.HTTPRequest.blank(url, version=mv.REPLICATION_CLUSTER)
mock_get_all_services.return_value = []
msg = 'No service found with cluster=%s' % mock.sentinel.cluster
result = self.assertRaises(exception.InvalidInput,
self.controller.update, req,
method, body)
self.assertEqual(msg, result.msg)
def test_services_freeze(self):
url = '/v2/%s/os-services/freeze' % fake.PROJECT_ID
req = fakes.HTTPRequest.blank(url)
body = {'host': mock.sentinel.host}
with mock.patch.object(self.controller.volume_api, 'freeze_host') \
as freeze_mock:
res = self.controller.update(req, 'freeze', body)
freeze_mock.assert_called_once_with(req.environ['cinder.context'],
mock.sentinel.host, None)
self.assertEqual(freeze_mock.return_value, res)
def test_services_thaw(self):
url = '/v2/%s/os-services/thaw' % fake.PROJECT_ID
req = fakes.HTTPRequest.blank(url)
body = {'host': mock.sentinel.host}
with mock.patch.object(self.controller.volume_api, 'thaw_host') \
as thaw_mock:
res = self.controller.update(req, 'thaw', body)
thaw_mock.assert_called_once_with(req.environ['cinder.context'],
mock.sentinel.host, None)
self.assertEqual(thaw_mock.return_value, res)
@ddt.data('freeze', 'thaw', 'failover_host')
def test_services_replication_calls_no_host(self, method):
url = '/v2/%s/os-services/%s' % (fake.PROJECT_ID, method)
req = fakes.HTTPRequest.blank(url)
self.assertRaises(exception.InvalidInput,
self.controller.update, req, method, {})
@mock.patch('cinder.api.contrib.services.ServiceController._set_log')
def test_set_log(self, set_log_mock):
set_log_mock.return_value = None
req = FakeRequest(version=mv.LOG_LEVEL)
body = mock.sentinel.body
res = self.controller.update(req, 'set-log', body)
self.assertEqual(set_log_mock.return_value, res)
set_log_mock.assert_called_once_with(mock.ANY, body)
@mock.patch('cinder.api.contrib.services.ServiceController._get_log')
def test_get_log(self, get_log_mock):
get_log_mock.return_value = None
req = FakeRequest(version=mv.LOG_LEVEL)
body = mock.sentinel.body
res = self.controller.update(req, 'get-log', body)
self.assertEqual(get_log_mock.return_value, res)
get_log_mock.assert_called_once_with(mock.ANY, body)
def test__log_params_binaries_services_wrong_binary(self):
body = {'binary': 'wrong-binary'}
self.assertRaises(exception.InvalidInput,
self.controller._log_params_binaries_services,
'get-log', body)
@ddt.data(None, '', '*')
@mock.patch('cinder.objects.ServiceList.get_all')
def test__log_params_binaries_service_all(self, binary, service_list_mock):
body = {'binary': binary, 'server': 'host1'}
binaries, services = self.controller._log_params_binaries_services(
mock.sentinel.context, body)
self.assertEqual(self.controller.LOG_BINARIES, binaries)
self.assertEqual(service_list_mock.return_value, services)
service_list_mock.assert_called_once_with(
mock.sentinel.context, filters={'host_or_cluster': body['server'],
'is_up': True})
@ddt.data('cinder-api', 'cinder-volume', 'cinder-scheduler',
'cinder-backup')
@mock.patch('cinder.objects.ServiceList.get_all')
def test__log_params_binaries_service_one(self, binary, service_list_mock):
body = {'binary': binary, 'server': 'host1'}
binaries, services = self.controller._log_params_binaries_services(
mock.sentinel.context, body)
self.assertEqual([binary], binaries)
if binary == constants.API_BINARY:
self.assertEqual([], services)
service_list_mock.assert_not_called()
else:
self.assertEqual(service_list_mock.return_value, services)
service_list_mock.assert_called_once_with(
mock.sentinel.context,
filters={'host_or_cluster': body['server'], 'binary': binary,
'is_up': True})
@ddt.data(None, '', 'wronglevel')
def test__set_log_invalid_level(self, level):
body = {'level': level}
self.assertRaises(exception.InvalidInput,
self.controller._set_log, self.context, body)
@mock.patch('cinder.utils.get_log_method')
@mock.patch('cinder.objects.ServiceList.get_all')
@mock.patch('cinder.utils.set_log_levels')
@mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.set_log_levels')
@mock.patch('cinder.volume.rpcapi.VolumeAPI.set_log_levels')
@mock.patch('cinder.backup.rpcapi.BackupAPI.set_log_levels')
def test__set_log(self, backup_rpc_mock, vol_rpc_mock, sch_rpc_mock,
set_log_mock, get_all_mock, get_log_mock):
services = [
objects.Service(self.context, binary=constants.SCHEDULER_BINARY),
objects.Service(self.context, binary=constants.VOLUME_BINARY),
objects.Service(self.context, binary=constants.BACKUP_BINARY),
]
get_all_mock.return_value = services
body = {'binary': '*', 'prefix': 'eventlet.', 'level': 'debug'}
log_level = objects.LogLevel(prefix=body['prefix'],
level=body['level'])
with mock.patch('cinder.objects.LogLevel') as log_level_mock:
log_level_mock.return_value = log_level
res = self.controller._set_log(mock.sentinel.context, body)
log_level_mock.assert_called_once_with(mock.sentinel.context,
prefix=body['prefix'],
level=body['level'])
self.assertEqual(202, res.status_code)
set_log_mock.assert_called_once_with(body['prefix'], body['level'])
sch_rpc_mock.assert_called_once_with(mock.sentinel.context,
services[0], log_level)
vol_rpc_mock.assert_called_once_with(mock.sentinel.context,
services[1], log_level)
backup_rpc_mock.assert_called_once_with(mock.sentinel.context,
services[2], log_level)
get_log_mock.assert_called_once_with(body['level'])
@mock.patch('cinder.objects.ServiceList.get_all')
@mock.patch('cinder.utils.get_log_levels')
@mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.get_log_levels')
@mock.patch('cinder.volume.rpcapi.VolumeAPI.get_log_levels')
@mock.patch('cinder.backup.rpcapi.BackupAPI.get_log_levels')
def test__get_log(self, backup_rpc_mock, vol_rpc_mock, sch_rpc_mock,
get_log_mock, get_all_mock):
get_log_mock.return_value = mock.sentinel.api_levels
backup_rpc_mock.return_value = [
objects.LogLevel(prefix='p1', level='l1'),
objects.LogLevel(prefix='p2', level='l2')
]
vol_rpc_mock.return_value = [
objects.LogLevel(prefix='p3', level='l3'),
objects.LogLevel(prefix='p4', level='l4')
]
sch_rpc_mock.return_value = [
objects.LogLevel(prefix='p5', level='l5'),
objects.LogLevel(prefix='p6', level='l6')
]
services = [
objects.Service(self.context, binary=constants.SCHEDULER_BINARY,
host='host'),
objects.Service(self.context, binary=constants.VOLUME_BINARY,
host='host@backend#pool'),
objects.Service(self.context, binary=constants.BACKUP_BINARY,
host='host'),
]
get_all_mock.return_value = services
body = {'binary': '*', 'prefix': 'eventlet.'}
log_level = objects.LogLevel(prefix=body['prefix'])
with mock.patch('cinder.objects.LogLevel') as log_level_mock:
log_level_mock.return_value = log_level
res = self.controller._get_log(mock.sentinel.context, body)
log_level_mock.assert_called_once_with(mock.sentinel.context,
prefix=body['prefix'])
expected = {'log_levels': [
{'binary': 'cinder-api',
'host': CONF.host,
'levels': mock.sentinel.api_levels},
{'binary': 'cinder-scheduler', 'host': 'host',
'levels': {'p5': 'l5', 'p6': 'l6'}},
{'binary': constants.VOLUME_BINARY,
'host': 'host@backend#pool',
'levels': {'p3': 'l3', 'p4': 'l4'}},
{'binary': 'cinder-backup', 'host': 'host',
'levels': {'p1': 'l1', 'p2': 'l2'}},
]}
self.assertDictEqual(expected, res)
get_log_mock.assert_called_once_with(body['prefix'])
sch_rpc_mock.assert_called_once_with(mock.sentinel.context,
services[0], log_level)
vol_rpc_mock.assert_called_once_with(mock.sentinel.context,
services[1], log_level)
backup_rpc_mock.assert_called_once_with(mock.sentinel.context,
services[2], log_level)
|
|
import argparse
import time
import collections
import copy
import math
import torch
import numpy as np
import torch.nn as nn
from torch.autograd import Variable
import data
import model
parser = argparse.ArgumentParser(description='PyTorch PennTreeBank RNN/LSTM Language Model')
parser.add_argument('--data', type=str, default='./data/penn',
help='location of the data corpus')
parser.add_argument('--model', type=str, default='LSTM',
help='type of recurrent net (RNN_TANH, RNN_RELU, LSTM, GRU)')
parser.add_argument('--emsize', type=int, default=200,
help='size of word embeddings')
parser.add_argument('--nhid', type=int, default=200,
help='number of hidden units per layer')
parser.add_argument('--nlayers', type=int, default=2,
help='number of layers')
parser.add_argument('--lr', type=float, default=20,
help='initial learning rate')
parser.add_argument('--clip', type=float, default=0.25,
help='gradient clipping')
parser.add_argument('--epochs', type=int, default=40,
help='upper epoch limit')
parser.add_argument('--batch_size', type=int, default=20, metavar='N',
help='batch size')
parser.add_argument('--bptt', type=int, default=35,
help='sequence length')
parser.add_argument('--bptt_step', type=int, default=None,
help='bptt step size')
parser.add_argument('--dropout', type=float, default=0.2,
help='dropout applied to layers (0 = no dropout)')
parser.add_argument('--tied', action='store_true',
help='tie the word embedding and softmax weights')
parser.add_argument('--seed', type=int, default=1111,
help='random seed')
parser.add_argument('--cuda', action='store_true',
help='use CUDA')
parser.add_argument('--log-interval', type=int, default=200, metavar='N',
help='report interval')
parser.add_argument('--save', type=str, default='model.pt',
help='path to save the final model')
args = parser.parse_args()
args.bptt_step = args.bptt_step if args.bptt_step else args.bptt
print('bptt step size is %d' % args.bptt_step)
# Set the random seed manually for reproducibility.
torch.manual_seed(args.seed)
if torch.cuda.is_available():
if not args.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
else:
torch.cuda.manual_seed(args.seed)
###############################################################################
# Load data
###############################################################################
corpus = data.Corpus(args.data)
def batchify(data, bsz):
# Work out how cleanly we can divide the dataset into bsz parts.
nbatch = data.size(0) // bsz
# Trim off any extra elements that wouldn't cleanly fit (remainders).
data = data.narrow(0, 0, nbatch * bsz)
# Evenly divide the data across the bsz batches.
data = data.view(bsz, -1).t().contiguous()
if args.cuda:
data = data.cuda()
return data
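# Shape sketch (illustrative): with the 26 letters a..z as the corpus and
# bsz=4, batchify trims to 24 tokens and returns a 6x4 tensor whose columns
# are four independent streams through the data:
#   a g m s
#   b h n t
#   c i o u
#   d j p v
#   e k q w
#   f l r x
# Each column is a contiguous slice of the corpus, so successive batches
# continue the same streams and hidden state can carry across batches.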
eval_batch_size = 10
train_data = batchify(corpus.train, args.batch_size)
val_data = batchify(corpus.valid, eval_batch_size)
test_data = batchify(corpus.test, eval_batch_size)
###############################################################################
# Build the model
###############################################################################
ntokens = len(corpus.dictionary)
model = model.RNNModel(args.model, ntokens, args.emsize, args.nhid, args.nlayers, args.dropout, args.tied)
if args.cuda:
model.cuda()
# Summed (not averaged) loss so the per-chunk losses in train() can be
# accumulated and rescaled manually.
criterion = nn.CrossEntropyLoss(size_average=False)
###############################################################################
# Training code
###############################################################################
def repackage_hidden(h, volatile=False, requires_grad=False):
"""Wraps hidden states in new Variables, to detach them from their history."""
if type(h) == Variable:
return Variable(h.data, volatile=volatile, requires_grad=requires_grad)
else:
return tuple(repackage_hidden(v, volatile=volatile,
requires_grad=requires_grad) for v in h)
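# Usage sketch (illustrative, mirrors the calls in train()/evaluate()):
#   out, hidden = model(x1, hidden)
#   hidden = repackage_hidden(hidden)  # graph behind x1 can now be freed
#   out, hidden = model(x2, hidden)
#   loss.backward()                    # stops at the detach point above
# With requires_grad=True the detached state also becomes a leaf whose .grad
# can be harvested, which is what the chunked-BPTT loop in train() relies on.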
def get_batch(source, i, evaluation=False):
seq_len = min(args.bptt, len(source) - 1 - i)
data = Variable(source[i:i+seq_len], volatile=evaluation)
    # Unlike the stock script, the target is kept 2-D here so train() can
    # slice it per bptt_step chunk; callers flatten with .view(-1) themselves.
    target = Variable(source[i+1:i+1+seq_len])
return data, target
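# Offset sketch (illustrative): for i=0 and bptt=35, data is source[0:35] and
# target is source[1:36]; each target row is the next token for the matching
# data row, i.e. the usual next-word prediction objective.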
def evaluate(data_source):
# Turn on evaluation mode which disables dropout.
model.eval()
total_loss = 0
ntokens = len(corpus.dictionary)
hidden = model.init_hidden(eval_batch_size)
for i in range(0, data_source.size(0) - 1, args.bptt):
data, targets = get_batch(data_source, i, evaluation=True)
output, hidden = model(data, hidden)
output_flat = output.view(-1, ntokens)
        # criterion sums over tokens (size_average=False); the len(data)
        # weight here is cancelled by the args.bptt factor in the denominator.
        total_loss += len(data) * criterion(output_flat, targets.view(-1)).data
hidden = repackage_hidden(hidden)
return total_loss[0] / (len(data_source)*eval_batch_size*args.bptt)
def train():
# Turn on training mode which enables dropout.
model.train()
total_loss = 0
start_time = time.time()
ntokens = len(corpus.dictionary)
hidden = model.init_hidden(args.batch_size)
for batch, i in enumerate(range(0, train_data.size(0) - 1, args.bptt)):
data, targets = get_batch(train_data, i)
# Starting each batch, we detach the hidden state from how it was previously produced.
# If we didn't, the model would try backpropagating all the way to start of the dataset.
model.zero_grad()
# original:
# hidden = repackage_hidden(hidden)
#output, hidden = model(data, hidden)
#loss = criterion(output.view(-1, ntokens), targets)
#loss.backward()
# Begin bptt hsm code
hidden_v = repackage_hidden(hidden, volatile=True)
data_v, _ = get_batch(train_data, i, evaluation=True)
hsm = { -1 : repackage_hidden(hidden) }
intervals = list(enumerate(range(0, data.size(0), args.bptt_step)))
# Record states at selective intervals and flag the need for grads.
# Note we don't need to forward the last interval as we'll do it below.
# This loop is most of the extra computation for this approach.
        for f_i, f_v in intervals[:-1]:
            output, hidden_v = model(data_v[f_v:f_v + args.bptt_step], hidden_v)
hsm[f_i] = repackage_hidden(hidden_v, volatile=False,
requires_grad=True)
        save_grad = None
        loss = 0
        for b_i, b_v in reversed(intervals):
            output, h = model(data[b_v:b_v + args.bptt_step], hsm[b_i - 1])
iloss = criterion(output.view(-1, ntokens),
targets[b_v:b_v+args.bptt_step].view(-1))
if b_v+args.bptt_step >= data.size(0):
# No gradient from the future needed.
# These are the hidden states for the next sequence.
hidden = h
iloss.backward()
else:
                variables = [iloss]
                grad_variables = [None]  # None: iloss is a scalar loss
# Associate stored gradients with state variables for
# multi-variable backprop
for l in h:
variables.append(l)
g = save_grad.popleft()
grad_variables.append(g)
torch.autograd.backward(variables, grad_variables)
if b_i > 0:
# Save the gradients left on the input state variables
save_grad = collections.deque()
for l in hsm[b_i-1]:
# If this fails, could be a non-leaf, in which case exclude;
# its grad will be handled by a leaf
assert(l.grad is not None)
save_grad.append(l.grad)
loss += iloss.data[0]
        # float literal so this never integer-divides to zero on Python 2
        av = 1.0 / (args.batch_size * args.bptt)
loss *= av
        for p in model.parameters():
            p.grad.data.mul_(av)
# end bptt hsm code
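        # Summary of the chunk stitching above (a sketch, not extra logic):
        # forward once without grad to checkpoint the hidden states, then walk
        # the chunks backwards; each chunk's backward leaves .grad on its
        # (leaf) input state, which is fed as grad_variables when the previous
        # chunk calls torch.autograd.backward([iloss, *h], [None, *saved]).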
# `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.
torch.nn.utils.clip_grad_norm(model.parameters(), args.clip)
for p in model.parameters():
p.data.add_(-lr, p.grad.data)
total_loss += loss
if batch % args.log_interval == 0 and batch > 0:
cur_loss = total_loss / args.log_interval
elapsed = time.time() - start_time
print('| epoch {:3d} | {:5d}/{:5d} batches | lr {:02.2f} | ms/batch {:5.2f} | '
'loss {:5.2f} | ppl {:8.2f}'.format(
epoch, batch, len(train_data) // args.bptt, lr,
elapsed * 1000 / args.log_interval, cur_loss, math.exp(cur_loss)))
total_loss = 0
start_time = time.time()
# Loop over epochs.
lr = args.lr
best_val_loss = None
# At any point you can hit Ctrl + C to break out of training early.
try:
for epoch in range(1, args.epochs+1):
epoch_start_time = time.time()
train()
val_loss = evaluate(val_data)
print('-' * 89)
print('| end of epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | '
'valid ppl {:8.2f}'.format(epoch, (time.time() - epoch_start_time),
val_loss, math.exp(val_loss)))
print('-' * 89)
# Save the model if the validation loss is the best we've seen so far.
if not best_val_loss or val_loss < best_val_loss:
with open(args.save, 'wb') as f:
torch.save(model, f)
best_val_loss = val_loss
else:
# Anneal the learning rate if no improvement has been seen in the validation dataset.
lr /= 4.0
except KeyboardInterrupt:
print('-' * 89)
print('Exiting from training early')
# Load the best saved model.
with open(args.save, 'rb') as f:
model = torch.load(f)
# Run on test data.
test_loss = evaluate(test_data)
print('=' * 89)
print('| End of training | test loss {:5.2f} | test ppl {:8.2f}'.format(
test_loss, math.exp(test_loss)))
print('=' * 89)
|
|
"""Jedi Language Server.
Creates the language server constant and wraps "features" with it.
Official language server spec:
https://microsoft.github.io/language-server-protocol/specification
"""
import itertools
from typing import Any, List, Optional, Union
from jedi import Project
from jedi.api.refactoring import RefactoringError
from pydantic import ValidationError
from pygls.lsp.methods import (
CODE_ACTION,
COMPLETION,
COMPLETION_ITEM_RESOLVE,
DEFINITION,
DOCUMENT_HIGHLIGHT,
DOCUMENT_SYMBOL,
HOVER,
REFERENCES,
RENAME,
SIGNATURE_HELP,
TEXT_DOCUMENT_DID_CHANGE,
TEXT_DOCUMENT_DID_OPEN,
TEXT_DOCUMENT_DID_SAVE,
WORKSPACE_DID_CHANGE_CONFIGURATION,
WORKSPACE_SYMBOL,
)
from pygls.lsp.types import (
CodeAction,
CodeActionKind,
CodeActionOptions,
CodeActionParams,
CompletionItem,
CompletionList,
CompletionOptions,
CompletionParams,
DidChangeConfigurationParams,
DidChangeTextDocumentParams,
DidOpenTextDocumentParams,
DidSaveTextDocumentParams,
DocumentHighlight,
DocumentSymbol,
DocumentSymbolParams,
Hover,
InitializeParams,
InitializeResult,
Location,
MarkupContent,
MarkupKind,
MessageType,
ParameterInformation,
RenameParams,
SignatureHelp,
SignatureHelpOptions,
SignatureInformation,
SymbolInformation,
TextDocumentPositionParams,
WorkspaceEdit,
WorkspaceSymbolParams,
)
from pygls.protocol import LanguageServerProtocol
from pygls.server import LanguageServer
from . import jedi_utils, pygls_utils, text_edit_utils
from .initialization_options import InitializationOptions
class JediLanguageServerProtocol(LanguageServerProtocol):
"""Override some built-in functions."""
def bf_initialize(self, params: InitializeParams) -> InitializeResult:
"""Override built-in initialization.
Here, we can conditionally register functions to features based
on client capabilities and initializationOptions.
"""
server: "JediLanguageServer" = self._server
try:
server.initialization_options = InitializationOptions.parse_obj(
{}
if params.initialization_options is None
else params.initialization_options
)
except ValidationError as error:
msg = f"Invalid InitializationOptions, using defaults: {error}"
server.show_message(msg, msg_type=MessageType.Error)
server.show_message_log(msg, msg_type=MessageType.Error)
server.initialization_options = InitializationOptions()
initialization_options = server.initialization_options
jedi_utils.set_jedi_settings(initialization_options)
# Configure didOpen, didChange, and didSave
# currently need to be configured manually
diagnostics = initialization_options.diagnostics
did_open = (
did_open_diagnostics
if diagnostics.enable and diagnostics.did_open
else did_open_default
)
did_change = (
did_change_diagnostics
if diagnostics.enable and diagnostics.did_change
else did_change_default
)
did_save = (
did_save_diagnostics
if diagnostics.enable and diagnostics.did_save
else did_save_default
)
server.feature(TEXT_DOCUMENT_DID_OPEN)(did_open)
server.feature(TEXT_DOCUMENT_DID_CHANGE)(did_change)
server.feature(TEXT_DOCUMENT_DID_SAVE)(did_save)
initialize_result: InitializeResult = super().bf_initialize(params)
server.project = (
Project(
path=server.workspace.root_path,
added_sys_path=initialization_options.workspace.extra_paths,
smart_sys_path=True,
load_unsafe_extensions=False,
)
if server.workspace.root_path
else None
)
return initialize_result
class JediLanguageServer(LanguageServer):
"""Jedi language server.
:attr initialization_options: initialized in bf_initialize from the
protocol_cls.
:attr project: a Jedi project. This value is created in
`JediLanguageServerProtocol.bf_initialize`.
"""
initialization_options: InitializationOptions
project: Optional[Project]
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
SERVER = JediLanguageServer(protocol_cls=JediLanguageServerProtocol)
# Server capabilities
@SERVER.feature(COMPLETION_ITEM_RESOLVE)
def completion_item_resolve(
server: JediLanguageServer, params: CompletionItem
) -> CompletionItem:
"""Resolves documentation and detail of given completion item."""
markup_kind = _choose_markup(server)
return jedi_utils.lsp_completion_item_resolve(
params, markup_kind=markup_kind
)
@SERVER.feature(
COMPLETION,
CompletionOptions(
trigger_characters=[".", "'", '"'], resolve_provider=True
),
)
def completion(
server: JediLanguageServer, params: CompletionParams
) -> Optional[CompletionList]:
"""Returns completion items."""
document = server.workspace.get_document(params.text_document.uri)
jedi_script = jedi_utils.script(server.project, document)
jedi_lines = jedi_utils.line_column(jedi_script, params.position)
completions_jedi = jedi_script.complete(**jedi_lines)
snippet_support = server.client_capabilities.get_capability(
"text_document.completion.completion_item.snippet_support", False
)
snippet_disable = server.initialization_options.completion.disable_snippets
resolve_eagerly = server.initialization_options.completion.resolve_eagerly
markup_kind = _choose_markup(server)
is_import_context = jedi_utils.is_import(
script_=jedi_script,
line=jedi_lines["line"],
column=jedi_lines["column"],
)
enable_snippets = (
snippet_support and not snippet_disable and not is_import_context
)
    char_before_cursor = pygls_utils.char_before_cursor(
        document=document,
        position=params.position,
    )
jedi_utils.clear_completions_cache()
completion_items = [
jedi_utils.lsp_completion_item(
completion=completion,
char_before_cursor=char_before_cursor,
enable_snippets=enable_snippets,
resolve_eagerly=resolve_eagerly,
markup_kind=markup_kind,
)
for completion in completions_jedi
]
return (
CompletionList(is_incomplete=False, items=completion_items)
if completion_items
else None
)
@SERVER.feature(
SIGNATURE_HELP, SignatureHelpOptions(trigger_characters=["(", ","])
)
def signature_help(
server: JediLanguageServer, params: TextDocumentPositionParams
) -> Optional[SignatureHelp]:
"""Returns signature help.
Note: for docstring, we currently choose plaintext because coc doesn't
handle markdown well in the signature. Will update if this changes in the
future.
"""
document = server.workspace.get_document(params.text_document.uri)
jedi_script = jedi_utils.script(server.project, document)
jedi_lines = jedi_utils.line_column(jedi_script, params.position)
signatures_jedi = jedi_script.get_signatures(**jedi_lines)
markup_kind = _choose_markup(server)
signatures = [
SignatureInformation(
label=signature.to_string(),
documentation=MarkupContent(
kind=markup_kind,
value=jedi_utils.convert_docstring(
signature.docstring(raw=True),
markup_kind,
),
),
parameters=[
ParameterInformation(label=info.to_string())
for info in signature.params
],
)
for signature in signatures_jedi
]
return (
SignatureHelp(
signatures=signatures,
active_signature=0,
active_parameter=(
signatures_jedi[0].index if signatures_jedi else 0
),
)
if signatures
else None
)
@SERVER.feature(DEFINITION)
def definition(
server: JediLanguageServer, params: TextDocumentPositionParams
) -> Optional[List[Location]]:
"""Support Goto Definition."""
document = server.workspace.get_document(params.text_document.uri)
jedi_script = jedi_utils.script(server.project, document)
jedi_lines = jedi_utils.line_column(jedi_script, params.position)
names = jedi_script.goto(
follow_imports=True,
follow_builtin_imports=True,
**jedi_lines,
)
definitions = [jedi_utils.lsp_location(name) for name in names]
return definitions if definitions else None
@SERVER.feature(DOCUMENT_HIGHLIGHT)
def highlight(
server: JediLanguageServer, params: TextDocumentPositionParams
) -> Optional[List[DocumentHighlight]]:
"""Support document highlight request.
This function is called frequently, so we minimize the number of expensive
calls. These calls are:
1. Getting assignment of current symbol (script.goto)
2. Getting all names in the current script (script.get_names)
Finally, we only return names if there are more than 1. Otherwise, we don't
want to highlight anything.
"""
document = server.workspace.get_document(params.text_document.uri)
jedi_script = jedi_utils.script(server.project, document)
jedi_lines = jedi_utils.line_column(jedi_script, params.position)
names = jedi_script.get_references(**jedi_lines, scope="file")
highlight_names = [
DocumentHighlight(range=jedi_utils.lsp_range(name)) for name in names
]
return highlight_names if highlight_names else None
@SERVER.feature(HOVER)
def hover(
server: JediLanguageServer, params: TextDocumentPositionParams
) -> Optional[Hover]:
"""Support Hover."""
document = server.workspace.get_document(params.text_document.uri)
jedi_script = jedi_utils.script(server.project, document)
jedi_lines = jedi_utils.line_column(jedi_script, params.position)
markup_kind = _choose_markup(server)
for name in jedi_script.help(**jedi_lines):
docstring = name.docstring()
if not docstring:
continue
docstring_clean = jedi_utils.convert_docstring(docstring, markup_kind)
contents = MarkupContent(kind=markup_kind, value=docstring_clean)
        _range = pygls_utils.current_word_range(document, params.position)
return Hover(contents=contents, range=_range)
return None
@SERVER.feature(REFERENCES)
def references(
server: JediLanguageServer, params: TextDocumentPositionParams
) -> Optional[List[Location]]:
"""Obtain all references to text."""
document = server.workspace.get_document(params.text_document.uri)
jedi_script = jedi_utils.script(server.project, document)
jedi_lines = jedi_utils.line_column(jedi_script, params.position)
names = jedi_script.get_references(**jedi_lines)
locations = [jedi_utils.lsp_location(name) for name in names]
return locations if locations else None
@SERVER.feature(DOCUMENT_SYMBOL)
def document_symbol(
server: JediLanguageServer, params: DocumentSymbolParams
) -> Optional[Union[List[DocumentSymbol], List[SymbolInformation]]]:
"""Document Python document symbols, hierarchically if possible.
In Jedi, valid values for `name.type` are:
- `module`
- `class`
- `instance`
- `function`
- `param`
- `path`
- `keyword`
- `statement`
We do some cleaning here. For hierarchical symbols, names from scopes that
aren't directly accessible with dot notation are removed from display. For
non-hierarchical symbols, we simply remove `param` symbols. Others are
included for completeness.
"""
document = server.workspace.get_document(params.text_document.uri)
jedi_script = jedi_utils.script(server.project, document)
names = jedi_script.get_names(all_scopes=True, definitions=True)
if server.client_capabilities.get_capability(
"text_document.document_symbol.hierarchical_document_symbol_support",
False,
):
document_symbols = jedi_utils.lsp_document_symbols(names)
return document_symbols if document_symbols else None
symbol_information = [
jedi_utils.lsp_symbol_information(name)
for name in names
if name.type != "param"
]
return symbol_information if symbol_information else None
def _ignore_folder(path_check: str, jedi_ignore_folders: List[str]) -> bool:
"""Determines whether there's an ignore folder in the path.
Intended to be used with the `workspace_symbol` function
"""
for ignore_folder in jedi_ignore_folders:
if f"/{ignore_folder}/" in path_check:
return True
return False
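# Illustrative behaviour (hypothetical paths):
#   _ignore_folder("/home/u/proj/.venv/lib/x.py", [".venv"]) -> True
#   _ignore_folder("/home/u/proj/src/x.py", [".venv"]) -> False
# Note the surrounding slashes: only whole folder names match, not substrings.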
@SERVER.feature(WORKSPACE_SYMBOL)
def workspace_symbol(
server: JediLanguageServer, params: WorkspaceSymbolParams
) -> Optional[List[SymbolInformation]]:
"""Document Python workspace symbols.
Returns up to maxSymbols, or all symbols if maxSymbols is <= 0, ignoring
the following symbols:
1. Those that don't have a module_path associated with them (built-ins)
2. Those that are not rooted in the current workspace.
3. Those whose folders contain a directory that is ignored (.venv, etc)
"""
if not server.project:
return None
names = server.project.complete_search(params.query)
workspace_root = server.workspace.root_path
ignore_folders = (
server.initialization_options.workspace.symbols.ignore_folders
)
_symbols = (
jedi_utils.lsp_symbol_information(name)
for name in names
if name.module_path
and str(name.module_path).startswith(workspace_root)
and not _ignore_folder(str(name.module_path), ignore_folders)
)
max_symbols = server.initialization_options.workspace.symbols.max_symbols
symbols = (
list(itertools.islice(_symbols, max_symbols))
if max_symbols > 0
else list(_symbols)
)
return symbols if symbols else None
@SERVER.feature(RENAME)
def rename(
server: JediLanguageServer, params: RenameParams
) -> Optional[WorkspaceEdit]:
"""Rename a symbol across a workspace."""
document = server.workspace.get_document(params.text_document.uri)
jedi_script = jedi_utils.script(server.project, document)
jedi_lines = jedi_utils.line_column(jedi_script, params.position)
try:
refactoring = jedi_script.rename(
new_name=params.new_name, **jedi_lines
)
except RefactoringError:
return None
changes = text_edit_utils.lsp_document_changes(
server.workspace, refactoring
)
return WorkspaceEdit(document_changes=changes) if changes else None
@SERVER.feature(
CODE_ACTION,
CodeActionOptions(
code_action_kinds=[
CodeActionKind.RefactorInline,
CodeActionKind.RefactorExtract,
],
),
)
def code_action(
server: JediLanguageServer, params: CodeActionParams
) -> Optional[List[CodeAction]]:
"""Get code actions.
Currently supports:
1. Inline variable
2. Extract variable
3. Extract function
"""
document = server.workspace.get_document(params.text_document.uri)
jedi_script = jedi_utils.script(server.project, document)
code_actions = []
jedi_lines = jedi_utils.line_column(jedi_script, params.range.start)
jedi_lines_extract = jedi_utils.line_column_range(params.range)
try:
if params.range.start.line != params.range.end.line:
# refactor this at some point; control flow with exception == bad
raise RefactoringError("inline only viable for single-line range")
inline_refactoring = jedi_script.inline(**jedi_lines)
except (RefactoringError, AttributeError, IndexError):
inline_changes = []
else:
inline_changes = text_edit_utils.lsp_document_changes(
server.workspace, inline_refactoring
)
if inline_changes:
code_actions.append(
CodeAction(
title="Inline variable",
kind=CodeActionKind.RefactorInline,
edit=WorkspaceEdit(
document_changes=inline_changes,
),
)
)
extract_var = (
server.initialization_options.code_action.name_extract_variable
)
try:
extract_variable_refactoring = jedi_script.extract_variable(
new_name=extract_var, **jedi_lines_extract
)
except (RefactoringError, AttributeError, IndexError):
extract_variable_changes = []
else:
extract_variable_changes = text_edit_utils.lsp_document_changes(
server.workspace, extract_variable_refactoring
)
if extract_variable_changes:
code_actions.append(
CodeAction(
title=f"Extract expression into variable '{extract_var}'",
kind=CodeActionKind.RefactorExtract,
edit=WorkspaceEdit(
document_changes=extract_variable_changes,
),
)
)
extract_func = (
server.initialization_options.code_action.name_extract_function
)
try:
extract_function_refactoring = jedi_script.extract_function(
new_name=extract_func, **jedi_lines_extract
)
except (RefactoringError, AttributeError, IndexError):
extract_function_changes = []
else:
extract_function_changes = text_edit_utils.lsp_document_changes(
server.workspace, extract_function_refactoring
)
if extract_function_changes:
code_actions.append(
CodeAction(
title=f"Extract expression into function '{extract_func}'",
kind=CodeActionKind.RefactorExtract,
edit=WorkspaceEdit(
document_changes=extract_function_changes,
),
)
)
return code_actions if code_actions else None
@SERVER.feature(WORKSPACE_DID_CHANGE_CONFIGURATION)
def did_change_configuration(
server: JediLanguageServer, # pylint: disable=unused-argument
params: DidChangeConfigurationParams, # pylint: disable=unused-argument
) -> None:
"""Implement event for workspace/didChangeConfiguration.
Currently does nothing, but necessary for pygls. See::
<https://github.com/pappasam/jedi-language-server/issues/58>
"""
# Static capability or initializeOptions functions that rely on a specific
# client capability or user configuration. These are associated with
# JediLanguageServer within JediLanguageServerProtocol.bf_initialize
def _publish_diagnostics(server: JediLanguageServer, uri: str) -> None:
"""Helper function to publish diagnostics for a file."""
document = server.workspace.get_document(uri)
jedi_script = jedi_utils.script(server.project, document)
errors = jedi_script.get_syntax_errors()
diagnostics = [jedi_utils.lsp_diagnostic(error) for error in errors]
server.publish_diagnostics(uri, diagnostics)
# TEXT_DOCUMENT_DID_SAVE
def did_save_diagnostics(
server: JediLanguageServer, params: DidSaveTextDocumentParams
) -> None:
"""Actions run on textDocument/didSave: diagnostics."""
_publish_diagnostics(server, params.text_document.uri)
def did_save_default(
server: JediLanguageServer, # pylint: disable=unused-argument
params: DidSaveTextDocumentParams, # pylint: disable=unused-argument
) -> None:
"""Actions run on textDocument/didSave: default."""
# TEXT_DOCUMENT_DID_CHANGE
def did_change_diagnostics(
server: JediLanguageServer, params: DidChangeTextDocumentParams
) -> None:
"""Actions run on textDocument/didChange: diagnostics."""
_publish_diagnostics(server, params.text_document.uri)
def did_change_default(
server: JediLanguageServer, # pylint: disable=unused-argument
params: DidChangeTextDocumentParams, # pylint: disable=unused-argument
) -> None:
"""Actions run on textDocument/didChange: default."""
# TEXT_DOCUMENT_DID_OPEN
def did_open_diagnostics(
server: JediLanguageServer, params: DidOpenTextDocumentParams
) -> None:
"""Actions run on textDocument/didOpen: diagnostics."""
_publish_diagnostics(server, params.text_document.uri)
def did_open_default(
server: JediLanguageServer, # pylint: disable=unused-argument
params: DidOpenTextDocumentParams, # pylint: disable=unused-argument
) -> None:
"""Actions run on textDocument/didOpen: default."""
def _choose_markup(server: JediLanguageServer) -> MarkupKind:
"""Returns the preferred or first of supported markup kinds."""
markup_preferred = server.initialization_options.markup_kind_preferred
markup_supported = server.client_capabilities.get_capability(
"text_document.completion.completion_item.documentation_format",
[MarkupKind.PlainText],
)
return MarkupKind(
markup_preferred
if markup_preferred in markup_supported
else markup_supported[0]
)
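# Resolution sketch for _choose_markup (hypothetical capabilities): a client
# advertising documentation_format == ["markdown", "plaintext"] with
# markup_kind_preferred == "plaintext" resolves to MarkupKind.PlainText; with
# no preference configured, the first supported kind ("markdown") wins.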
|
|
import requests
import flask
from flask import Flask, request, render_template, jsonify, redirect, url_for, make_response
from flask_sqlalchemy import SQLAlchemy
import jwt
from encoder import jwt_encode
from logging import Formatter, FileHandler
import controller
from os import path
import models
import os
from threading import Thread
#Initialize Flask application
app = Flask(__name__)
PORT = int(os.environ.get('PORT', 5000))
is_prod = os.environ.get('IS_HEROKU', None)
app.config.from_object('config')
if is_prod:
    #trailing slash so DEFAULT_REDIRECT_URI below concatenates cleanly
    BASE_CLIENT_URL = 'https://genomie.herokuapp.com/'
CLIENT_ID = app.config.get('PROD_CLIENT_ID')
CLIENT_SECRET = app.config.get('PROD_CLIENT_SECRET')
REDIRECT_URI = app.config.get('PROD_REDIRECT_URI')
else:
#Gather data from config.py
    BASE_CLIENT_URL = 'http://localhost:%s/' % PORT
CLIENT_ID = app.config.get('CLIENT_ID')
CLIENT_SECRET = app.config.get('CLIENT_SECRET')
REDIRECT_URI = app.config.get('REDIRECT_URI')
#Declaration of all necessary variables needed to perform 23AndMe API Call
DEFAULT_REDIRECT_URI = '%sreceive_code/' % BASE_CLIENT_URL
SNPS = ['rs12913832', 'rs8177374', 'rs1799971', 'rs806380', 'rs1800955', 'rs53576', 'rs1815739', 'rs6152', 'rs1800497', 'rs9939609', 'rs662799', 'rs17822931', 'rs4680', 'rs4988235', 'rs6025', 'rs7574865', 'rs1695', 'rs72921001', 'rs1537415', 'rs2472297', 'rs909525']
DEFAULT_SCOPE = 'names basic email ancestry relatives genomes %s' % (' '.join(SNPS))
BASE_API_URL = 'https://api.23andme.com/'
SECRET_KEY = app.config.get('SECRET_KEY')
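#Hedged sketch of the token round-trip this app relies on: the local
#encoder.jwt_encode is assumed to wrap PyJWT roughly like
#   jwt.encode({'user_profile_id': pid, 'name': name}, SECRET_KEY, algorithm='HS256')
#so the routes below can recover the profile id with
#   jwt.decode(token, SECRET_KEY, algorithms=['HS256'])['user_profile_id']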
@app.route('/')
def home():
auth_url = '%sauthorize/?response_type=code&redirect_uri=%s&client_id=%s&scope=%s' % (BASE_API_URL, REDIRECT_URI, CLIENT_ID, DEFAULT_SCOPE)
return render_template('landing.html', auth_url=auth_url)
@app.route('/get_info/')
def getUser():
response = make_response(render_template('index.html'))
return response
@app.route('/demo/')
def makeDemoUser():
#Add demo user to DB if they don't already exist
controller.create_demo_user()
demo_profile_id = 'demo_id'
demo_user_name = 'THIS IS A DEMO'
response = make_response(render_template('index.html'))
response.set_cookie('user_first_name', demo_user_name)
response.set_cookie('token', jwt_encode(demo_profile_id, demo_user_name, SECRET_KEY))
controller.createSnpsTable()
return response
#TODO: refactor this route to take a user profile id after the trailing slash,
#e.g. '/user/relativesinfo/<profile_id>', the equivalent of '/:userId' in node/express
@app.route('/user/relativesinfo/')
#Returns the current user's relatives; the DB queries below fetch everything
#and filter in Python, so refactor to filter at the query level instead
def getRelatives():
decoded = jwt.decode(request.cookies.get('token'), SECRET_KEY, algorithms=['HS256'])
current_user_profile_id = decoded['user_profile_id']
#Retrieve all relatives from database, not filtered by user
#To Do: Filter this by user
user_relatives = models.db_session.query(models.user_relatives).all()
user_relatives_ids = []
#Iterate through all relatives
for user_relative in user_relatives:
user = list(user_relative)
#For each relative, grab only those that match on the current_user_profile_id
if current_user_profile_id == str(user[0]):
user_relatives_ids.append(int(user[1]))
#Retrieve all relatives from DB
#To Do: is this the same information in the user_relatives variable above?
relatives = models.db_session.query(models.Relative).all()
finalRelatives = []
#Iterate through all relatives
for relative in relatives:
#Grab only relatives who match the relatives in the user_relatives_ids storage
if relative.serialize()['id'] in user_relatives_ids:
finalRelatives.append(relative.serialize())
return jsonify({'relativeList' : finalRelatives})
@app.route('/user/snpinfo/', methods=['POST', 'GET']) #we should take out 'GET'?
def getSnps():
decoded = jwt.decode(request.cookies.get('token'), app.config.get('SECRET_KEY'), algorithms=['HS256'])
current_user_profile_id = decoded['user_profile_id']
user_snps = {}
user_data = models.db_session.query(models.User).filter(models.User.profile_id == current_user_profile_id).first().serialize()
for user_datum in user_data:
        if user_datum[:2].lower() == 'rs':
user_snps[user_datum] = user_data[user_datum]
user_outcomes = []
for user_snp in user_snps:
# loop through entire snp table, if any of snp base pairs match up to the base pair in user snps, put in an object with rsid and outcome
current_snp = models.db_session.query(models.Snp).filter(models.Snp.rs_id == user_snp).filter(models.Snp.dnaPair == user_snps[user_snp]).first()
if current_snp is not None:
            user_outcomes.append({"title": current_snp.serialize()["title"],
                                  "rsid": user_snp,
                                  "pair": user_snps[user_snp],
                                  "outcome": current_snp.serialize()['outcome'],
                                  "video": current_snp.serialize()['video']})
return jsonify({'outcomes': user_outcomes})
@app.route('/receive_code/')
def receive_code():
code = request.args.get('code')
parameters = {
'client_id': CLIENT_ID,
'client_secret': CLIENT_SECRET,
'grant_type': 'authorization_code',
        'code': code,
'redirect_uri': REDIRECT_URI,
'scope': DEFAULT_SCOPE
}
    response = requests.post(
        "%s%s" % (BASE_API_URL, "token/"),
        data=parameters,
        verify=False  # NOTE: disables TLS certificate verification
    )
#get access token from 23andMe
if response.status_code == 200:
access_token = response.json()['access_token']
headers = {'Authorization': 'Bearer %s' % access_token}
#Begin API calls to 23andMe to get all scoped user data
genotype_response = requests.get("%s%s" % (BASE_API_URL, "1/genotype/"),
params={'locations': ' '.join(SNPS)},
headers=headers,
verify=False)
user_profile_id = genotype_response.json().pop()['id']
user_response = requests.get("%s%s" % (BASE_API_URL, "1/user/?email=true"),
headers=headers,
verify=False)
name_response = requests.get("%s%s" % (BASE_API_URL, "1/names/%s" % user_profile_id),
headers=headers,
verify=False)
#if both API calls are successful, process user data
if user_response.status_code == 200 and genotype_response.status_code == 200:
user_first_name = name_response.json()['first_name']
#create additional thread to retrieve entire genome
genomeThread = Thread(target=controller.getGenome, args=(code,user_profile_id, headers,))
genomeThread.start()
#if user already exists in database, render the html and do not re-add user to database
if len(models.db_session.query(models.User).filter_by(profile_id=user_profile_id).all()) != 0:
response = make_response(redirect(url_for('getUser')))
response.set_cookie('user_first_name', user_first_name)
response.set_cookie('token', jwt_encode(user_profile_id, user_first_name, SECRET_KEY))
return response
# otherwise, add new user to database if they have never logged in before
else:
#Begin API calls to 23andMe to get additional user data
relatives_response = requests.get("%s%s" % (BASE_API_URL, "1/relatives/%s" % user_profile_id),
params = {'limit': 60, 'offset': 1},
headers=headers,
verify=False)
#call createNewUser from controller to add User and User relatives to the database
controller.createNewUser(name_response, relatives_response, genotype_response, user_response)
#create snps table
controller.createSnpsTable()
response = make_response(redirect(url_for('getUser')))
response.set_cookie('user_first_name', user_first_name)
response.set_cookie('token', jwt_encode(user_profile_id, user_first_name, SECRET_KEY))
return response
#error handling if api calls for additional user data to 23andMe fail
else:
            #raise on whichever of the follow-up calls actually failed; the
            #original raised on the (successful) token response, a no-op
            genotype_response.raise_for_status()
            user_response.raise_for_status()
#error handling if initial api calls to 23andMe fail
else:
response = make_response(redirect(url_for('home')))
return response
#Initialize python server on port
if __name__ == '__main__':
    print('Server has been initialized')
if is_prod:
app.run(host='0.0.0.0', port=PORT)
else:
app.run(debug=True, port=PORT)
|
|
#!/usr/bin/env python
# ====================================
# Copyright (c) Microsoft Corporation. All rights reserved.
# See license.txt for license information.
# ====================================
import os
import sys
import pwd
import shutil
import grp
import urllib2
import time
import imp
protocol = imp.load_source('protocol', '../protocol.py')
nxDSCLog = imp.load_source('nxDSCLog', '../nxDSCLog.py')
LG = nxDSCLog.DSCLog
try:
import hashlib
md5const = hashlib.md5
except ImportError:
import md5
md5const = md5.md5
BLOCK_SIZE = 8192
show_mof = False
def init_locals(DestinationPath, SourcePath, Ensure, Type, Force, Contents,
Checksum, Recurse, Links, Owner, Group, Mode):
if DestinationPath is None :
DestinationPath = ''
if SourcePath is None :
SourcePath = ''
if Ensure is None or Ensure == '':
Ensure = 'present'
if Type is None :
Type = 'file'
if Force is None :
Force = False
Force = ( Force == True )
if Contents is None :
Contents = ''
if Checksum is None :
Checksum = ''
if Recurse is None :
Recurse = False
Recurse = ( Recurse == True )
if Links is None :
Links = 'follow'
if Owner is None :
Owner = ''
if Group is None :
Group = ''
if Mode is None :
Mode = ''
return DestinationPath.encode('ascii', 'ignore'), SourcePath.encode('ascii', 'ignore'), \
Ensure.encode('ascii', 'ignore').lower(), Type.encode('ascii', 'ignore').lower(), Force,\
Contents, Checksum.encode('ascii', 'ignore').lower(), Recurse, \
Links.encode('ascii', 'ignore').lower(), Owner, Group, Mode
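# Behaviour sketch (follows directly from the defaults above): calling
# init_locals with all twelve arguments None yields
#   ('', '', 'present', 'file', False, '', '', False, 'follow', '', '', '')
# i.e. Ensure/Type/Links get their documented defaults and everything else
# collapses to an empty string or False.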
def Set_Marshall(DestinationPath, SourcePath, Ensure, Type, Force, Contents, Checksum, Recurse, Links, Owner, Group, Mode):
DestinationPath, SourcePath, Ensure, Type, Force, Contents, Checksum, Recurse, Links, Owner, Group, Mode \
= init_locals(DestinationPath, SourcePath, Ensure, Type, Force, Contents, Checksum, Recurse, Links, Owner, Group, Mode)
retval = Set(DestinationPath, SourcePath, Ensure, Type, Force, Contents, Checksum, Recurse, Links, Owner, Group, Mode)
return retval
def Test_Marshall(DestinationPath, SourcePath, Ensure, Type, Force, Contents, Checksum, Recurse, Links, Owner, Group, Mode):
DestinationPath, SourcePath, Ensure, Type, Force, Contents, Checksum, Recurse, Links, Owner, Group, Mode \
= init_locals(DestinationPath, SourcePath, Ensure, Type, Force, Contents, Checksum, Recurse, Links, Owner, Group, Mode)
retval = Test(DestinationPath, SourcePath, Ensure, Type, Force, Contents, Checksum, Recurse, Links, Owner, Group, Mode)
return retval
def Get_Marshall(DestinationPath, SourcePath, Ensure, Type, Force, Contents, Checksum, Recurse, Links, Owner, Group, Mode):
arg_names = list(locals().keys())
DestinationPath, SourcePath, Ensure, Type, Force, Contents, Checksum, Recurse, Links, Owner, Group, Mode \
= init_locals(DestinationPath, SourcePath, Ensure, Type, Force, Contents, Checksum, Recurse, Links, Owner, Group, Mode)
retval = 0
(retval, DestinationPath, SourcePath, Ensure, Type, Force, Contents, Checksum, Recurse, Links, Owner, Group, Mode, ModifiedDate) \
= Get(DestinationPath, SourcePath, Ensure, Type, Force, Contents, Checksum, Recurse, Links, Owner, Group, Mode)
DestinationPath = protocol.MI_String(DestinationPath)
SourcePath = protocol.MI_String(SourcePath)
Ensure = protocol.MI_String(Ensure)
Type = protocol.MI_String(Type)
Force = protocol.MI_Boolean(Force)
Contents = protocol.MI_String(Contents)
Checksum = protocol.MI_String(Checksum)
Recurse = protocol.MI_Boolean(Recurse)
Links = protocol.MI_String(Links)
Owner = protocol.MI_String(Owner)
Group = protocol.MI_String(Group)
Mode = protocol.MI_String(Mode)
ModifiedDate = protocol.MI_Timestamp.from_time(ModifiedDate)
arg_names.append('ModifiedDate')
retd = {}
ld = locals()
for k in arg_names :
retd[k] = ld[k]
return retval, retd
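# Illustrative sketch (not part of the provider): how the marshalling entry
# points above might be exercised by hand when debugging.  The argument
# values below are hypothetical examples, not defaults.
def _example_marshall_usage():
    # Ensure /tmp/example.conf exists with the given contents and mode.
    rv = Set_Marshall('/tmp/example.conf', '', 'present', 'file', False,
                      'key=value\n', 'md5', False, 'follow', '', '', '644')
    # Test_Marshall returns [0] when the resource is already in the
    # desired state and [-1] otherwise.
    in_desired_state = (Test_Marshall('/tmp/example.conf', '', 'present',
                                      'file', False, 'key=value\n', 'md5',
                                      False, 'follow', '', '', '644') == [0])
    return rv, in_desired_state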
# ###########################################################
# Begin user defined DSC functions
# ###########################################################
def opened_w_error(filename, mode="r"):
"""
This context ensures the file is closed.
"""
try:
f = open(filename, mode)
except IOError, err:
return None, err
return f, None
def opened_bin_w_error(filename, mode="rb"):
"""
This context ensures the file is closed.
"""
try:
f = open(filename, mode)
except IOError, err:
return None, err
return f, None
def ReadFile1k(path):
"""
Safely attempt to read a file,
ensuring file is always closed at exit.
Read only 1k.
Return the data and the exception object.
The data is None if an error occurred.
The error is None if the data was read.
Log results to stderr.
"""
d = None
error = None
F, error = opened_bin_w_error(path)
if error:
Print("Exception opening file " + path + " Error: " + str(error), file=sys.stderr )
LG().Log('ERROR', "Exception opening file " + path + " Error: " + str(error))
else:
d = F.read(1024)
F.close()
return d, error
def ReadFile(path):
"""
Safely attempt to read a file,
ensuring file is always closed at exit.
Return the data and the exception object.
The data is None if an error occurred.
The error is None if the data was read.
Log results to stderr.
"""
d = None
error = None
F, error = opened_bin_w_error(path)
if error:
Print("Exception opening file " + path + " Error: " + str(error), file=sys.stderr )
LG().Log('ERROR', "Exception opening file " + path + " Error: " + str(error))
else:
d=F.read()
F.close()
return d,error
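# Usage sketch for the read helpers above (path is just an example): both
# return a (data, error) pair; error is None on success and data is None
# when the open failed.
#
#     data, err = ReadFile1k('/etc/hostname')
#     if err is None:
#         first_kb = data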
def WriteFile(path, contents):
"""
Safely attempt to write data to a file,
replacing the existing file or creating it and
ensuring file is always closed at exit.
Return the exception object.
The error is None if the data was written.
Log results to stderr.
"""
error = None
F, error = opened_w_error(path, 'w+')
if error:
Print("Exception opening file " + path + " Error Code: " + str(error) , file=sys.stderr)
LG().Log('ERROR', "Exception opening file " + path + " Error Code: " + str(error))
else:
F.write(contents)
F.close()
return error
def Print(s, file=sys.stderr):
file.write(s.encode('utf8') + '\n')
def LStatFile(path):
"""
LStat the file. Do not follow the symlink.
"""
d = None
error = None
try:
d = os.lstat(path)
except OSError, error:
Print("Exception lstating file " + path + " Error: " + str(error), file=sys.stderr)
LG().Log('ERROR', "Exception lstating file " + path + " Error: " + str(error))
except IOError, error:
Print("Exception lstating file " + path + " Error: " + str(error), file=sys.stderr)
LG().Log('ERROR', "Exception lstating file " + path + " Error: " + str(error))
return d
def StatFile(path):
"""
Stat the file, following the symlink.
"""
d = None
error = None
try:
d = os.stat(path)
except OSError, error:
Print("Exception stating file " + path + " Error: " + str(error), file=sys.stderr)
LG().Log('ERROR', "Exception stating file " + path + " Error: " + str(error))
except IOError, error:
Print("Exception stating file " + path + " Error: " + str(error), file=sys.stderr)
LG().Log('ERROR', "Exception stating file " + path + " Error: " + str(error))
return d
def Chown(path, owner, group):
error = None
try:
os.chown(path, owner, group)
except OSError, error:
Print("Exception changing ownership of file " + path + " Error: " + str(error), file=sys.stderr)
LG().Log('ERROR', "Exception changing ownership of file " + path + " Error: " + str(error))
except IOError, error:
Print("Exception changing ownership of file " + path + " Error: " + str(error), file=sys.stderr)
LG().Log('ERROR', "Exception changing ownership of file " + path + " Error: " + str(error))
return error
def Chmod(path, mode):
error = None
if type(mode) != int:
mode = int(mode, 8)
try:
os.chmod(path, mode)
except OSError, error:
Print("Exception changing mode of file " + path + " Error: " + str(error), file=sys.stderr)
LG().Log('ERROR', "Exception changing mode of file " + path + " Error: " + str(error))
except IOError, error:
Print("Exception changing mode of file " + path + " Error: " + str(error), file=sys.stderr)
LG().Log('ERROR', "Exception changing mode of file " + path + " Error: " + str(error))
return error
def LChown(path, owner, group):
error = None
try:
os.lchown(path, owner, group)
except OSError, error:
Print("Exception changing ownership of file " + path + " Error: " + str(error), file=sys.stderr)
LG().Log('ERROR', "Exception changing ownership of file " + path + " Error: " + str(error))
except IOError, error:
Print("Exception changing ownership of file " + path + " Error: " + str(error), file=sys.stderr)
LG().Log('ERROR', "Exception changing ownership of file " + path + " Error: " + str(error))
return error
def LChmod(path, mode):
error = None
try:
os.lchmod(path, mode)
except OSError, error:
Print("Exception changing mode of file " + path + " Error: " + str(error), file=sys.stderr)
LG().Log('ERROR', "Exception changing mode of file " + path + " Error: " + str(error))
except IOError, error:
Print("Exception changing mode of file " + path + " Error: " + str(error), file=sys.stderr)
LG().Log('ERROR', "Exception changing mode of file " + path + " Error: " + str(error))
return error
def ListDir(path):
d = None
error = None
try:
d = os.listdir(path)
except OSError, error:
Print("Exception listing dir " + path + " Error: " + str(error), file=sys.stderr)
LG().Log('ERROR', "Exception listing dir " + path + " Error: " + str(error))
except IOError, error:
Print("Exception listing dir " + path + " Error: " + str(error), file=sys.stderr)
LG().Log('ERROR', "Exception listing dir " + path + " Error: " + str(error))
return d
def Symlink(spath, dpath):
error = None
if spath == dpath: # Nothing to Link
return error
# remove the destination if present
if os.path.exists(dpath):
try:
os.unlink(dpath)
except OSError, error:
Print("Exception removing " + dpath + " Error: " + str(error), file=sys.stderr)
LG().Log('ERROR', "Exception removing " + dpath + " Error: " + str(error))
return error
except IOError, error:
Print("Exception removing " + dpath + " Error: " + str(error), file=sys.stderr)
LG().Log('ERROR', "Exception removing " + dpath + " Error: " + str(error))
return error
try:
os.symlink(spath, dpath)
except OSError, error:
Print("Exception creating symlink from " + spath + ' to ' + dpath + " Error: " + str(error), file=sys.stderr)
LG().Log('ERROR', "Exception creating symlink from " + spath + ' to ' + dpath + " Error: " + str(error))
except IOError, error:
Print("Exception creating symlink from " + spath + ' to ' + dpath + " Error: " + str(error), file=sys.stderr)
LG().Log('ERROR', "Exception creating symlink from " + spath + ' to ' + dpath + " Error: " + str(error))
return error
def MakeDirs(path):
error = None
try:
os.makedirs(path)
except OSError, error:
Print("Exception making dir " + path + " Error: " + str(error), file=sys.stderr)
LG().Log('ERROR', "Exception making dir " + path + " Error: " + str(error))
except IOError, error:
Print("Exception making dir " + path + " Error: " + str(error), file=sys.stderr)
LG().Log('ERROR', "Exception making dir " + path + " Error: " + str(error))
return error
def RemoveFile(path):
error = None
try:
os.remove(path)
except OSError, error:
Print("Exception removing file " + path + " Error: " + str(error), file=sys.stderr)
LG().Log('ERROR', "Exception removing file " + path + " Error: " + str(error))
except IOError, error:
Print("Exception removing file " + path + " Error: " + str(error), file=sys.stderr)
LG().Log('ERROR', "Exception removing file " + path + " Error: " + str(error))
return error
def CopyFile(spath, dpath):
error = None
if spath == dpath: # Nothing to copy!
return error
try:
shutil.copyfile(spath, dpath)
except OSError, error:
Print("Exception copying tree " + spath + ' to ' + dpath + " Error: " + str(error), file=sys.stderr)
LG().Log('ERROR', "Exception copying tree " + spath + ' to ' + dpath + " Error: " + str(error))
except IOError, error:
Print("Exception copying tree " + spath + ' to ' + dpath + " Error: " + str(error), file=sys.stderr)
LG().Log('ERROR', "Exception copying tree " + spath + ' to ' + dpath + " Error: " + str(error))
return error
def CompareFiles(DestinationPath, SourcePath, Checksum):
"""
If the files differ in size, return -1.
Reading and computing the hash here is done in a block-by-block manner,
in case the file is quite large.
"""
if SourcePath == DestinationPath: # Files are the same!
return 0
stat_dest = StatFile(DestinationPath)
stat_src = StatFile(SourcePath)
if stat_src.st_size != stat_dest.st_size:
return -1
if Checksum == "md5":
src_error = None
dest_error = None
src_hash = md5const()
dest_hash = md5const()
src_block = 'loopme'
dest_block = 'loopme'
src_file,src_error = opened_bin_w_error(SourcePath, 'rb')
if src_error:
Print("Exception opening source file " + SourcePath + " Error : " + str(src_error), file=sys.stderr)
LG().Log('ERROR', "Exception opening source file " + SourcePath + " Error : " + str(src_error))
return -1
dest_file, dest_error = opened_bin_w_error(DestinationPath, 'rb')
if dest_error:
Print("Exception opening destination file " + DestinationPath + " Error : " + str(dest_error), file=sys.stderr)
LG().Log('ERROR', "Exception opening destination file " + DestinationPath + " Error : " + str(dest_error))
src_file.close()
return -1
while src_block != '' and dest_block != '':
src_block = src_file.read(BLOCK_SIZE)
dest_block = dest_file.read(BLOCK_SIZE)
src_hash.update(src_block)
dest_hash.update(dest_block)
if src_hash.hexdigest() != dest_hash.hexdigest():
src_file.close()
dest_file.close()
return -1
if src_hash.hexdigest() == dest_hash.hexdigest():
src_file.close()
dest_file.close()
return 0
elif Checksum == "ctime":
if stat_src.st_ctime != stat_dest.st_ctime:
return -1
else:
return 0
elif Checksum == "mtime":
if stat_src.st_mtime != stat_dest.st_mtime:
return -1
else:
return 0
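# Semantics sketch for CompareFiles (illustrative paths): size is always
# compared first; for the supported Checksum values ('md5', 'ctime',
# 'mtime') it then returns 0 when the files are considered identical and
# -1 when they differ.
#
#     if CompareFiles('/etc/app.conf', '/srv/src/app.conf', 'md5') == -1:
#         pass  # contents differ; a copy would be needed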
def RemoveTree(path):
error = None
try:
shutil.rmtree(path)
    except OSError, error:
        Print("Exception removing folder " + path + " Error: " + str(error), file=sys.stderr)
        LG().Log('ERROR', "Exception removing folder " + path + " Error: " + str(error))
    except IOError, error:
        Print("Exception removing folder " + path + " Error: " + str(error), file=sys.stderr)
        LG().Log('ERROR', "Exception removing folder " + path + " Error: " + str(error))
return error
def RemovePath(path):
    error = None
    if os.path.islink(path) or os.path.isfile(path):
        error = RemoveFile(path)
    elif os.path.isdir(path):
        error = RemoveTree(path)
    else:
        Print("Error: Unknown file type for file: " + path, file=sys.stderr)
        LG().Log('ERROR', "Error: Unknown file type for file: " + path)
    return error
def TestOwnerGroupMode(DestinationPath, SourcePath, fc):
    stat_info = LStatFile(DestinationPath)
    if stat_info is None:
        return False
    if SourcePath:
        stat_info_src = LStatFile(SourcePath)
        if stat_info_src is None:
            return False
if fc.Owner:
try:
Specified_Owner_ID = pwd.getpwnam(fc.Owner)[2]
        except KeyError, error:
            Print("Exception obtaining uid from user name " + fc.Owner + " Error: " + str(error), file=sys.stderr)
            LG().Log('ERROR', "Exception obtaining uid from user name " + fc.Owner + " Error: " + str(error))
return False
if Specified_Owner_ID != pwd.getpwuid(stat_info.st_uid)[2]:
return False
elif SourcePath:
# Owner wasn't specified, if SourcePath is specified then check that the Owners match
if pwd.getpwuid(stat_info.st_uid)[2] != pwd.getpwuid(stat_info_src.st_uid)[2]:
return False
if fc.Group:
try:
Specified_Group_ID = grp.getgrnam(fc.Group)[2]
except KeyError, error:
Print("Exception obtaining gid from group name " + fc.Group + " Error: " + str(error), file=sys.stderr)
LG().Log('ERROR', "Exception obtaining gid from group name " + fc.Group + " Error: " + str(error))
return False
if Specified_Group_ID != grp.getgrgid(stat_info.st_gid)[2]:
return False
elif SourcePath:
# Group wasn't specified, if SourcePath is specified then check that the Groups match
if grp.getgrgid(stat_info.st_gid)[2] != grp.getgrgid(stat_info_src.st_gid)[2]:
return False
# Mode is irrelevant to symlinks
if not os.path.islink(DestinationPath):
if fc.Mode:
if str(oct(stat_info.st_mode))[-3:] != fc.Mode:
return False
elif SourcePath:
# Mode wasn't specified, if SourcePath is specified then check that the Modes match
if str(oct(stat_info.st_mode))[-3:] != str(oct(stat_info_src.st_mode))[-3:]:
return False
return True
def ConvertLongModeToNumeric(Mode):
    """
    Convert a 9-character symbolic mode string (user/group/other triplets,
    e.g. 'rwxr-xr--') into its three-digit octal string form.
    Raises on any invalid character.
    """
    if len(Mode) != 9:
        raise Exception("Error: Mode must be exactly 9 characters long")
    digits = ''
    for group in range(3):
        digit = 0
        for offset, (flag, value) in enumerate(zip('rwx', (4, 2, 1))):
            pos = group * 3 + offset
            c = Mode[pos]
            if c == flag:
                digit += value
            elif c != '-':
                raise Exception("Error: Invalid character for character "
                                + str(pos) + " in Mode")
        digits += str(digit)
    return digits
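# Examples (illustrative): each triplet maps r=4, w=2, x=1.
#
#     ConvertLongModeToNumeric('rwxr-xr--')  # -> '754'
#     ConvertLongModeToNumeric('rw-r--r--')  # -> '644'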
def SetOwnerGroupMode(DestinationPath, SourcePath, fc):
stat_info = LStatFile(DestinationPath)
if stat_info is None :
return False
if SourcePath:
stat_info_src = LStatFile(SourcePath)
if stat_info_src is None:
return False
if fc.Owner:
Specified_Owner_ID = pwd.getpwnam(fc.Owner)[2]
if Specified_Owner_ID != pwd.getpwuid(stat_info.st_uid)[2]:
Print("Changing owner of " + DestinationPath + " to " + str(Specified_Owner_ID))
LG().Log('INFO', "Changing owner of " + DestinationPath + " to " + str(Specified_Owner_ID))
if LChown(DestinationPath, Specified_Owner_ID, -1) is not None :
return False
elif SourcePath:
src_uid = pwd.getpwuid(stat_info_src.st_uid)[2]
if pwd.getpwuid(stat_info.st_uid)[2] != src_uid:
Print("Changing owner of " + DestinationPath + " to " + str(src_uid))
LG().Log('INFO', "Changing owner of " + DestinationPath + " to " + str(src_uid))
if LChown(DestinationPath, src_uid, -1) is not None :
return False
if fc.Group:
Specified_Group_ID = grp.getgrnam(fc.Group)[2]
if Specified_Group_ID != grp.getgrgid(stat_info.st_gid)[2]:
Print("Changing group of " + DestinationPath + " to " + str(Specified_Group_ID))
LG().Log('INFO', "Changing group of " + DestinationPath + " to " + str(Specified_Group_ID))
if LChown(DestinationPath, -1, Specified_Group_ID) is not None :
return False
elif SourcePath:
src_gid = grp.getgrgid(stat_info_src.st_gid)[2]
if grp.getgrgid(stat_info.st_gid)[2] != src_gid:
Print("Changing group of " + DestinationPath + " to " + str(src_gid))
LG().Log('INFO', "Changing group of " + DestinationPath + " to " + str(src_gid))
if LChown(DestinationPath, src_gid , -1) is not None :
return False
# Mode is irrelevant to symlinks
if not os.path.islink(DestinationPath):
if fc.Mode:
if str(oct(stat_info.st_mode))[-3:] != fc.Mode:
Print("Changing mode of " + DestinationPath + " to " + fc.Mode)
LG().Log('INFO', "Changing mode of " + DestinationPath + " to " + fc.Mode)
if Chmod(DestinationPath, fc.Mode) is not None :
return False
elif SourcePath:
src_mode = str(oct(stat_info_src.st_mode))[-3:]
if str(oct(stat_info.st_mode))[-3:] != src_mode:
Print("Changing mode of " + DestinationPath + " to " + src_mode)
LG().Log('INFO', "Changing mode of " + DestinationPath + " to " + src_mode)
if Chmod(DestinationPath, src_mode) is not None :
return False
return True
def SetDirectoryRecursive(DestinationPath, SourcePath, fc):
if not os.path.exists(DestinationPath):
MakeDirs(DestinationPath)
if SetOwnerGroupMode(DestinationPath, SourcePath, fc) is False:
return False
Destination_subfiles = ListDir(DestinationPath)
if Destination_subfiles is None:
return False
if not SourcePath:
# Enforce Owner/Group/Mode specified
for f in Destination_subfiles:
f_destpath = os.path.join(DestinationPath, f)
if not os.path.islink(f_destpath):
if os.path.isfile(f_destpath):
if SetOwnerGroupMode(f_destpath, "", fc) is False :
return False
elif os.path.isdir(f_destpath):
if fc.Recurse :
if SetDirectoryRecursive(f_destpath, "", fc) is False :
return False
return True
Source_subfiles = ListDir(SourcePath)
    # For all files in SourcePath's directory, ensure they exist with proper contents and stat in DestinationPath's directory
for f in Source_subfiles:
f_srcpath = os.path.join(SourcePath, f)
f_destpath = os.path.join(DestinationPath, f)
if os.path.islink(f_srcpath):
if TestLink(f_destpath, f_srcpath, fc) is False:
if SetLink(f_destpath, f_srcpath, fc) is False:
return False
elif os.path.isfile(f_srcpath):
if TestFile(f_destpath, f_srcpath, fc) is False:
if SetFile(f_destpath, f_srcpath, fc) is False:
return False
elif os.path.isdir(f_srcpath):
if fc.Recurse :
if SetDirectoryRecursive(f_destpath, f_srcpath, fc) is False:
return False
return True
def SetFile(DestinationPath, SourcePath, fc):
error = None
if os.path.exists(DestinationPath) and (os.path.islink(DestinationPath) or os.path.isdir(DestinationPath)):
if fc.Force :
RemovePath(DestinationPath)
else:
Print("Error: " + DestinationPath + " is not a file; cannot overwrite without the 'Force' option being true")
LG().Log("ERROR", DestinationPath + " is not a file; cannot overwrite without the 'Force' option being true")
return False
if SourcePath and len(SourcePath) > 0:
if '://' in SourcePath and fc.LocalPath == '':
ret = GetRemoteFile(fc)
if ret != 0:
raise Exception('Unable to retrieve remote resource '+fc.SourcePath+' Error is ' + str(ret))
else:
                if fc.LocalPath == '':  # Checksum != 'md5' and the remote copy is not newer than the destination's ctime/mtime, so no download was needed
return True
SourcePath = fc.LocalPath
should_copy_file = False
if os.path.isfile(DestinationPath):
if CompareFiles(DestinationPath, SourcePath, fc.Checksum) == -1:
should_copy_file = True
else:
should_copy_file = False
else:
should_copy_file = True
if should_copy_file:
if CopyFile(SourcePath, DestinationPath) is False :
return False
elif fc.Contents:
if WriteFile(DestinationPath, fc.Contents) is not None:
Print("Error: Unable to write file at " + DestinationPath)
LG().Log("ERROR", "Unable to write file at " + DestinationPath)
return False
else:
# Create a file with nothing in it
try:
open(DestinationPath, 'a').close()
except OSError, error:
Print("Exception creating file " + DestinationPath + " Error: " + str(error), file=sys.stderr)
LG().Log('ERROR', "Exception creating file " + DestinationPath + " Error: " + str(error))
except IOError, error:
Print("Exception creating file " + DestinationPath + " Error: " + str(error), file=sys.stderr)
LG().Log('ERROR', "Exception creating file " + DestinationPath + " Error: " + str(error))
SetOwnerGroupMode(DestinationPath, SourcePath, fc)
if len(fc.LocalPath) > 0 :
RemoveFile(fc.LocalPath)
return True
def SetDirectory(DestinationPath, SourcePath, fc):
if os.path.exists(DestinationPath) and not os.path.isdir(DestinationPath):
if fc.Force :
RemovePath(DestinationPath)
else:
Print("Error: Unable to overwrite currently existing non-directory object at " + DestinationPath + " without the Force option being true.")
LG().Log("ERROR", "Unable to overwrite currently existing non-directory object at " + DestinationPath + " without the Force option being true.")
return False
return SetDirectoryRecursive(DestinationPath, SourcePath, fc)
def SetLink(DestinationPath, SourcePath, fc):
if SourcePath is None or len(SourcePath) < 1 or not os.path.exists(SourcePath) :
Print("Error: Need a valid source path in order to create a new symbolic link.")
LG().Log("ERROR", "Need a valid source path in order to create a new symbolic link.")
return False
if os.path.exists(DestinationPath) and not os.path.islink(DestinationPath) :
if fc.Force :
RemovePath(DestinationPath)
else:
Print("Error: Unable to overwrite currently existing non-link object at " + DestinationPath + " without the Force option being true.")
LG().Log("ERROR", "Unable to overwrite currently existing non-link object at " + DestinationPath + " without the Force option being true.")
return False
if os.path.islink(SourcePath):
if fc.Links == "follow":
if os.path.isfile(SourcePath):
if SetFile(DestinationPath, os.path.realpath(SourcePath), fc) is False:
return False
elif os.path.isdir(SourcePath):
if SetDirectoryRecursive(DestinationPath, os.path.realpath(SourcePath), fc) is False:
return False
elif fc.Links == "manage":
if Symlink(os.readlink(SourcePath), DestinationPath) is not None:
return False
elif fc.Links == "ignore":
# Ignore all symlinks
return True
else:
if Symlink(SourcePath, DestinationPath) is not None:
return False
SetOwnerGroupMode(DestinationPath, SourcePath, fc)
return True
def SetShowMof(a):
global show_mof
show_mof = a
def ShowMof(op, DestinationPath, SourcePath, Ensure, Type, Force, Contents, Checksum, Recurse, Links, Owner, Group, Mode):
if not show_mof:
return
mof = ''
mof += op + ' nxFile MyFile\n'
mof += '{\n'
mof += ' DestinationPath = "' + DestinationPath + '"\n'
mof += ' SourcePath = "' + SourcePath + '"\n'
mof += ' Ensure = "' + Ensure + '"\n'
mof += ' Type = "' + Type + '"\n'
mof += ' Force = ' + str(Force) + '\n'
mof += ' Contents = "' + Contents + '"\n'
mof += ' Checksum = "' + Checksum + '"\n'
mof += ' Recurse = ' + str(Recurse) + '\n'
mof += ' Links = "' + Links + '"\n'
mof += ' Group = "' + Group + '"\n'
mof += ' Mode = "' + Mode + '"\n'
mof += ' Owner = "' + Owner + '"\n'
mof += '}\n'
f = open('./test_mofs.log', 'a')
Print(mof, file=f)
LG().Log('INFO', mof)
f.close()
def Set(DestinationPath, SourcePath, Ensure, Type, Force, Contents, Checksum, Recurse, Links, Owner, Group, Mode):
ShowMof('SET', DestinationPath, SourcePath, Ensure, Type, Force, Contents, Checksum, Recurse, Links, Owner, Group, Mode)
fc = FileContext(DestinationPath, SourcePath, Ensure, Type, Force, Contents, Checksum, Recurse, Links, Owner, Group, Mode)
if not DestinationPath:
return [-1]
if fc.Ensure == "present":
if fc.Type == "file":
if SetFile(DestinationPath, SourcePath, fc) is False:
return [-1]
elif fc.Type == "directory":
if SetDirectory(DestinationPath, SourcePath, fc) is False:
return [-1]
elif fc.Type == "link":
if SetLink(DestinationPath, SourcePath, fc) is False:
return [-1]
elif fc.Ensure == "absent":
RemovePath(DestinationPath)
return [0]
return [0]
def TestDirectory(DestinationPath, SourcePath, fc):
if not os.path.exists(DestinationPath) or not os.path.isdir(DestinationPath):
return False
if TestOwnerGroupMode(DestinationPath, SourcePath, fc) is False:
return False
if fc.Recurse is False:
return True
Destination_subfiles = ListDir(DestinationPath)
if Destination_subfiles is None:
return False
if not SourcePath:
# Enforce Owner/Group/Mode specified
for f in Destination_subfiles:
f_destpath = os.path.join(DestinationPath, f)
if not os.path.islink(f_destpath):
if os.path.isfile(f_destpath):
if TestOwnerGroupMode(f_destpath, "", fc) is False:
return False
elif os.path.isdir(f_destpath):
if TestDirectory(f_destpath, "", fc) is False:
return False
return True
Source_subfiles = ListDir(SourcePath)
if Source_subfiles is None:
return False
for f in Source_subfiles:
if f not in Destination_subfiles:
Print("File: " + f + " does not exist in: " + SourcePath)
LG().Log('ERROR', "File: " + f + " does not exist in: " + SourcePath)
return False
f_destpath = os.path.join(DestinationPath, f)
f_srcpath = os.path.join(SourcePath, f)
if os.path.islink(f_srcpath):
if TestLink(f_destpath, f_srcpath, fc) is False:
return False
elif os.path.isfile(f_srcpath):
if TestFile(f_destpath, f_srcpath, fc) is False:
return False
elif os.path.isdir(f_srcpath):
if TestDirectory(f_destpath, f_srcpath, fc) is False:
return False
return True
def TestFile(DestinationPath, SourcePath, fc):
if '://' in SourcePath and fc.LocalPath == '': # we cannot verify the remote has not changed until the Set
return False
if not os.path.exists(DestinationPath) or not os.path.isfile(DestinationPath) or os.path.islink(DestinationPath):
return False
if TestOwnerGroupMode(DestinationPath, SourcePath, fc) is False:
return False
if SourcePath and len(SourcePath) > 0:
if not os.path.isfile(SourcePath):
return False
if os.path.islink(SourcePath):
if fc.Links == "follow":
if os.path.isdir(os.path.realpath(SourcePath)):
Print("Error: Expecting a file, but source link points to directory")
LG().Log("ERROR", "Expecting a file, but source link points to directory")
return False
else:
if not os.path.islink(DestinationPath):
return False
if os.readlink(DestinationPath) != os.readlink(SourcePath):
return False
elif CompareFiles(DestinationPath, SourcePath, fc.Checksum) == -1:
return False
elif fc.Contents:
dest_file, error = ReadFile(DestinationPath)
if fc.Contents.encode('utf8') != dest_file:
return False
return True
def TestLink(DestinationPath, SourcePath, fc):
if SourcePath:
if os.path.islink(SourcePath):
if fc.Links == "follow":
if os.path.isdir(SourcePath):
if TestDirectory(DestinationPath, os.path.realpath(SourcePath), fc) is False:
return False
elif os.path.isfile(SourcePath):
if TestFile(DestinationPath, os.path.realpath(SourcePath), fc) is False:
return False
elif fc.Links == "manage":
if not os.path.islink(DestinationPath):
return False
if os.readlink(DestinationPath) != os.readlink(SourcePath):
return False
elif fc.Links == "ignore":
return True
else:
if not os.path.exists(DestinationPath) or not os.path.exists(SourcePath) or not os.path.islink(DestinationPath) :
return False
if os.readlink(DestinationPath) != SourcePath:
return False
    if not os.path.exists(DestinationPath):
return False
if TestOwnerGroupMode(DestinationPath, SourcePath, fc) is False:
return False
return True
def Test(DestinationPath, SourcePath, Ensure, Type, Force, Contents, Checksum, Recurse, Links, Owner, Group, Mode):
ShowMof('TEST', DestinationPath, SourcePath, Ensure, Type, Force, Contents, Checksum, Recurse, Links, Owner, Group, Mode)
fc = FileContext(DestinationPath, SourcePath, Ensure, Type, Force, Contents, Checksum, Recurse, Links, Owner, Group, Mode)
if not DestinationPath:
return [-1]
if fc.Ensure == "present":
if fc.Type == "file":
if TestFile(DestinationPath, SourcePath, fc) is False:
return [-1]
elif fc.Type == "directory":
if TestDirectory(DestinationPath, SourcePath, fc) is False:
return [-1]
elif fc.Type == "link":
if TestLink(DestinationPath, SourcePath, fc) is False:
return [-1]
elif fc.Ensure == "absent":
if os.path.exists(DestinationPath):
return [-1]
return [0]
return [0]
def Get(DestinationPath, SourcePath, Ensure, Type, Force, Contents, Checksum, Recurse, Links, Owner, Group, Mode):
ShowMof('GET', DestinationPath, SourcePath, Ensure, Type, Force, Contents, Checksum, Recurse, Links, Owner, Group, Mode)
if '://' in SourcePath and Type != 'file':
raise Exception('ERROR: Remote paths are only valid for Type = "file".')
if not DestinationPath:
Ensure = "absent"
ModifiedDate = 0
return [-1, DestinationPath, SourcePath, Ensure, Type, Force, Contents, Checksum, Recurse, Links, Owner, Group, Mode, ModifiedDate]
if not os.path.exists(DestinationPath):
Ensure = "absent"
ModifiedDate = 0
return [0, DestinationPath, SourcePath, Ensure, Type, Force, Contents, Checksum, Recurse, Links, Owner, Group, Mode, ModifiedDate]
stat_info = os.lstat(DestinationPath)
Owner = pwd.getpwuid(stat_info.st_uid)[0]
Group = grp.getgrgid(stat_info.st_gid)[0]
Mode = str(oct(stat_info.st_mode))[-3:]
if os.path.islink(DestinationPath):
Type = "link"
elif os.path.isfile(DestinationPath):
Type = "file"
elif os.path.isdir(DestinationPath):
Type = "directory"
ModifiedDate = stat_info.st_mtime
if Type == "directory":
Contents = repr(ListDir(DestinationPath))
elif Type == 'link':
if Links == 'manage' :
Contents = LStatFile(DestinationPath)
Contents = repr(Contents)
elif Links == 'follow':
if os.path.isdir(os.readlink(DestinationPath)):
Contents = repr(ListDir(DestinationPath))
else:
Contents, error = ReadFile1k(DestinationPath)
else :
Contents, error = ReadFile1k(DestinationPath)
if Contents is None:
Contents = ''
return [0, DestinationPath, SourcePath, Ensure, Type, Force, Contents, Checksum, Recurse, Links, Owner, Group, Mode, ModifiedDate]
def GetTimeFromString(s):
if s is None or len(s) == 0:
return None
fmt = []
fmt.append('%a, %d %b %Y %H:%M:%S %Z')
st = None
for f in fmt:
try:
st = time.strptime(s, f)
except ValueError:
continue
return st
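# Example (illustrative): parses an HTTP Last-Modified style timestamp into
# a time.struct_time, or returns None for unparseable input.
#
#     GetTimeFromString('Mon, 01 Jan 2024 00:00:00 GMT')  # struct_time
#     GetTimeFromString('not a date')                     # None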
def GetRemoteFile(fc):
req = urllib2.Request(fc.SourcePath)
try:
resp = urllib2.urlopen(req)
except urllib2.URLError , e:
Print(repr(e))
LG().Log('ERROR', repr(e))
return 1
fc.LocalPath = '/tmp/'+os.path.basename(fc.DestinationPath)+'_remote'
h = resp.info()
data = None
if fc.Checksum != 'md5' : # if not 'md5' check the last_modified header time before we download
lm = h.getheader('last-modified')
remote_mtime = GetTimeFromString(lm)
destination_mtime = None
dst_st = None
if os.path.exists(fc.DestinationPath):
dst_st = LStatFile(fc.DestinationPath)
if dst_st is not None:
if fc.Checksum == 'ctime':
destination_mtime = time.gmtime(dst_st.st_ctime)
else:
destination_mtime = time.gmtime(dst_st.st_mtime)
if remote_mtime is not None and destination_mtime is not None and destination_mtime >= remote_mtime:
data = ''
fc.LocalPath = ''
return 0
    data = 'keep going'
    hasWritten = False
    F = None
    try:
        F = open(fc.LocalPath, 'wb+')
        while data:
            data = resp.read(1048576)
            if data is not None and len(data) > 0:
                hasWritten = True
                F.write(data)
        if not hasWritten:
            LG().Log('ERROR', "Data at URL: " + fc.SourcePath + " was empty. Please ensure this file exists at this remote location.")
            F.close()
            os.unlink(fc.LocalPath)
            return 1
        F.close()
    except Exception, e:
        Print(repr(e))
        LG().Log('ERROR', repr(e))
        if F is not None:
            F.close()
        if os.path.exists(fc.LocalPath):
            os.unlink(fc.LocalPath)
        return 1
    return 0
class FileContext:
def __init__(self, DestinationPath, SourcePath, Ensure, Type, Force, Contents, Checksum, Recurse, Links, Owner, Group, Mode):
if not Checksum:
Checksum = "md5"
if not Type:
Type = "file"
if not Ensure:
Ensure = "present"
if not Links or len(Links) == 0:
Links = "follow"
self.DestinationPath = DestinationPath
self.SourcePath = SourcePath
if len(SourcePath) > 0 and '://' in SourcePath and Type != 'file':
raise Exception('ERROR: Remote paths are only valid for Type = file.')
self.LocalPath = ''
self.Ensure = Ensure.lower()
self.Type = Type.lower()
self.Force = Force
self.Contents = Contents
self.Checksum = Checksum.lower()
self.Recurse = Recurse
self.Links = Links.lower()
self.Owner = Owner
self.Group = Group
self.ModifiedDate = ''
error = None
if Mode:
if len(Mode) == 9:
try:
Mode = ConvertLongModeToNumeric(Mode)
except Exception, error:
Print("Exception in ConvertLongModeToNumeric on " + Mode + " Error: " + str(error), file=sys.stderr)
LG().Log('ERROR', "Exception in ConvertLongModeToNumeric on " + Mode + " Error: " + str(error))
elif len(Mode) == 3:
# Already in proper format
pass
else:
Print("Error: Invalid Mode: " + Mode)
LG().Log("ERROR", "Invalid Mode: " + Mode)
Mode = ""
self.Mode = Mode
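# End-to-end sketch (hypothetical values): constructing a FileContext the
# way Set()/Test() do, then checking whether a destination matches it.
def _example_filecontext_usage():
    fc = FileContext('/tmp/example.conf', '', 'present', 'file', False,
                     'key=value\n', 'md5', False, 'follow', '', '', '644')
    return TestFile(fc.DestinationPath, fc.SourcePath, fc)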
|
|
from future import standard_library
standard_library.install_aliases()
from builtins import object, str
from copy import deepcopy
from pickle import dumps
import simplejson
import os
import getpass
from socket import getfqdn
from uuid import uuid1
import numpy as np
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
import prov.model as pm
from ..external.six import string_types, text_type
from .. import get_info
from .filemanip import (md5, hashlib, hash_infile)
from .. import logging, __version__
iflogger = logging.getLogger('interface')
foaf = pm.Namespace("foaf", "http://xmlns.com/foaf/0.1/")
dcterms = pm.Namespace("dcterms", "http://purl.org/dc/terms/")
nipype_ns = pm.Namespace("nipype", "http://nipy.org/nipype/terms/")
niiri = pm.Namespace("niiri", "http://iri.nidash.org/")
crypto = pm.Namespace("crypto",
("http://id.loc.gov/vocabulary/preservation/"
"cryptographicHashFunctions/"))
get_id = lambda: niiri[uuid1().hex]
def get_attr_id(attr, skip=None):
dictwithhash, hashval = get_hashval(attr, skip=skip)
return niiri[hashval]
max_text_len = 1024000
def get_hashval(inputdict, skip=None):
"""Return a dictionary of our items with hashes for each file.
Searches through dictionary items and if an item is a file, it
calculates the md5 hash of the file contents and stores the
file name and hash value as the new key value.
However, the overall bunch hash is calculated only on the hash
value of a file. The path and name of the file are not used in
the overall hash calculation.
Returns
-------
dict_withhash : dict
Copy of our dictionary with the new file hashes included
with each file.
hashvalue : str
The md5 hash value of the traited spec
"""
dict_withhash = {}
dict_nofilename = OrderedDict()
keys = {}
for key in inputdict:
if skip is not None and key in skip:
continue
keys[key.uri] = key
for key in sorted(keys):
val = inputdict[keys[key]]
outname = key
try:
if isinstance(val, pm.URIRef):
val = val.decode()
except AttributeError:
pass
if isinstance(val, pm.QualifiedName):
val = val.uri
if isinstance(val, pm.Literal):
val = val.value
dict_nofilename[outname] = _get_sorteddict(val)
dict_withhash[outname] = _get_sorteddict(val, True)
sorted_dict = str(sorted(dict_nofilename.items()))
return (dict_withhash, md5(sorted_dict.encode()).hexdigest())
def _get_sorteddict(object, dictwithhash=False):
if isinstance(object, dict):
out = OrderedDict()
for key, val in sorted(object.items()):
if val:
out[key] = _get_sorteddict(val, dictwithhash)
elif isinstance(object, (list, tuple)):
out = []
for val in object:
if val:
out.append(_get_sorteddict(val, dictwithhash))
if isinstance(object, tuple):
out = tuple(out)
else:
if isinstance(object, string_types) and os.path.isfile(object):
hash = hash_infile(object)
if dictwithhash:
out = (object, hash)
else:
out = hash
elif isinstance(object, float):
out = '%.10f' % object
else:
out = object
return out
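# Behaviour sketch for _get_sorteddict (illustrative): dictionaries come
# back key-sorted, floats are fixed to 10 decimal places so hashing is
# stable, and existing file paths become (path, md5) pairs when
# dictwithhash is True.
#
#     _get_sorteddict({'b': 2.0, 'a': 1})
#     # -> OrderedDict([('a', 1), ('b', '2.0000000000')])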
def safe_encode(x, as_literal=True):
"""Encodes a python value for prov
"""
if x is None:
value = "Unknown"
if as_literal:
return pm.Literal(value, pm.XSD['string'])
else:
return value
try:
if isinstance(x, (str, string_types)):
if os.path.exists(x):
value = 'file://%s%s' % (getfqdn(), x)
if not as_literal:
return value
try:
return pm.URIRef(value)
except AttributeError:
return pm.Literal(value, pm.XSD['anyURI'])
else:
if len(x) > max_text_len:
                    value = x[:max_text_len - 13] + '...Clipped...'
else:
value = x
if not as_literal:
return value
if isinstance(value, str):
return pm.Literal(value, pm.XSD['string'])
else:
return pm.Literal(text_type(value, 'utf-8'), pm.XSD['string'])
if isinstance(x, int):
if not as_literal:
return x
return pm.Literal(int(x), pm.XSD['integer'])
if isinstance(x, float):
if not as_literal:
return x
return pm.Literal(x, pm.XSD['float'])
if isinstance(x, dict):
outdict = {}
for key, value in list(x.items()):
encoded_value = safe_encode(value, as_literal=False)
if isinstance(encoded_value, pm.Literal):
outdict[key] = encoded_value.json_representation()
else:
outdict[key] = encoded_value
if not as_literal:
return simplejson.dumps(outdict)
return pm.Literal(simplejson.dumps(outdict), pm.XSD['string'])
if isinstance(x, list):
try:
nptype = np.array(x).dtype
if nptype == np.dtype(object):
raise ValueError('dtype object')
except ValueError as e:
outlist = []
for value in x:
encoded_value = safe_encode(value, as_literal=False)
if isinstance(encoded_value, pm.Literal):
outlist.append(encoded_value.json_representation())
else:
outlist.append(encoded_value)
else:
outlist = x
if not as_literal:
return simplejson.dumps(outlist)
return pm.Literal(simplejson.dumps(outlist), pm.XSD['string'])
if not as_literal:
return dumps(x)
return pm.Literal(dumps(x), nipype_ns['pickle'])
except TypeError as e:
iflogger.debug(e)
value = "Could not encode: " + str(e)
if not as_literal:
return value
return pm.Literal(value, pm.XSD['string'])
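# Usage sketch (illustrative values): safe_encode wraps plain Python values
# in prov literals, maps existing paths to file:// URIs, and falls back to
# a pickled literal for anything it cannot serialize directly.
#
#     safe_encode('hello')              # pm.Literal('hello', xsd:string)
#     safe_encode(3, as_literal=False)  # 3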
def prov_encode(graph, value, create_container=True):
if isinstance(value, list) and create_container:
if len(value) == 0:
encoded_literal = safe_encode(value)
attr = {pm.PROV['value']: encoded_literal}
id = get_attr_id(attr)
entity = graph.entity(id, attr)
elif len(value) > 1:
try:
entities = []
for item in value:
item_entity = prov_encode(graph, item)
entities.append(item_entity)
if isinstance(item, list):
continue
if not isinstance(list(item_entity.value)[0], string_types):
raise ValueError('Not a string literal')
if 'file://' not in list(item_entity.value)[0]:
raise ValueError('No file found')
id = get_id()
entity = graph.collection(identifier=id)
for item_entity in entities:
graph.hadMember(id, item_entity)
except ValueError as e:
iflogger.debug(e)
entity = prov_encode(graph, value, create_container=False)
else:
entity = prov_encode(graph, value[0])
else:
encoded_literal = safe_encode(value)
attr = {pm.PROV['value']: encoded_literal}
if isinstance(value, string_types) and os.path.exists(value):
attr.update({pm.PROV['location']: encoded_literal})
if not os.path.isdir(value):
sha512 = hash_infile(value, crypto=hashlib.sha512)
attr.update({crypto['sha512']: pm.Literal(sha512,
pm.XSD['string'])})
id = get_attr_id(attr, skip=[pm.PROV['location'],
pm.PROV['value']])
else:
id = get_attr_id(attr, skip=[pm.PROV['location']])
else:
id = get_attr_id(attr)
entity = graph.entity(id, attr)
return entity
def write_provenance(results, filename='provenance', format='all'):
ps = ProvStore()
ps.add_results(results)
return ps.write_provenance(filename=filename, format=format)
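# Example (hypothetical `results` object from a nipype interface run):
#
#     g = write_provenance(results, filename='provenance', format='provn')
#
# writes 'provenance.provn' and returns the underlying ProvDocument.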
class ProvStore(object):
def __init__(self):
self.g = pm.ProvDocument()
self.g.add_namespace(foaf)
self.g.add_namespace(dcterms)
self.g.add_namespace(nipype_ns)
self.g.add_namespace(niiri)
def add_results(self, results, keep_provenance=False):
if keep_provenance and results.provenance:
self.g = deepcopy(results.provenance)
return self.g
runtime = results.runtime
interface = results.interface
inputs = results.inputs
outputs = results.outputs
classname = interface.__name__
modulepath = "{0}.{1}".format(interface.__module__, interface.__name__)
activitytype = ''.join([i.capitalize() for i in modulepath.split('.')])
a0_attrs = {nipype_ns['module']: interface.__module__,
nipype_ns["interface"]: classname,
pm.PROV["type"]: nipype_ns[activitytype],
pm.PROV["label"]: classname,
nipype_ns['duration']: safe_encode(runtime.duration),
nipype_ns['workingDirectory']: safe_encode(runtime.cwd),
nipype_ns['returnCode']: safe_encode(runtime.returncode),
nipype_ns['platform']: safe_encode(runtime.platform),
nipype_ns['version']: safe_encode(runtime.version),
}
a0_attrs[foaf["host"]] = pm.Literal(runtime.hostname,
pm.XSD['anyURI'])
try:
a0_attrs.update({nipype_ns['command']: safe_encode(runtime.cmdline)})
a0_attrs.update({nipype_ns['commandPath']:
safe_encode(runtime.command_path)})
a0_attrs.update({nipype_ns['dependencies']:
safe_encode(runtime.dependencies)})
except AttributeError:
pass
a0 = self.g.activity(get_id(), runtime.startTime, runtime.endTime,
a0_attrs)
# environment
id = get_id()
env_collection = self.g.collection(id)
env_collection.add_attributes({pm.PROV['type']:
nipype_ns['Environment'],
pm.PROV['label']: "Environment"})
self.g.used(a0, id)
# write environment entities
for idx, (key, val) in enumerate(sorted(runtime.environ.items())):
if key not in ['PATH', 'FSLDIR', 'FREESURFER_HOME', 'ANTSPATH',
'CAMINOPATH', 'CLASSPATH', 'LD_LIBRARY_PATH',
'DYLD_LIBRARY_PATH', 'FIX_VERTEX_AREA',
'FSF_OUTPUT_FORMAT', 'FSLCONFDIR', 'FSLOUTPUTTYPE',
'LOGNAME', 'USER',
'MKL_NUM_THREADS', 'OMP_NUM_THREADS']:
continue
in_attr = {pm.PROV["label"]: key,
nipype_ns["environmentVariable"]: key,
pm.PROV["value"]: safe_encode(val)}
id = get_attr_id(in_attr)
self.g.entity(id, in_attr)
self.g.hadMember(env_collection, id)
# write input entities
if inputs:
id = get_id()
input_collection = self.g.collection(id)
input_collection.add_attributes({pm.PROV['type']:
nipype_ns['Inputs'],
pm.PROV['label']: "Inputs"})
# write input entities
for idx, (key, val) in enumerate(sorted(inputs.items())):
in_entity = prov_encode(self.g, val).identifier
self.g.hadMember(input_collection, in_entity)
used_attr = {pm.PROV["label"]: key,
nipype_ns["inPort"]: key}
self.g.used(activity=a0, entity=in_entity,
other_attributes=used_attr)
# write output entities
if outputs:
id = get_id()
output_collection = self.g.collection(id)
if not isinstance(outputs, dict):
outputs = outputs.get_traitsfree()
output_collection.add_attributes({pm.PROV['type']:
nipype_ns['Outputs'],
pm.PROV['label']:
"Outputs"})
self.g.wasGeneratedBy(output_collection, a0)
# write output entities
for idx, (key, val) in enumerate(sorted(outputs.items())):
out_entity = prov_encode(self.g, val).identifier
self.g.hadMember(output_collection, out_entity)
gen_attr = {pm.PROV["label"]: key,
nipype_ns["outPort"]: key}
self.g.generation(out_entity, activity=a0,
other_attributes=gen_attr)
# write runtime entities
id = get_id()
runtime_collection = self.g.collection(id)
runtime_collection.add_attributes({pm.PROV['type']:
nipype_ns['Runtime'],
pm.PROV['label']:
"RuntimeInfo"})
self.g.wasGeneratedBy(runtime_collection, a0)
for key, value in sorted(runtime.items()):
if not value:
continue
if key not in ['stdout', 'stderr', 'merged']:
continue
attr = {pm.PROV["label"]: key,
nipype_ns[key]: safe_encode(value)}
id = get_id()
            self.g.entity(id, attr)
self.g.hadMember(runtime_collection, id)
# create agents
user_attr = {pm.PROV["type"]: pm.PROV["Person"],
pm.PROV["label"]: getpass.getuser(),
foaf["name"]: safe_encode(getpass.getuser())}
user_agent = self.g.agent(get_attr_id(user_attr), user_attr)
agent_attr = {pm.PROV["type"]: pm.PROV["SoftwareAgent"],
pm.PROV["label"]: "Nipype",
foaf["name"]: safe_encode("Nipype"),
nipype_ns["version"]: __version__}
for key, value in list(get_info().items()):
agent_attr.update({nipype_ns[key]: safe_encode(value)})
software_agent = self.g.agent(get_attr_id(agent_attr), agent_attr)
self.g.wasAssociatedWith(a0, user_agent, None, None,
{pm.PROV["hadRole"]: nipype_ns["LoggedInUser"]})
self.g.wasAssociatedWith(a0, software_agent)
return self.g
def write_provenance(self, filename='provenance', format='all'):
if format in ['provn', 'all']:
with open(filename + '.provn', 'wt') as fp:
fp.writelines(self.g.get_provn())
try:
if format in ['rdf', 'all']:
if len(self.g.bundles) == 0:
rdf_format = 'turtle'
ext = '.ttl'
else:
rdf_format = 'trig'
ext = '.trig'
self.g.serialize(filename + ext, format='rdf', rdf_format=rdf_format)
if format in ['jsonld']:
self.g.serialize(filename + '.jsonld', format='rdf', rdf_format='json-ld', indent=4)
except pm.serializers.DoNotExist:
pass
return self.g
|
|
"""Test functions for 1D array set operations.
"""
from __future__ import division, absolute_import, print_function
from numpy.testing import *
import numpy as np
from numpy.lib.arraysetops import *
import warnings
class TestSetOps(TestCase):
def test_unique(self):
def check_all(a, b, i1, i2, dt):
msg = "check values failed for type '%s'" % dt
v = unique(a)
assert_array_equal(v, b, msg)
msg = "check indexes failed for type '%s'" % dt
v, j = unique(a, 1, 0)
assert_array_equal(v, b, msg)
assert_array_equal(j, i1, msg)
msg = "check reverse indexes failed for type '%s'" % dt
v, j = unique(a, 0, 1)
assert_array_equal(v, b, msg)
assert_array_equal(j, i2, msg)
msg = "check with all indexes failed for type '%s'" % dt
v, j1, j2 = unique(a, 1, 1)
assert_array_equal(v, b, msg)
assert_array_equal(j1, i1, msg)
assert_array_equal(j2, i2, msg)
a = [5, 7, 1, 2, 1, 5, 7]*10
b = [1, 2, 5, 7]
i1 = [2, 3, 0, 1]
i2 = [2, 3, 0, 1, 0, 2, 3]*10
# test for numeric arrays
types = []
types.extend(np.typecodes['AllInteger'])
types.extend(np.typecodes['AllFloat'])
types.append('datetime64[D]')
types.append('timedelta64[D]')
for dt in types:
aa = np.array(a, dt)
bb = np.array(b, dt)
check_all(aa, bb, i1, i2, dt)
# test for object arrays
dt = 'O'
aa = np.empty(len(a), dt)
aa[:] = a
bb = np.empty(len(b), dt)
bb[:] = b
check_all(aa, bb, i1, i2, dt)
# test for structured arrays
dt = [('', 'i'), ('', 'i')]
aa = np.array(list(zip(a, a)), dt)
bb = np.array(list(zip(b, b)), dt)
check_all(aa, bb, i1, i2, dt)
def test_intersect1d(self):
# unique inputs
a = np.array([5, 7, 1, 2])
b = np.array([2, 4, 3, 1, 5])
ec = np.array([1, 2, 5])
c = intersect1d(a, b, assume_unique=True)
assert_array_equal(c, ec)
# non-unique inputs
a = np.array([5, 5, 7, 1, 2])
b = np.array([2, 1, 4, 3, 3, 1, 5])
ed = np.array([1, 2, 5])
c = intersect1d(a, b)
assert_array_equal(c, ed)
assert_array_equal([], intersect1d([], []))
def test_setxor1d(self):
a = np.array([5, 7, 1, 2])
b = np.array([2, 4, 3, 1, 5])
ec = np.array([3, 4, 7])
c = setxor1d(a, b)
assert_array_equal(c, ec)
a = np.array([1, 2, 3])
b = np.array([6, 5, 4])
ec = np.array([1, 2, 3, 4, 5, 6])
c = setxor1d(a, b)
assert_array_equal(c, ec)
a = np.array([1, 8, 2, 3])
b = np.array([6, 5, 4, 8])
ec = np.array([1, 2, 3, 4, 5, 6])
c = setxor1d(a, b)
assert_array_equal(c, ec)
assert_array_equal([], setxor1d([], []))
def test_ediff1d(self):
zero_elem = np.array([])
one_elem = np.array([1])
two_elem = np.array([1, 2])
assert_array_equal([], ediff1d(zero_elem))
assert_array_equal([0], ediff1d(zero_elem, to_begin=0))
assert_array_equal([0], ediff1d(zero_elem, to_end=0))
assert_array_equal([-1, 0], ediff1d(zero_elem, to_begin=-1, to_end=0))
assert_array_equal([], ediff1d(one_elem))
assert_array_equal([1], ediff1d(two_elem))
def test_in1d(self):
# we use two different sizes for the b array here to test the
# two different paths in in1d().
for mult in (1, 10):
            # One check without np.array, to make sure lists are handled correctly
a = [5, 7, 1, 2]
b = [2, 4, 3, 1, 5] * mult
ec = np.array([True, False, True, True])
c = in1d(a, b, assume_unique=True)
assert_array_equal(c, ec)
a[0] = 8
ec = np.array([False, False, True, True])
c = in1d(a, b, assume_unique=True)
assert_array_equal(c, ec)
a[0], a[3] = 4, 8
ec = np.array([True, False, True, False])
c = in1d(a, b, assume_unique=True)
assert_array_equal(c, ec)
a = np.array([5, 4, 5, 3, 4, 4, 3, 4, 3, 5, 2, 1, 5, 5])
b = [2, 3, 4] * mult
ec = [False, True, False, True, True, True, True, True, True, False,
True, False, False, False]
c = in1d(a, b)
assert_array_equal(c, ec)
b = b + [5, 5, 4] * mult
ec = [True, True, True, True, True, True, True, True, True, True,
True, False, True, True]
c = in1d(a, b)
assert_array_equal(c, ec)
a = np.array([5, 7, 1, 2])
b = np.array([2, 4, 3, 1, 5] * mult)
ec = np.array([True, False, True, True])
c = in1d(a, b)
assert_array_equal(c, ec)
a = np.array([5, 7, 1, 1, 2])
b = np.array([2, 4, 3, 3, 1, 5] * mult)
ec = np.array([True, False, True, True, True])
c = in1d(a, b)
assert_array_equal(c, ec)
a = np.array([5, 5])
b = np.array([2, 2] * mult)
ec = np.array([False, False])
c = in1d(a, b)
assert_array_equal(c, ec)
a = np.array([5])
b = np.array([2])
ec = np.array([False])
c = in1d(a, b)
assert_array_equal(c, ec)
assert_array_equal(in1d([], []), [])
def test_in1d_char_array(self):
a = np.array(['a', 'b', 'c', 'd', 'e', 'c', 'e', 'b'])
b = np.array(['a', 'c'])
ec = np.array([True, False, True, False, False, True, False, False])
c = in1d(a, b)
assert_array_equal(c, ec)
def test_in1d_invert(self):
"Test in1d's invert parameter"
# We use two different sizes for the b array here to test the
# two different paths in in1d().
for mult in (1, 10):
a = np.array([5, 4, 5, 3, 4, 4, 3, 4, 3, 5, 2, 1, 5, 5])
b = [2, 3, 4] * mult
assert_array_equal(np.invert(in1d(a, b)), in1d(a, b, invert=True))
def test_in1d_ravel(self):
        # Test that in1d ravels its input arrays. This is not documented
        # behavior, however. The test is to ensure consistency.
a = np.arange(6).reshape(2, 3)
b = np.arange(3, 9).reshape(3, 2)
long_b = np.arange(3, 63).reshape(30, 2)
ec = np.array([False, False, False, True, True, True])
assert_array_equal(in1d(a, b, assume_unique=True), ec)
assert_array_equal(in1d(a, b, assume_unique=False), ec)
assert_array_equal(in1d(a, long_b, assume_unique=True), ec)
assert_array_equal(in1d(a, long_b, assume_unique=False), ec)
def test_union1d(self):
a = np.array([5, 4, 7, 1, 2])
b = np.array([2, 4, 3, 3, 2, 1, 5])
ec = np.array([1, 2, 3, 4, 5, 7])
c = union1d(a, b)
assert_array_equal(c, ec)
assert_array_equal([], union1d([], []))
def test_setdiff1d(self):
a = np.array([6, 5, 4, 7, 1, 2, 7, 4])
b = np.array([2, 4, 3, 3, 2, 1, 5])
ec = np.array([6, 7])
c = setdiff1d(a, b)
assert_array_equal(c, ec)
a = np.arange(21)
b = np.arange(19)
ec = np.array([19, 20])
c = setdiff1d(a, b)
assert_array_equal(c, ec)
assert_array_equal([], setdiff1d([], []))
def test_setdiff1d_char_array(self):
a = np.array(['a', 'b', 'c'])
b = np.array(['a', 'b', 's'])
assert_array_equal(setdiff1d(a, b), np.array(['c']))
def test_manyways(self):
a = np.array([5, 7, 1, 2, 8])
b = np.array([9, 8, 2, 4, 3, 1, 5])
c1 = setxor1d(a, b)
aux1 = intersect1d(a, b)
aux2 = union1d(a, b)
c2 = setdiff1d(aux2, aux1)
assert_array_equal(c1, c2)
if __name__ == "__main__":
run_module_suite()
|
|
from functools import lru_cache
import io
import os
from pathlib import Path
import sys
from typing import (
Any,
Dict,
Iterable,
Iterator,
List,
Optional,
Pattern,
Sequence,
Tuple,
Union,
TYPE_CHECKING,
)
from mypy_extensions import mypyc_attr
from pathspec import PathSpec
from pathspec.patterns.gitwildmatch import GitWildMatchPatternError
if sys.version_info >= (3, 11):
import tomllib
else:
import tomli as tomllib
from black.output import err
from black.report import Report
from black.handle_ipynb_magics import jupyter_dependencies_are_installed
if TYPE_CHECKING:
import colorama # noqa: F401
@lru_cache()
def find_project_root(srcs: Sequence[str]) -> Tuple[Path, str]:
"""Return a directory containing .git, .hg, or pyproject.toml.
That directory will be a common parent of all files and directories
passed in `srcs`.
If no directory in the tree contains a marker that would specify it's the
project root, the root of the file system is returned.
Returns a two-tuple with the first element as the project root path and
the second element as a string describing the method by which the
project root was discovered.
"""
if not srcs:
srcs = [str(Path.cwd().resolve())]
path_srcs = [Path(Path.cwd(), src).resolve() for src in srcs]
# A list of lists of parents for each 'src'. 'src' is included as a
# "parent" of itself if it is a directory
src_parents = [
list(path.parents) + ([path] if path.is_dir() else []) for path in path_srcs
]
common_base = max(
set.intersection(*(set(parents) for parents in src_parents)),
key=lambda path: path.parts,
)
for directory in (common_base, *common_base.parents):
if (directory / ".git").exists():
return directory, ".git directory"
if (directory / ".hg").is_dir():
return directory, ".hg directory"
if (directory / "pyproject.toml").is_file():
return directory, "pyproject.toml"
return directory, "file system root"
def find_pyproject_toml(path_search_start: Tuple[str, ...]) -> Optional[str]:
"""Find the absolute filepath to a pyproject.toml if it exists"""
path_project_root, _ = find_project_root(path_search_start)
path_pyproject_toml = path_project_root / "pyproject.toml"
if path_pyproject_toml.is_file():
return str(path_pyproject_toml)
try:
path_user_pyproject_toml = find_user_pyproject_toml()
return (
str(path_user_pyproject_toml)
if path_user_pyproject_toml.is_file()
else None
)
except (PermissionError, RuntimeError) as e:
# We do not have access to the user-level config directory, so ignore it.
err(f"Ignoring user configuration directory due to {e!r}")
return None
@mypyc_attr(patchable=True)
def parse_pyproject_toml(path_config: str) -> Dict[str, Any]:
"""Parse a pyproject toml file, pulling out relevant parts for Black
If parsing fails, will raise a tomllib.TOMLDecodeError
"""
with open(path_config, "rb") as f:
pyproject_toml = tomllib.load(f)
config = pyproject_toml.get("tool", {}).get("black", {})
return {k.replace("--", "").replace("-", "_"): v for k, v in config.items()}
@lru_cache()
def find_user_pyproject_toml() -> Path:
r"""Return the path to the top-level user configuration for black.
This looks for ~\.black on Windows and ~/.config/black on Linux and other
Unix systems.
May raise:
- RuntimeError: if the current user has no homedir
- PermissionError: if the current process cannot access the user's homedir
"""
if sys.platform == "win32":
# Windows
user_config_path = Path.home() / ".black"
else:
config_root = os.environ.get("XDG_CONFIG_HOME", "~/.config")
user_config_path = Path(config_root).expanduser() / "black"
return user_config_path.resolve()
@lru_cache()
def get_gitignore(root: Path) -> PathSpec:
"""Return a PathSpec matching gitignore content if present."""
gitignore = root / ".gitignore"
lines: List[str] = []
if gitignore.is_file():
with gitignore.open(encoding="utf-8") as gf:
lines = gf.readlines()
try:
return PathSpec.from_lines("gitwildmatch", lines)
except GitWildMatchPatternError as e:
err(f"Could not parse {gitignore}: {e}")
raise
def normalize_path_maybe_ignore(
path: Path,
root: Path,
report: Optional[Report] = None,
) -> Optional[str]:
"""Normalize `path`. May return `None` if `path` was ignored.
`report` is where "path ignored" output goes.
"""
try:
abspath = path if path.is_absolute() else Path.cwd() / path
normalized_path = abspath.resolve()
try:
root_relative_path = normalized_path.relative_to(root).as_posix()
except ValueError:
if report:
report.path_ignored(
path, f"is a symbolic link that points outside {root}"
)
return None
except OSError as e:
if report:
report.path_ignored(path, f"cannot be read because {e}")
return None
return root_relative_path
def path_is_excluded(
normalized_path: str,
pattern: Optional[Pattern[str]],
) -> bool:
match = pattern.search(normalized_path) if pattern else None
return bool(match and match.group(0))
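# Example (illustrative): with a compiled pattern r"/build/", the normalized
# path "/build/lib/x.py" is excluded while "/src/x.py" is not.
#
#     path_is_excluded("/build/lib/x.py", re.compile(r"/build/"))  # True
#     path_is_excluded("/src/x.py", re.compile(r"/build/"))        # False
#     path_is_excluded("/src/x.py", None)                          # False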
def gen_python_files(
paths: Iterable[Path],
root: Path,
include: Pattern[str],
exclude: Pattern[str],
extend_exclude: Optional[Pattern[str]],
force_exclude: Optional[Pattern[str]],
report: Report,
gitignore: Optional[PathSpec],
*,
verbose: bool,
quiet: bool,
) -> Iterator[Path]:
"""Generate all files under `path` whose paths are not excluded by the
`exclude_regex`, `extend_exclude`, or `force_exclude` regexes,
but are included by the `include` regex.
Symbolic links pointing outside of the `root` directory are ignored.
`report` is where output about exclusions goes.
"""
assert root.is_absolute(), f"INTERNAL ERROR: `root` must be absolute but is {root}"
for child in paths:
normalized_path = normalize_path_maybe_ignore(child, root, report)
if normalized_path is None:
continue
# First ignore files matching .gitignore, if passed
if gitignore is not None and gitignore.match_file(normalized_path):
report.path_ignored(child, "matches the .gitignore file content")
continue
        # Then ignore with the `--exclude`, `--extend-exclude`, and `--force-exclude` options.
normalized_path = "/" + normalized_path
if child.is_dir():
normalized_path += "/"
if path_is_excluded(normalized_path, exclude):
report.path_ignored(child, "matches the --exclude regular expression")
continue
if path_is_excluded(normalized_path, extend_exclude):
report.path_ignored(
child, "matches the --extend-exclude regular expression"
)
continue
if path_is_excluded(normalized_path, force_exclude):
report.path_ignored(child, "matches the --force-exclude regular expression")
continue
if child.is_dir():
# If gitignore is None, gitignore usage is disabled, while a Falsey
# gitignore is when the directory doesn't have a .gitignore file.
yield from gen_python_files(
child.iterdir(),
root,
include,
exclude,
extend_exclude,
force_exclude,
report,
gitignore + get_gitignore(child) if gitignore is not None else None,
verbose=verbose,
quiet=quiet,
)
elif child.is_file():
if child.suffix == ".ipynb" and not jupyter_dependencies_are_installed(
verbose=verbose, quiet=quiet
):
continue
include_match = include.search(normalized_path) if include else True
if include_match:
yield child
def wrap_stream_for_windows(
f: io.TextIOWrapper,
) -> Union[io.TextIOWrapper, "colorama.AnsiToWin32"]:
"""
Wrap stream with colorama's wrap_stream so colors are shown on Windows.
If `colorama` is unavailable, the original stream is returned unmodified.
Otherwise, the `wrap_stream()` function determines whether the stream needs
to be wrapped for a Windows environment and will accordingly either return
an `AnsiToWin32` wrapper or the original stream.
"""
try:
from colorama.initialise import wrap_stream
except ImportError:
return f
else:
# Set `strip=False` to avoid needing to modify test_express_diff_with_color.
return wrap_stream(f, convert=None, strip=False, autoreset=False, wrap=True)
|
|
"""LogicalPartition interface."""
from paxes_cinder.k2aclient import _
from paxes_cinder.k2aclient import base
from paxes_cinder.k2aclient.v1 import k2uom
from paxes_cinder.k2aclient.v1 import k2web
from paxes_cinder.k2aclient.exceptions import K2JobFailure
from eventlet import greenthread
class LogicalPartitionManager(base.ManagerWithFind):
"""Manage :class:`LogicalPartition` resources."""
resource_class = k2uom.LogicalPartition
def new(self):
return self.resource_class(self, None)
def list(self, managedsystem, xa=None):
"""Get a list of all LogicalPartitions for a particular
ManagedSystem accessed through a particular hmc.
:rtype: list of :class:`LogicalPartition`.
"""
return self._list("/rest/api/uom/ManagedSystem/%s/LogicalPartition"
% managedsystem,
xa=xa)
def listasroot(self, xa=None):
"""Get a list of all LogicalPartitions
accessed through a particular hmc.
:rtype: list of :class:`LogicalPartition`.
"""
return self._list("/rest/api/uom/LogicalPartition", xa=xa)
def get(self, managedsystem, logicalpartition, xa=None):
"""Given managedsytem, get a specific LogicalPartition.
:param logicalpartition: The ID of the :class:`LogicalPartition`.
:rtype: :class:`LogicalPartition`
"""
return self._get("/rest/api/uom/ManagedSystem/%s/LogicalPartition/%s"
% (managedsystem, logicalpartition,),
xa=xa)
def getasroot(self, logicalpartition, xa=None):
"""Get a specific LogicalPartition.
:param logicalpartition: The ID of the :class:`LogicalPartition`.
:rtype: :class:`LogicalPartition`
"""
return self._get("/rest/api/uom/LogicalPartition/%s" %
logicalpartition,
xa=xa)
def create(self, logicalpartition, child=None, xa=None):
"""Create the specified instance
"""
return self._create("uom", logicalpartition,
child=child,
xa=xa)
def delete(self, logicalpartition, child=None, xa=None):
"""Delete the specified instance
"""
return self._delete("uom", logicalpartition,
child=child,
xa=xa)
def deletebyid(self, logicalpartition_id,
child_type=None, child_id=None, xa=None):
"""Delete the specified instance
"""
return self._deletebyid("uom", "LogicalPartition", logicalpartition_id,
child_type=child_type,
child_id=child_id,
xa=xa)
########
def power_on(self, logicalpartition, xa=None):
"""For specified logicalpartition, power it on
        :param logicalpartition: Instance of the :class:`LogicalPartition`
"""
if self.api.client.k2operator is None:
self.api.client.authenticate()
jpc = k2web.JobParameter_Collection()
jrequest = self.api.web_job.getjob(logicalpartition, 'PowerOn',
xa=xa)
jrequest.job_parameters = jpc
jresponse = self.api.web_job.runjob(logicalpartition, jrequest,
xa=xa)
k2respi = jresponse._k2resp
while jresponse.status == 'NOT_STARTED' or \
jresponse.status == 'RUNNING':
greenthread.sleep(1)
jresponse = self.api.web_job.readjob(jresponse.job_id,
xa=xa)
if not jresponse.status.startswith("COMPLETED"):
diagfspeci = self.api.exclogger.emit("JOB", "power_on",
k2respi)
diagfspec = self.api.exclogger.emit("JOB", "power_on",
jresponse._k2resp)
msg = _("k2aclient:"
" during power_on,"
" for LogicalPartition: >%(logicalpartition.id)s<,"
" failed to power on due to"
" job failure,"
" job_id: >%(jresponse.job_id)s<,"
" status: >%(jresponse.status)s<,"
" input K2 job diagnostics have been"
" written to: >%(diagfspeci)s<,"
" response k2 job diagnostics have been"
" written to: >%(diagfspec)s<")
raise K2JobFailure(msg %
{"logicalpartition.id": logicalpartition.id,
"jresponse.job_id": jresponse.job_id,
"jresponse.status": jresponse.status,
"diagfspeci": diagfspeci,
"diagfspec": diagfspec, },
jresponse._k2resp,
diagfspeci=diagfspeci,
diagfspec=diagfspec)
return jresponse.status, jresponse.job_id
def power_off(self, logicalpartition, xa=None):
"""For specified logicalpartition, power it on
:param logicalpartition_id: Instance of the :class:`LogicalPartition`
"""
if self.api.client.k2operator is None:
self.api.client.authenticate()
jpc = k2web.JobParameter_Collection()
jp = k2web.JobParameter()
jp.parameter_name = "operation"
jp.parameter_value = "shutdown"
jpc.job_parameter.append(jp)
jp = k2web.JobParameter()
jp.parameter_name = "immediate"
jp.parameter_value = "true"
jpc.job_parameter.append(jp)
jrequest = self.api.web_job.getjob(logicalpartition, 'PowerOff',
xa=xa)
jrequest.job_parameters = jpc
jresponse = self.api.web_job.runjob(logicalpartition, jrequest,
xa=xa)
k2respi = jresponse._k2resp
while jresponse.status == 'NOT_STARTED' or \
jresponse.status == 'RUNNING':
greenthread.sleep(1)
jresponse = self.api.web_job.readjob(jresponse.job_id,
xa=xa)
if not jresponse.status.startswith("COMPLETED"):
diagfspeci = self.api.exclogger.emit("JOB", "power_off",
k2respi)
diagfspec = self.api.exclogger.emit("JOB", "power_off",
jresponse._k2resp)
msg = _("k2aclient:"
" during power_on,"
" for LogicalPartition: >%(logicalpartition.id)s<,"
" failed to power on due to"
" job failure,"
" job_id: >%(jresponse.job_id)s<,"
" status: >%(jresponse.status)s<,"
" input K2 job diagnostics have been"
" written to: >%(diagfspeci)s<,"
" response k2 job diagnostics have been"
" written to: >%(diagfspec)s<")
raise K2JobFailure(msg %
{"logicalpartition.id": logicalpartition.id,
"jresponse.job_id": jresponse.job_id,
"jresponse.status": jresponse.status,
"diagfspeci": diagfspeci,
"diagfspec": diagfspec, },
jresponse._k2resp,
diagfspeci=diagfspeci,
diagfspec=diagfspec)
return jresponse.status, jresponse.job_id
|
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import io
import os
import shutil
import struct
from typing import BinaryIO
import unittest
import wave
from multispecies_whale_detection import xwav
# WAVE file chunks start with a 4-byte chunk identifier, followed by a 32-bit
# unsigned integer size.
CHUNK_PREAMBLE_LENGTH = 8
HARP_CHUNK_ID = b'harp'
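# For example, the preamble of a 1000-byte "data" chunk would be the 8 bytes
# struct.pack('<4sI', b'data', 1000); insert_harp_chunk below uses the same
# layout when it synthesizes the "harp" chunk preamble.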
def fixture_path(basename: str) -> str:
return os.path.join(os.path.dirname(__file__), basename)
def fixture_header():
"""Returns the expected parsed contents of the fixture HARP header.
(This is the start of one XWAV file from a real deployment, but the audio is
not included, since real XWAV files are typically much too large to live in
source control.)
"""
return xwav.Header(
fmt_chunk=xwav.FmtChunk(
audio_format=xwav.AudioFormat.PCM,
num_channels=1,
sample_rate=10000,
bytes_per_second=20000,
block_align=2,
bits_per_sample=16,
),
harp_chunk=xwav.HarpChunk(
wav_version_number=1,
firmware_version_number='V2.01A',
instrument_id='DL41',
site_name='',
experiment_name='Kauai01',
disk_sequence_number=11,
disk_serial_number='12345678',
longitude=-159.53383,
latitude=21.57224,
depth=720,
subchunks=[
xwav.Subchunk(
time=datetime.datetime(
2010, 2, 28, 2, 21, 15, tzinfo=datetime.timezone.utc),
byte_loc=34444,
byte_length=1500000,
write_length=3000,
sample_rate=10000,
gain=1),
xwav.Subchunk(
time=datetime.datetime(
2010, 2, 28, 2, 22, 30, tzinfo=datetime.timezone.utc),
byte_loc=1534444,
byte_length=1500000,
write_length=3000,
sample_rate=10000,
gain=1)
]),
)
def fixture_two_chunk_plain_wav() -> BinaryIO:
"""Creates a fixture WAVE file with two distinct sections.
  The audio is 100 Hz mono. Each section is 10 samples long. Samples in the
  first alternate between +/-(1 << 5) and in the second between +/-(1 << 10).
Returns:
File-like object with the bytes of the fixture WAVE file, positioned at the
beginning.
"""
sample_rate = 100
chunk_duration_samples = 10
plain_wav_io = io.BytesIO()
with wave.open(plain_wav_io, 'wb') as writer:
writer.setnchannels(1)
writer.setsampwidth(2)
writer.setframerate(sample_rate)
signs = [pow(-1, i) for i in range(chunk_duration_samples)]
for magnitude in [(1 << 5), (1 << 10)]:
writer.writeframes(
struct.pack('<%dh' % len(signs), *[magnitude * s for s in signs]))
plain_wav_io.seek(0)
return plain_wav_io
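# Sanity math for the fixture above: 2 sections * 10 samples * 2 bytes per
# 16-bit sample = 40 bytes of audio, i.e. 20 bytes per section. That is why
# the Subchunks built in fixture_two_chunk_harp_chunk use byte_length=20 and
# byte_loc offsets of data_start + 0 and data_start + 20.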
def wav_data_start(reader: BinaryIO) -> int:
"""Returns the byte position of the first audio sample in a WAVE file."""
pos = reader.tell()
reader.seek(0)
with wave.open(reader, 'rb') as wav_reader:
wav_reader.rewind()
data_start = reader.tell()
reader.seek(pos)
return data_start
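# For a minimal canonical WAVE file (RIFF + "fmt " + "data" chunks only, as
# the stdlib `wave` module writes), wav_data_start would typically return 44:
# a 12-byte RIFF header, a 24-byte "fmt " chunk, and the 8-byte "data"
# preamble. The helper computes the offset dynamically rather than assuming
# that layout.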
def fixture_two_chunk_harp_chunk(data_start: int) -> xwav.HarpChunk:
"""Returns a HarpChunk describing fixture_two_chunk_plain_wav.
This has two Subchunks, corresponding to the "sections" described in the
docstring of fixture_two_chunk_plain_wav. Their start times and the rest of
the metadata are valid but have arbitrary values.
Args:
data_start: Index of the first byte of audio in the plain WAV.
Returns:
HarpChunk corresponding to the "two_chunk" fixture.
"""
def round_coord(c: float) -> float:
"""Rounds a float to the precision of XWAV's int lat/long."""
return round(c * xwav.HarpChunk.GEO_SCALE) / xwav.HarpChunk.GEO_SCALE
return xwav.HarpChunk(
wav_version_number=1,
firmware_version_number='V1.0',
instrument_id='FAKE',
site_name='abcd',
experiment_name='rewrite',
disk_sequence_number=1,
disk_serial_number='X123',
longitude=round_coord(-122.391562122),
latitude=round_coord(37.791205),
depth=6,
subchunks=[
xwav.Subchunk(
time=datetime.datetime(
2021, 7, 15, 0, 27, 00, tzinfo=datetime.timezone.utc),
byte_loc=data_start + 0,
byte_length=20,
write_length=20,
sample_rate=100,
gain=1),
xwav.Subchunk(
time=datetime.datetime(
2021, 7, 15, 0, 28, 00, tzinfo=datetime.timezone.utc),
byte_loc=data_start + 20,
byte_length=20,
write_length=20,
sample_rate=100,
gain=1)
],
)
def insert_harp_chunk(harp_chunk: xwav.HarpChunk,
wav_reader: BinaryIO) -> BinaryIO:
"""Inserts a HarpChunk into the headers of a WAVE file.
Args:
harp_chunk: The HarpChunk to insert.
wav_reader: File-like object positioned at the start of a WAVE file.
Returns:
File-like object positioned at the beginning of an XWAV file for which
harp_chunk has been serialized into a "harp" chunk between the "fmt " and
"data" chunks.
"""
# The implementation starts with the bytes of the plain WAVE fixture and
# populates a new BytesIO, in order, with knowledge of the details of the WAVE
# file format, for which a useful reference is
#
# http://soundfile.sapp.org/doc/WaveFormat/
xwav_io = io.BytesIO()
harp_chunk_io = io.BytesIO()
harp_chunk.write(harp_chunk_io)
serialized_harp_chunk = harp_chunk_io.getbuffer()
# Rewrite the entire file size.
harp_id_and_size = struct.pack('<4sI', HARP_CHUNK_ID,
len(serialized_harp_chunk))
entire_file_size = (
len(wav_reader.getbuffer()) + len(serialized_harp_chunk) +
len(harp_id_and_size))
riff_chunk = struct.pack('<4sI4s', b'RIFF',
entire_file_size - CHUNK_PREAMBLE_LENGTH, b'WAVE')
data_chunk_start = wav_data_start(wav_reader) - CHUNK_PREAMBLE_LENGTH
wav_reader.seek(len(riff_chunk))
fmt_chunk = wav_reader.read(data_chunk_start - len(riff_chunk))
xwav_io.write(riff_chunk)
xwav_io.write(fmt_chunk)
# "harp" chunk
xwav_io.write(harp_id_and_size)
xwav_io.write(serialized_harp_chunk)
shutil.copyfileobj(wav_reader, xwav_io)
xwav_io.seek(0)
return xwav_io
def fixture_two_chunk_xwav() -> BinaryIO:
"""Adds a HARP chunk to the headers of fixture_two_chunk_plain_wav.
This fixture enables testing subchunk-at-a-time reads and seeks to specified
subchunks.
Returns:
File-like object with the bytes of the fixture XWAV file, positioned at the
beginning.
"""
plain_wav_io = fixture_two_chunk_plain_wav()
data_start_no_harp = wav_data_start(plain_wav_io)
# The eventual size of the HARP chunk will be required for computing
# correct Subchunk.byte_loc values.
harp_chunk_size = xwav.HarpChunk.serialized_len(num_subchunks=2)
harp_id_and_size = struct.pack('<4sI', HARP_CHUNK_ID, harp_chunk_size)
data_start = data_start_no_harp + len(harp_id_and_size) + harp_chunk_size
return insert_harp_chunk(
fixture_two_chunk_harp_chunk(data_start=data_start), plain_wav_io)
class TestXwav(unittest.TestCase):
def test_header_from_wav(self):
with open(fixture_path('xwav_headers_only.x.wav'), 'rb') as reader:
header = xwav.header_from_wav(reader)
self.assertEqual(fixture_header(), header)
def test_read_empty_harp_chunk(self):
with self.assertRaises(xwav.CorruptHeadersError):
xwav.HarpChunk.read(io.BytesIO())
def test_read_write_read_harp_chunk(self):
# Since xwav_headers_only.x.wav has a "harp" chunk that was written out by
# Triton, a round trip through the library under test provides additional
# verification.
with open(fixture_path('xwav_headers_only.x.wav'), 'rb') as reader:
header = xwav.header_from_wav(reader)
harp_chunk = header.harp_chunk
rewritten_io = io.BytesIO()
harp_chunk.write(rewritten_io)
rewritten_io.seek(0)
reread_harp_chunk = xwav.HarpChunk.read(rewritten_io)
self.assertEqual(harp_chunk, reread_harp_chunk)
def test_header_from_flac(self):
with open(fixture_path('xwav_headers_only.x.flac'), 'rb') as reader:
header = xwav.header_from_flac(reader)
self.assertEqual(fixture_header(), header)
def test_plain_wav_header_is_none(self):
with self.assertRaises(xwav.MissingChunkError):
with open(fixture_path('plain_headers.wav'), 'rb') as reader:
_ = xwav.header_from_wav(reader)
def test_plain_flac_header_is_none(self):
with self.assertRaises(xwav.MissingChunkError):
with open(fixture_path('plain_headers.flac'), 'rb') as reader:
_ = xwav.header_from_flac(reader)
def test_empty_wav_raises(self):
with self.assertRaises(xwav.CorruptHeadersError):
_ = xwav.header_from_wav(io.BytesIO())
def test_corrupt_wav_raises(self):
with self.assertRaises(xwav.CorruptHeadersError):
_ = xwav.header_from_wav(io.BytesIO(b'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'))
def test_corrupt_flac_raises(self):
with self.assertRaises(xwav.CorruptHeadersError):
_ = xwav.header_from_flac(io.BytesIO(b'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'))
def test_initialize_wav_reader(self):
with open(fixture_path('xwav_headers_only.x.wav'), 'rb') as file_reader:
reader = xwav.Reader(file_reader)
self.assertIsNotNone(reader.header)
def test_reader_harp_chunk(self):
xwav_io: BinaryIO = fixture_two_chunk_xwav()
fixture_harp_chunk = fixture_two_chunk_harp_chunk(
data_start=wav_data_start(xwav_io))
reader = xwav.Reader(xwav_io)
reader_harp_chunk = reader.header.harp_chunk
self.assertEqual(fixture_harp_chunk, reader_harp_chunk)
def reader_reads_two_chunk_fixture_template(self, reader):
"""Test template for WAV and FLAC Reader success cases.
Args:
reader: A newly-initialized Reader for fixture_two_chunk_xwav.
"""
iter_subchunks = iter(reader)
first_subchunk, first_samples = next(iter_subchunks)
second_subchunk, second_samples = next(iter_subchunks)
with self.assertRaises(StopIteration):
next(iter_subchunks)
# Non-comprehensive validation of values most likely to be interesting.
# To understand the hard-coded constants, check against the implementation
# of fixture_two_chunk_plain_wav.
self.assertEqual(20, first_subchunk.byte_length)
self.assertEqual(20, second_subchunk.byte_length)
self.assertEqual(second_subchunk.byte_loc,
first_subchunk.byte_loc + first_subchunk.byte_length)
for sample in first_samples:
self.assertEqual(1 << 5, abs(sample))
for sample in second_samples:
self.assertEqual(1 << 10, abs(sample))
def test_reader_reads_two_chunk_fixture(self):
xwav_io: BinaryIO = fixture_two_chunk_xwav()
reader = xwav.Reader(xwav_io)
self.reader_reads_two_chunk_fixture_template(reader)
def test_round_trip_read_write_read(self):
xwav_io: BinaryIO = fixture_two_chunk_xwav()
reader = xwav.Reader(xwav_io)
fmt_chunk = reader.header.fmt_chunk
harp_chunk = reader.header.harp_chunk
subchunk_generator = (
(subchunk.time, samples) for subchunk, samples in reader)
write_output = io.BytesIO()
xwav.write(
num_channels=fmt_chunk.num_channels,
sample_rate=fmt_chunk.sample_rate,
harp_chunk=harp_chunk,
subchunks=subchunk_generator,
subchunks_len=len(harp_chunk.subchunks),
output=write_output,
)
write_output.seek(0)
rereader = xwav.Reader(write_output)
self.reader_reads_two_chunk_fixture_template(rereader)
def test_read_out_of_range(self):
# The subchunks in this fixture file actually reference audio beyond the end
# of the file.
with open(fixture_path('xwav_headers_only.x.wav'), 'rb') as file_reader:
reader = xwav.Reader(file_reader)
with self.assertRaises(xwav.OutOfRangeError):
next(iter(reader))
def test_read_by_index(self):
xwav_io: BinaryIO = fixture_two_chunk_xwav()
    subchunk_index = 1  # arbitrarily the second subchunk of the fixture
reader = xwav.Reader(xwav_io)
subchunk, samples = reader[subchunk_index]
self.assertEqual(10, len(samples))
self.assertEqual(subchunk, reader.subchunks[subchunk_index])
def test_initialize_reader_flac(self):
with open(fixture_path('fixture_two_chunk_xwav.x.flac'), 'rb') as infile:
reader = xwav.Reader(infile)
self.assertIsNotNone(reader.header)
def test_reader_reads_two_chunk_fixture_flac(self):
with open(fixture_path('fixture_two_chunk_xwav.x.flac'), 'rb') as infile:
reader = xwav.Reader(infile)
self.reader_reads_two_chunk_fixture_template(reader)
def test_reader_reads_from_bytesio_flac(self):
flac_io = io.BytesIO()
with open(fixture_path('fixture_two_chunk_xwav.x.flac'), 'rb') as infile:
flac_io.write(infile.read())
flac_io.seek(0)
reader = xwav.Reader(flac_io)
self.reader_reads_two_chunk_fixture_template(reader)
if __name__ == '__main__':
unittest.main()
|
|
"""
SoftLayer.tests.CLI.modules.file_tests
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:license: MIT, see LICENSE for more details.
"""
from SoftLayer import testing
import json
import mock
class FileTests(testing.TestCase):
def test_access_list(self):
result = self.run_command(['file', 'access-list', '1234'])
self.assert_no_fail(result)
def test_authorize_host_to_volume(self):
result = self.run_command(['file', 'access-authorize', '12345678',
'--hardware-id=100', '--virtual-id=10',
'--ip-address-id=192',
'--ip-address=192.3.2.1',
'--subnet-id=200'])
self.assert_no_fail(result)
def test_deauthorize_host_to_volume(self):
result = self.run_command(['file', 'access-revoke', '12345678',
'--hardware-id=100', '--virtual-id=10',
'--ip-address-id=192',
'--ip-address=192.3.2.1',
'--subnet-id=200'])
self.assert_no_fail(result)
def test_volume_list(self):
result = self.run_command(['file', 'volume-list'])
self.assert_no_fail(result)
self.assertEqual([
{
'bytes_used': None,
'capacity_gb': 10,
'datacenter': 'Dallas',
'id': 1,
'ip_addr': '127.0.0.1',
'storage_type': 'ENDURANCE',
'username': 'user',
'active_transactions': None,
'mount_addr': '127.0.0.1:/TEST',
'rep_partner_count': None
}],
json.loads(result.output))
@mock.patch('SoftLayer.FileStorageManager.list_file_volumes')
def test_volume_count(self, list_mock):
list_mock.return_value = [
{'serviceResource': {'datacenter': {'name': 'dal09'}}},
{'serviceResource': {'datacenter': {'name': 'ams01'}}},
{'serviceResource': {'datacenter': {'name': 'ams01'}}}
]
result = self.run_command(['file', 'volume-count'])
self.assert_no_fail(result)
self.assertEqual(
{
'ams01': 2,
'dal09': 1
},
json.loads(result.output))
def test_snapshot_list(self):
result = self.run_command(['file', 'snapshot-list', '1234'])
self.assert_no_fail(result)
self.assertEqual([
{
'id': 470,
'name': 'unit_testing_note',
'created': '2016-07-06T07:41:19-05:00',
'size_bytes': '42',
}],
json.loads(result.output))
def test_volume_cancel(self):
result = self.run_command([
'--really', 'file', 'volume-cancel', '1234'])
self.assert_no_fail(result)
self.assertEqual('File volume with id 1234 has been marked'
' for cancellation\n', result.output)
self.assert_called_with('SoftLayer_Billing_Item', 'cancelItem',
args=(False, True, None))
def test_volume_detail(self):
result = self.run_command(['file', 'volume-detail', '1234'])
self.assert_no_fail(result)
self.assertEqual({
'Username': 'username',
'Used Space': '0B',
'Endurance Tier': 'READHEAVY_TIER',
'IOPs': 1000,
'Mount Address': '127.0.0.1:/TEST',
'Snapshot Capacity (GB)': '10',
'Snapshot Used (Bytes)': 1024,
'Capacity (GB)': '20GB',
'Target IP': '10.1.2.3',
'Data Center': 'dal05',
'Type': 'ENDURANCE',
'ID': 100,
'# of Active Transactions': '1',
'Ongoing Transaction': 'This is a buffer time in which the customer may cancel the server',
'Replicant Count': '1',
'Replication Status': 'Replicant Volume Provisioning '
'has completed.',
'Replicant Volumes': [[
{'Replicant ID': 'Volume Name', '1784': 'TEST_REP_1'},
{'Replicant ID': 'Target IP', '1784': '10.3.174.79'},
{'Replicant ID': 'Data Center', '1784': 'wdc01'},
{'Replicant ID': 'Schedule', '1784': 'REPLICATION_HOURLY'},
], [
{'Replicant ID': 'Volume Name', '1785': 'TEST_REP_2'},
{'Replicant ID': 'Target IP', '1785': '10.3.177.84'},
{'Replicant ID': 'Data Center', '1785': 'dal01'},
{'Replicant ID': 'Schedule', '1785': 'REPLICATION_DAILY'},
]],
'Original Volume Properties': [
{'Property': 'Original Volume Size',
'Value': '20'},
{'Property': 'Original Volume Name',
'Value': 'test-original-volume-name'},
{'Property': 'Original Snapshot Name',
'Value': 'test-original-snapshot-name'}
]
}, json.loads(result.output))
def test_volume_order_performance_iops_not_given(self):
result = self.run_command(['file', 'volume-order',
'--storage-type=performance', '--size=20',
'--location=dal05'])
self.assertEqual(2, result.exit_code)
def test_volume_order_performance_iops_not_multiple_of_100(self):
result = self.run_command(['file', 'volume-order',
'--storage-type=performance', '--size=20',
'--iops=122', '--location=dal05'])
self.assertEqual(2, result.exit_code)
def test_volume_order_performance_snapshot_error(self):
result = self.run_command(['file', 'volume-order',
'--storage-type=performance', '--size=20',
'--iops=100', '--location=dal05',
'--snapshot-size=10',
'--service-offering=performance'])
self.assertEqual(2, result.exit_code)
@mock.patch('SoftLayer.FileStorageManager.order_file_volume')
def test_volume_order_performance(self, order_mock):
order_mock.return_value = {
'placedOrder': {
'id': 478,
'items': [
{'description': 'Performance Storage'},
{'description': 'File Storage'},
{'description': '0.25 IOPS per GB'},
{'description': '20 GB Storage Space'},
{'description': '10 GB Storage Space (Snapshot Space)'}]
}
}
result = self.run_command(['file', 'volume-order',
'--storage-type=performance', '--size=20',
'--iops=100', '--location=dal05',
'--snapshot-size=10'])
self.assert_no_fail(result)
self.assertEqual(result.output,
'Order #478 placed successfully!\n'
' > Performance Storage\n > File Storage\n'
' > 0.25 IOPS per GB\n > 20 GB Storage Space\n'
' > 10 GB Storage Space (Snapshot Space)\n')
def test_volume_order_endurance_tier_not_given(self):
result = self.run_command(['file', 'volume-order',
'--storage-type=endurance', '--size=20',
'--location=dal05'])
self.assertEqual(2, result.exit_code)
@mock.patch('SoftLayer.FileStorageManager.order_file_volume')
def test_volume_order_endurance(self, order_mock):
order_mock.return_value = {
'placedOrder': {
'id': 478,
'items': [
{'description': 'Endurance Storage'},
{'description': 'File Storage'},
{'description': '0.25 IOPS per GB'},
{'description': '20 GB Storage Space'},
{'description': '10 GB Storage Space (Snapshot Space)'}]
}
}
result = self.run_command(['file', 'volume-order',
'--storage-type=endurance', '--size=20',
'--tier=0.25', '--location=dal05',
'--snapshot-size=10'])
self.assert_no_fail(result)
self.assertEqual(result.output,
'Order #478 placed successfully!\n'
' > Endurance Storage\n > File Storage\n'
' > 0.25 IOPS per GB\n > 20 GB Storage Space\n'
' > 10 GB Storage Space (Snapshot Space)\n')
@mock.patch('SoftLayer.FileStorageManager.order_file_volume')
def test_volume_order_order_not_placed(self, order_mock):
order_mock.return_value = {}
result = self.run_command(['file', 'volume-order',
'--storage-type=endurance', '--size=20',
'--tier=0.25', '--location=dal05'])
self.assert_no_fail(result)
self.assertEqual(result.output,
'Order could not be placed! Please verify '
'your options and try again.\n')
def test_volume_order_hourly_billing_not_available(self):
result = self.run_command(['file', 'volume-order',
'--storage-type=endurance', '--size=20',
'--tier=0.25', '--location=dal10',
'--billing=hourly',
'--service-offering=enterprise'])
self.assertEqual(2, result.exit_code)
@mock.patch('SoftLayer.FileStorageManager.order_file_volume')
def test_volume_order_hourly_billing(self, order_mock):
order_mock.return_value = {
'placedOrder': {
'id': 479,
'items': [
{'description': 'Storage as a Service'},
{'description': 'File Storage'},
{'description': '20 GB Storage Space'},
{'description': '0.25 IOPS per GB'},
{'description': '10 GB Storage Space (Snapshot Space)'}]
}
}
result = self.run_command(['file', 'volume-order',
'--storage-type=endurance', '--size=20',
'--tier=0.25', '--location=dal05',
'--service-offering=storage_as_a_service',
'--billing=hourly', '--snapshot-size=10'])
self.assert_no_fail(result)
self.assertEqual(result.output,
'Order #479 placed successfully!\n'
' > Storage as a Service\n'
' > File Storage\n'
' > 20 GB Storage Space\n'
' > 0.25 IOPS per GB\n'
' > 10 GB Storage Space (Snapshot Space)\n')
@mock.patch('SoftLayer.FileStorageManager.order_file_volume')
def test_volume_order_performance_manager_error(self, order_mock):
order_mock.side_effect = ValueError('failure!')
result = self.run_command(['file', 'volume-order',
'--storage-type=performance', '--size=20',
'--iops=100', '--location=dal05'])
self.assertEqual(2, result.exit_code)
self.assertEqual('Argument Error: failure!', result.exception.message)
@mock.patch('SoftLayer.FileStorageManager.order_file_volume')
def test_volume_order_endurance_manager_error(self, order_mock):
order_mock.side_effect = ValueError('failure!')
result = self.run_command(['file', 'volume-order',
'--storage-type=endurance', '--size=20',
'--tier=0.25', '--location=dal05'])
self.assertEqual(2, result.exit_code)
self.assertEqual('Argument Error: failure!', result.exception.message)
def test_enable_snapshots(self):
result = self.run_command(['file', 'snapshot-enable', '12345678',
'--schedule-type=HOURLY', '--minute=10',
'--retention-count=5'])
self.assert_no_fail(result)
def test_disable_snapshots(self):
result = self.run_command(['file', 'snapshot-disable', '12345678',
'--schedule-type=HOURLY'])
self.assert_no_fail(result)
def test_list_volume_schedules(self):
result = self.run_command([
'file', 'snapshot-schedule-list', '12345678'])
self.assert_no_fail(result)
self.assertEqual([
{
"week": None,
"maximum_snapshots": None,
"hour": None,
"day_of_week": None,
"day": None,
"replication": None,
"date_of_month": None,
"month_of_year": None,
"active": "",
"date_created": "",
"type": "WEEKLY",
"id": 978,
"minute": '30'
},
{
"week": None,
"maximum_snapshots": None,
"hour": None,
"day_of_week": None,
"day": None,
"replication": '*',
"date_of_month": None,
"month_of_year": None,
"active": "",
"date_created": "",
"type": "INTERVAL",
"id": 988,
"minute": '*'
}
], json.loads(result.output))
def test_create_snapshot(self):
result = self.run_command(['file', 'snapshot-create', '12345678'])
self.assert_no_fail(result)
self.assertEqual('New snapshot created with id: 449\n', result.output)
@mock.patch('SoftLayer.FileStorageManager.create_snapshot')
def test_create_snapshot_unsuccessful(self, snapshot_mock):
snapshot_mock.return_value = []
result = self.run_command(['file', 'snapshot-create', '8', '-n=note'])
self.assertEqual('Error occurred while creating snapshot.\n'
'Ensure volume is not failed over or in another '
'state which prevents taking snapshots.\n',
result.output)
def test_snapshot_restore(self):
result = self.run_command(['file', 'snapshot-restore', '12345678',
'--snapshot-id=87654321'])
self.assert_no_fail(result)
self.assertEqual(result.output, 'File volume 12345678 is being'
' restored using snapshot 87654321\n')
def test_delete_snapshot(self):
result = self.run_command(['file', 'snapshot-delete', '12345678'])
self.assert_no_fail(result)
@mock.patch('SoftLayer.FileStorageManager.order_snapshot_space')
def test_snapshot_order_order_not_placed(self, order_mock):
order_mock.return_value = {}
result = self.run_command(['file', 'snapshot-order', '1234',
'--capacity=10', '--tier=0.25'])
self.assert_no_fail(result)
self.assertEqual(result.output,
'Order could not be placed! Please verify '
'your options and try again.\n')
@mock.patch('SoftLayer.FileStorageManager.order_snapshot_space')
def test_snapshot_order_performance_manager_error(self, order_mock):
order_mock.side_effect = ValueError('failure!')
result = self.run_command(['file', 'snapshot-order', '1234',
'--capacity=10', '--tier=0.25'])
self.assertEqual(2, result.exit_code)
self.assertEqual('Argument Error: failure!', result.exception.message)
@mock.patch('SoftLayer.FileStorageManager.order_snapshot_space')
def test_snapshot_order(self, order_mock):
order_mock.return_value = {
'placedOrder': {
'id': 8702,
'items': [{'description':
'10 GB Storage Space (Snapshot Space)'}],
'status': 'PENDING_APPROVAL',
}
}
result = self.run_command(['file', 'snapshot-order', '1234',
'--capacity=10', '--tier=0.25'])
self.assert_no_fail(result)
self.assertEqual(result.output,
'Order #8702 placed successfully!\n'
' > 10 GB Storage Space (Snapshot Space)\n'
' > Order status: PENDING_APPROVAL\n')
def test_snapshot_cancel(self):
result = self.run_command(['--really',
'file', 'snapshot-cancel', '1234'])
self.assert_no_fail(result)
self.assertEqual('File volume with id 1234 has been marked'
' for snapshot cancellation\n', result.output)
self.assert_called_with('SoftLayer_Billing_Item', 'cancelItem',
args=(False, True, None))
def test_replicant_failover(self):
result = self.run_command(['file', 'replica-failover', '12345678',
'--replicant-id=5678', '--immediate'])
self.assert_no_fail(result)
self.assertEqual('Failover to replicant is now in progress.\n',
result.output)
@mock.patch('SoftLayer.FileStorageManager.failover_to_replicant')
def test_replicant_failover_unsuccessful(self, failover_mock):
failover_mock.return_value = False
result = self.run_command(['file', 'replica-failover', '12345678',
'--replicant-id=5678'])
self.assertEqual('Failover operation could not be initiated.\n',
result.output)
def test_replicant_failback(self):
result = self.run_command(['file', 'replica-failback', '12345678',
'--replicant-id=5678'])
self.assert_no_fail(result)
self.assertEqual('Failback from replicant is now in progress.\n',
result.output)
@mock.patch('SoftLayer.FileStorageManager.failback_from_replicant')
def test_replicant_failback_unsuccessful(self, failback_mock):
failback_mock.return_value = False
result = self.run_command(['file', 'replica-failback', '12345678',
'--replicant-id=5678'])
self.assertEqual('Failback operation could not be initiated.\n',
result.output)
@mock.patch('SoftLayer.FileStorageManager.order_replicant_volume')
def test_replicant_order_order_not_placed(self, order_mock):
order_mock.return_value = {}
result = self.run_command(['file', 'replica-order', '100',
'--snapshot-schedule=DAILY',
'--location=dal05'])
self.assert_no_fail(result)
self.assertEqual(result.output,
'Order could not be placed! Please verify '
'your options and try again.\n')
@mock.patch('SoftLayer.FileStorageManager.order_replicant_volume')
def test_replicant_order(self, order_mock):
order_mock.return_value = {
'placedOrder': {
'id': 77309,
'items': [
{'description': 'Endurance Storage'},
{'description': '2 IOPS per GB'},
{'description': 'File Storage'},
{'description': '20 GB Storage Space'},
{'description': '10 GB Storage Space (Snapshot Space)'},
{'description': '20 GB Storage Space Replicant of: TEST'},
],
}
}
result = self.run_command(['file', 'replica-order', '100',
'--snapshot-schedule=DAILY',
'--location=dal05', '--tier=2'])
self.assert_no_fail(result)
self.assertEqual(result.output,
'Order #77309 placed successfully!\n'
' > Endurance Storage\n'
' > 2 IOPS per GB\n'
' > File Storage\n'
' > 20 GB Storage Space\n'
' > 10 GB Storage Space (Snapshot Space)\n'
' > 20 GB Storage Space Replicant of: TEST\n')
def test_replication_locations(self):
result = self.run_command(['file', 'replica-locations', '1234'])
self.assert_no_fail(result)
self.assertEqual(
{
'12345': 'Dallas 05',
},
json.loads(result.output))
@mock.patch('SoftLayer.FileStorageManager.get_replication_locations')
def test_replication_locations_unsuccessful(self, locations_mock):
locations_mock.return_value = False
result = self.run_command(['file', 'replica-locations', '1234'])
self.assert_no_fail(result)
self.assertEqual('No data centers compatible for replication.\n',
result.output)
def test_replication_partners(self):
result = self.run_command(['file', 'replica-partners', '1234'])
self.assert_no_fail(result)
self.assertEqual([
{
'ID': 1784,
'Account ID': 3000,
'Capacity (GB)': 20,
'Host ID': None,
'Guest ID': None,
'Hardware ID': None,
'Username': 'TEST_REP_1',
},
{
'ID': 1785,
'Account ID': 3001,
'Host ID': None,
'Guest ID': None,
'Hardware ID': None,
'Capacity (GB)': 20,
'Username': 'TEST_REP_2',
}],
json.loads(result.output))
@mock.patch('SoftLayer.FileStorageManager.get_replication_partners')
def test_replication_partners_unsuccessful(self, partners_mock):
partners_mock.return_value = False
result = self.run_command(['file', 'replica-partners', '1234'])
self.assertEqual(
'There are no replication partners for the given volume.\n',
result.output)
@mock.patch('SoftLayer.FileStorageManager.order_duplicate_volume')
def test_duplicate_order_exception_caught(self, order_mock):
order_mock.side_effect = ValueError('order attempt failed, oh noooo!')
result = self.run_command(['file', 'volume-duplicate', '100'])
self.assertEqual(2, result.exit_code)
self.assertEqual('Argument Error: order attempt failed, oh noooo!',
result.exception.message)
@mock.patch('SoftLayer.FileStorageManager.order_duplicate_volume')
def test_duplicate_order_order_not_placed(self, order_mock):
order_mock.return_value = {}
result = self.run_command(['file', 'volume-duplicate', '100',
'--duplicate-iops=1400'])
self.assert_no_fail(result)
self.assertEqual(result.output,
'Order could not be placed! Please verify '
'your options and try again.\n')
@mock.patch('SoftLayer.FileStorageManager.order_duplicate_volume')
def test_duplicate_order(self, order_mock):
order_mock.return_value = {
'placedOrder': {
'id': 24602,
'items': [{'description': 'Storage as a Service'}]
}
}
result = self.run_command(['file', 'volume-duplicate', '100',
'--origin-snapshot-id=470',
'--duplicate-size=250',
'--duplicate-tier=2',
'--duplicate-snapshot-size=20'])
self.assert_no_fail(result)
self.assertEqual(result.output,
'Order #24602 placed successfully!\n'
' > Storage as a Service\n')
@mock.patch('SoftLayer.FileStorageManager.order_duplicate_volume')
def test_duplicate_order_hourly_billing(self, order_mock):
order_mock.return_value = {
'placedOrder': {
'id': 24602,
'items': [{'description': 'Storage as a Service'}]
}
}
result = self.run_command(['file', 'volume-duplicate', '100',
'--origin-snapshot-id=470',
'--duplicate-size=250',
'--duplicate-tier=2', '--billing=hourly',
'--duplicate-snapshot-size=20'])
order_mock.assert_called_with('100', origin_snapshot_id=470,
duplicate_size=250, duplicate_iops=None,
duplicate_tier_level=2,
duplicate_snapshot_size=20,
hourly_billing_flag=True)
self.assert_no_fail(result)
self.assertEqual(result.output,
'Order #24602 placed successfully!\n'
' > Storage as a Service\n')
@mock.patch('SoftLayer.FileStorageManager.order_modified_volume')
def test_modify_order_exception_caught(self, order_mock):
order_mock.side_effect = ValueError('order attempt failed, noooo!')
result = self.run_command(['file', 'volume-modify', '102', '--new-size=1000'])
self.assertEqual(2, result.exit_code)
self.assertEqual('Argument Error: order attempt failed, noooo!', result.exception.message)
@mock.patch('SoftLayer.FileStorageManager.order_modified_volume')
def test_modify_order_order_not_placed(self, order_mock):
order_mock.return_value = {}
result = self.run_command(['file', 'volume-modify', '102', '--new-iops=1400'])
self.assert_no_fail(result)
self.assertEqual('Order could not be placed! Please verify your options and try again.\n', result.output)
@mock.patch('SoftLayer.FileStorageManager.order_modified_volume')
def test_modify_order(self, order_mock):
order_mock.return_value = {'placedOrder': {'id': 24602, 'items': [{'description': 'Storage as a Service'},
{'description': '1000 GBs'},
{'description': '4 IOPS per GB'}]}}
result = self.run_command(['file', 'volume-modify', '102', '--new-size=1000', '--new-tier=4'])
order_mock.assert_called_with('102', new_size=1000, new_iops=None, new_tier_level=4)
self.assert_no_fail(result)
self.assertEqual('Order #24602 placed successfully!\n > Storage as a Service\n > 1000 GBs\n > 4 IOPS per GB\n',
result.output)
|
|
#coding: UTF-8
"""
Test file/dir operations.
"""
import random
import re
from urllib import urlencode, quote
from tests.common.utils import randstring, urljoin
from tests.api.urls import DEFAULT_REPO_URL, REPOS_URL
from tests.api.apitestbase import ApiTestBase, USERNAME
class FilesApiTest(ApiTestBase):
def test_rename_file(self):
with self.get_tmp_repo() as repo:
name, furl = self.create_file(repo)
data = {
'operation': 'rename',
'newname': name + randstring(),
}
res = self.post(furl, data=data)
self.assertRegexpMatches(res.text, r'"http(.*)"')
def test_remove_file(self):
with self.get_tmp_repo() as repo:
_, furl = self.create_file(repo)
res = self.delete(furl)
self.assertEqual(res.text, '"success"')
def test_move_file(self):
with self.get_tmp_repo() as repo:
_, furl = self.create_file(repo)
# TODO: create another repo here, and use it as dst_repo
data = {
'operation': 'move',
'dst_repo': repo.repo_id,
'dst_dir': '/',
}
res = self.post(furl, data=data)
self.assertEqual(res.text, '"success"')
def test_copy_file(self):
with self.get_tmp_repo() as repo:
fname, _ = self.create_file(repo)
# TODO: create another repo here, and use it as dst_repo
dpath, _ = self.create_dir(repo)
fopurl = urljoin(repo.repo_url, 'fileops/copy/') + '?p=/'
data = {
'file_names': fname,
'dst_repo': repo.repo_id,
'dst_dir': dpath,
}
res = self.post(fopurl, data=data)
self.assertEqual(res.text, '"success"')
def test_download_file(self):
with self.get_tmp_repo() as repo:
fname, furl = self.create_file(repo)
res = self.get(furl)
self.assertRegexpMatches(res.text, '"http(.*)/%s"' % quote(fname))
def test_download_file_from_history(self):
with self.get_tmp_repo() as repo:
fname, _ = self.create_file(repo)
file_history_url = urljoin(repo.repo_url, 'history/') + \
'?p=/%s' % quote(fname)
res = self.get(file_history_url).json()
commit_id = res['commits'][0]['id']
self.assertEqual(len(commit_id), 40)
data = {
'p': fname,
'commit_id': commit_id,
}
query = '?' + urlencode(data)
res = self.get(repo.file_url + query)
self.assertRegexpMatches(res.text, r'"http(.*)/%s"' % quote(fname))
def test_get_file_detail(self):
with self.get_tmp_repo() as repo:
fname, _ = self.create_file(repo)
fdurl = repo.file_url + u'detail/?p=/%s' % quote(fname)
detail = self.get(fdurl).json()
self.assertIsNotNone(detail)
self.assertIsNotNone(detail['id'])
self.assertIsNotNone(detail['mtime'])
self.assertIsNotNone(detail['type'])
self.assertIsNotNone(detail['name'])
self.assertIsNotNone(detail['size'])
def test_get_file_history(self):
with self.get_tmp_repo() as repo:
fname, _ = self.create_file(repo)
fhurl = repo.file_url + u'history/?p=%s' % quote(fname)
history = self.get(fhurl).json()
for commit in history['commits']:
self.assertIsNotNone(commit['rev_file_size'])
#self.assertIsNotNone(commit['rev_file_id']) #allow null
self.assertIsNotNone(commit['ctime'])
self.assertIsNotNone(commit['creator_name'])
self.assertIsNotNone(commit['creator'])
self.assertIsNotNone(commit['root_id'])
#self.assertIsNotNone(commit['rev_renamed_old_path']) #allow null
#self.assertIsNotNone(commit['parent_id']) #allow null
self.assertIsNotNone(commit['new_merge'])
self.assertIsNotNone(commit['repo_id'])
self.assertIsNotNone(commit['desc'])
self.assertIsNotNone(commit['id'])
self.assertIsNotNone(commit['conflict'])
#self.assertIsNotNone(commit['second_parent_id']) #allow null
def test_get_upload_link(self):
with self.get_tmp_repo() as repo:
upload_url = urljoin(repo.repo_url, 'upload-link')
res = self.get(upload_url)
self.assertRegexpMatches(res.text, r'"http(.*)/upload-api/[^/]+"')
def test_get_update_link(self):
with self.get_tmp_repo() as repo:
update_url = urljoin(repo.repo_url, 'update-link')
res = self.get(update_url)
self.assertRegexpMatches(res.text, r'"http(.*)/update-api/[^/]+"')
# def test_upload_file(self):
# # XXX: requests has problems when post a file whose name contains
# # non-ascii data
# fname = 'file-upload-test %s.txt' % randstring()
# furl = self.test_file_url + '?p=/%s' % quote(fname)
# self.delete(furl)
# upload_url = self.test_repo_url + u'upload-link/'
# res = self.get(upload_url)
# upload_api_url = re.match(r'"(.*)"', res.text).group(1)
# files = {
# 'file': (fname, 'Some lines in this file'),
# 'parent_dir': '/',
# }
# res = self.post(upload_api_url, files=files)
# self.assertRegexpMatches(res.text, r'\w{40,40}')
# def test_update_file(self):
# fname = 'file-update-test %s.txt' % randstring()
# _, furl = self.create_file(fname=fname)
# update_url = self.test_repo_url + u'update-link/'
# res = self.get(update_url)
# update_api_url = re.match(r'"(.*)"', res.text).group(1)
# files = {
# 'file': ('filename', 'Updated content of this file'),
# 'target_file': '/test_update.c'
# }
# res = self.post(update_api_url, files=files)
# self.assertRegexpMatches(res.text, r'\w{40,40}')
def test_get_upload_blocks_link(self):
with self.get_tmp_repo() as repo:
upload_blks_url = urljoin(repo.repo_url, 'upload-blks-link')
res = self.get(upload_blks_url)
self.assertRegexpMatches(res.text, r'"http(.*)/upload-blks-api/[^/]+"')
def test_get_update_blocks_link(self):
with self.get_tmp_repo() as repo:
update_blks_url = urljoin(repo.repo_url, 'update-blks-link')
res = self.get(update_blks_url)
self.assertRegexpMatches(res.text, r'"http(.*)/update-blks-api/[^/]+"')
def test_list_dir(self):
with self.get_tmp_repo() as repo:
self.create_file(repo)
self.create_dir(repo)
dirents = self.get(repo.dir_url).json()
self.assertHasLen(dirents, 2)
for dirent in dirents:
self.assertIsNotNone(dirent['id'])
self.assertIsNotNone(dirent['name'])
self.assertIn(dirent['type'], ('file', 'dir'))
if dirent['type'] == 'file':
self.assertIsNotNone(dirent['size'])
def test_remove_dir(self):
with self.get_tmp_repo() as repo:
_, durl = self.create_dir(repo)
res = self.delete(durl)
self.assertEqual(res.text, u'"success"')
self.get(durl, expected=404)
def test_download_dir(self):
with self.get_tmp_repo() as repo:
dpath, _ = self.create_dir(repo)
query = '?p=%s' % quote(dpath)
ddurl = urljoin(repo.dir_url, 'download') + query
res = self.get(ddurl)
self.assertRegexpMatches(res.text,
r'"http(.*)/files/[^/]+/%s"' % quote(dpath[1:]))
def test_share_dir(self):
with self.get_tmp_repo() as repo:
dpath, _ = self.create_dir(repo)
query = '?p=%s' % quote(dpath)
share_dir_url = urljoin(repo.dir_url, 'share/') + query
with self.get_tmp_user() as user:
data = {
'emails': user.user_name,
's_type': 'd',
'path': '/',
'perm': 'r'
}
res = self.post(share_dir_url, data=data)
self.assertEqual(res.text, u'{}')
|
|
import datetime
import struct
__all__ = (
'read_element_id',
'read_element_size',
'read_unsigned_integer',
'read_signed_integer',
'read_float',
'read_string',
'read_unicode_string',
'read_date',
'encode_element_id',
'encode_element_size',
'encode_unsigned_integer',
'encode_signed_integer',
'encode_float',
'encode_string',
'encode_unicode_string',
'encode_date',
)
MAXIMUM_ELEMENT_ID_LENGTH = 4
MAXIMUM_ELEMENT_SIZE_LENGTH = 8
MAXIMUM_UNSIGNED_INTEGER_LENGTH = 8
MAXIMUM_SIGNED_INTEGER_LENGTH = 8
def maximum_element_size_for_length(length):
"""
Returns the maximum element size representable in a given number of bytes.
:arg length: the limit on the length of the encoded representation in bytes
:type length: int
:returns: the maximum element size representable
:rtype: int
"""
return (2**(7*length)) - 2
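# For example: maximum_element_size_for_length(1) == 126 (7 usable value bits,
# minus the reserved all-ones "unknown size" pattern), and
# maximum_element_size_for_length(8) == 2**56 - 2.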
def decode_vint_length(byte, mask=True):
length = None
value_mask = None
for n in xrange(1, 9):
if byte & (2**8 - (2**(8 - n))) == 2**(8 - n):
length = n
value_mask = (2**(8 - n)) - 1
break
if length is None:
        raise IOError('Cannot decode invalid variable-length integer.')
if mask:
byte = byte & value_mask
return length, byte
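# Examples of the length decoding above (illustrative values):
#     decode_vint_length(0b10000010)        -> (1, 2)   # 1-byte vint, value 2
#     decode_vint_length(0b01000001)        -> (2, 1)   # 2-byte vint begins
#     decode_vint_length(0b00011010, False) -> (4, 26)  # length only, unmasked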
def read_element_id(stream):
"""
Reads an element ID from a file-like object.
:arg stream: the file-like object
:returns: the decoded element ID and its length in bytes
:rtype: tuple
"""
byte = ord(stream.read(1))
length, id_ = decode_vint_length(byte, False)
    if length > MAXIMUM_ELEMENT_ID_LENGTH:
        raise IOError('Cannot decode element ID with length > %i.' % MAXIMUM_ELEMENT_ID_LENGTH)
for i in xrange(0, length - 1):
byte = ord(stream.read(1))
id_ = (id_ * 2**8) + byte
return id_, length
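# For example, reading the four bytes 1A 45 DF A3 (the EBML header magic)
# from a stream yields (0x1A45DFA3, 4), since element IDs are kept unmasked.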
def read_element_size(stream):
"""
Reads an element size from a file-like object.
:arg stream: the file-like object
:returns: the decoded size (or None if unknown) and the length of the descriptor in bytes
:rtype: tuple
"""
byte = ord(stream.read(1))
length, size = decode_vint_length(byte)
for i in xrange(0, length - 1):
byte = ord(stream.read(1))
size = (size * 2**8) + byte
if size == maximum_element_size_for_length(length) + 1:
size = None
return size, length
def read_unsigned_integer(stream, size):
"""
Reads an encoded unsigned integer value from a file-like object.
:arg stream: the file-like object
:arg size: the number of bytes to read and decode
:type size: int
:returns: the decoded unsigned integer value
:rtype: int
"""
value = 0
for i in xrange(0, size):
byte = ord(stream.read(1))
value = (value << 8) | byte
return value
def read_signed_integer(stream, size):
"""
Reads an encoded signed integer value from a file-like object.
:arg stream: the file-like object
:arg size: the number of bytes to read and decode
:type size: int
:returns: the decoded signed integer value
:rtype: int
"""
value = 0
if size > 0:
first_byte = ord(stream.read(1))
value = first_byte
for i in xrange(1, size):
byte = ord(stream.read(1))
value = (value << 8) | byte
if (first_byte & 0b10000000) == 0b10000000:
value = -(2**(size*8) - value)
return value
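# The two's-complement adjustment above means, for example (io.BytesIO used
# purely illustratively):
#     read_signed_integer(io.BytesIO(b'\xff'), 1)     -> -1
#     read_signed_integer(io.BytesIO(b'\xff\x00'), 2) -> -256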
def read_float(stream, size):
"""
Reads an encoded floating point value from a file-like object.
:arg stream: the file-like object
:arg size: the number of bytes to read and decode (must be 0, 4, or 8)
:type size: int
:returns: the decoded floating point value
:rtype: float
"""
if size not in (0, 4, 8):
raise IOError('Cannot read floating point values with lengths other than 0, 4, or 8 bytes.')
value = 0.0
if size in (4, 8):
data = stream.read(size)
value = struct.unpack({
4: '>f',
8: '>d'
}[size], data)[0]
return value
def read_string(stream, size):
"""
Reads an encoded ASCII string value from a file-like object.
:arg stream: the file-like object
:arg size: the number of bytes to read and decode
:type size: int
:returns: the decoded ASCII string value
:rtype: str
"""
value = ''
if size > 0:
value = stream.read(size)
value = value.partition(chr(0))[0]
return value
def read_unicode_string(stream, size):
"""
Reads an encoded unicode string value from a file-like object.
:arg stream: the file-like object
:arg size: the number of bytes to read and decode
:type size: int
:returns: the decoded unicode string value
:rtype: unicode
"""
value = u''
if size > 0:
data = stream.read(size)
data = data.partition(chr(0))[0]
value = unicode(data, 'utf_8')
return value
def read_date(stream, size):
"""
Reads an encoded date (and time) value from a file-like object.
:arg stream: the file-like object
:arg size: the number of bytes to read and decode (must be 8)
:type size: int
:returns: the decoded date (and time) value
:rtype: datetime
"""
if size != 8:
raise IOError('Cannot read date values with lengths other than 8 bytes.')
data = stream.read(size)
nanoseconds = struct.unpack('>q', data)[0]
delta = datetime.timedelta(microseconds=(nanoseconds // 1000))
return datetime.datetime(2001, 1, 1, tzinfo=None) + delta
def octet(n):
"""
Limits an integer or byte to 8 bits.
"""
return n & 0b11111111
def vint_mask_for_length(length):
"""
Returns the bitmask for the first byte of a variable-length integer (used for element ID and size descriptors).
:arg length: the length of the variable-length integer
:type length: int
:returns: the bitmask for the first byte of the variable-length integer
:rtype: int
"""
return 0b10000000 >> (length - 1)
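# For example: vint_mask_for_length(1) == 0b10000000,
# vint_mask_for_length(4) == 0b00010000, and vint_mask_for_length(8) == 0b1.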
def encode_element_id(element_id):
"""
Encodes an element ID.
:arg element_id: an element ID
:type element_id: int
:returns: the encoded representation bytes
:rtype: bytearray
"""
length = MAXIMUM_ELEMENT_ID_LENGTH
while length and not (element_id & (vint_mask_for_length(length) << ((length - 1) * 8))):
length -= 1
if not length:
raise ValueError('Cannot encode invalid element ID %s.' % hex(element_id))
data = bytearray(length)
for index in reversed(xrange(length)):
data[index] = octet(element_id)
element_id >>= 8
return data
def encode_element_size(element_size, length=None):
"""
Encodes an element size. If element_size is None, the size will be encoded as unknown. If length is not None, the size will be encoded in that many bytes; otherwise, the size will be encoded in the minimum number of bytes required, or in 8 bytes if the size is unknown (element_size is None).
:arg element_size: the element size, or None if unknown
:type element_size: int or None
:arg length: the length of the encoded representation, or None for the minimum length required (defaults to None)
:type length: int or None
:returns: the encoded representation bytes
:rtype: bytearray
"""
if length is not None and (length < 1 or length > MAXIMUM_ELEMENT_SIZE_LENGTH):
raise ValueError('Cannot encode element sizes into representations shorter than one byte long or longer than %i bytes long.' % MAXIMUM_ELEMENT_SIZE_LENGTH)
if element_size is not None:
if element_size > maximum_element_size_for_length(MAXIMUM_ELEMENT_SIZE_LENGTH if length is None else length):
raise ValueError('Cannot encode element size %i as it would have an encoded representation longer than %i bytes.' % (element_size, (MAXIMUM_ELEMENT_SIZE_LENGTH if length is None else length)))
req_length = 1
while (element_size >> ((req_length - 1) * 8)) >= (vint_mask_for_length(req_length) - 1) and req_length < MAXIMUM_ELEMENT_SIZE_LENGTH:
req_length += 1
if length is None:
length = req_length
else:
if length is None:
length = 8 # other libraries do this, so unless another length is specified for the unknown size descriptor, do as they do to avoid compatibility issues.
element_size = maximum_element_size_for_length(length) + 1
data = bytearray(length)
for index in reversed(xrange(length)):
data[index] = octet(element_size)
element_size >>= 8
if not index:
data[index] = data[index] | vint_mask_for_length(length)
return data
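# Examples of the encoding above:
#     encode_element_size(2)    -> bytearray(b'\x82')  # 1-byte size, value 2
#     encode_element_size(None) -> bytearray(b'\x01\xff\xff\xff\xff\xff\xff\xff')
# The second is the conventional 8-byte "unknown size" descriptor mentioned
# in the inline comment above.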
def encode_unsigned_integer(uint, length=None):
"""
Encodes an unsigned integer value. If length is not None, uint will be encoded in that many bytes; otherwise, uint will be encoded in the minimum number of bytes required. If uint is None or 0, the minimum number of bytes required is 0.
:arg uint: the unsigned integer value
:type uint: int
:arg length: the length of the encoded representation, or None for the minimum length required (defaults to None)
:type length: int or None
:returns: the encoded representation bytes
:rtype: bytearray
"""
if uint is None:
uint = 0
if uint > ((2**((MAXIMUM_UNSIGNED_INTEGER_LENGTH if length is None else length) * 8)) - 1):
raise ValueError('Cannot encode unsigned integer value %i as it would have an encoded representation longer than %i bytes.' % (uint, (MAXIMUM_UNSIGNED_INTEGER_LENGTH if length is None else length)))
elif uint == 0:
req_length = 0
else:
req_length = 1
while uint >= (1 << (req_length * 8)) and req_length < MAXIMUM_UNSIGNED_INTEGER_LENGTH:
req_length += 1
if length is None:
length = req_length
data = bytearray(length)
for index in reversed(xrange(length)):
data[index] = octet(uint)
uint >>= 8
return data
def encode_signed_integer(sint, length=None):
"""
Encodes a signed integer value. If length is not None, sint will be encoded in that many bytes; otherwise, sint will be encoded in the minimum number of bytes required. If sint is None or 0, the minimum number of bytes required is 0.
:arg sint: the signed integer value
:type sint: int
:arg length: the length of the encoded representation, or None for the minimum length required (defaults to None)
:type length: int or None
:returns: the encoded representation bytes
:rtype: bytearray
"""
if sint is None:
sint = 0
if not (-(2**(7+(8*((MAXIMUM_SIGNED_INTEGER_LENGTH if length is None else length)-1)))) <= sint <= (2**(7+(8*((MAXIMUM_SIGNED_INTEGER_LENGTH if length is None else length)-1))))-1):
raise ValueError('Cannot encode signed integer value %i as it would have an encoded representation longer than %i bytes.' % (sint, (MAXIMUM_SIGNED_INTEGER_LENGTH if length is None else length)))
elif sint == 0:
req_length = 0
uint = 0
if length is None:
length = req_length
else:
uint = ((-sint - 1) << 1) if sint < 0 else (sint << 1)
req_length = 1
while uint >= (1 << (req_length * 8)) and req_length < MAXIMUM_UNSIGNED_INTEGER_LENGTH:
req_length += 1
if length is None:
length = req_length
if sint >= 0:
uint = sint
else:
uint = 2**(length*8) - abs(sint)
data = bytearray(length)
for index in reversed(xrange(length)):
data[index] = octet(uint)
uint >>= 8
return data
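# Examples of the signed encoding above:
#     encode_signed_integer(0)    -> bytearray(b'')      # zero needs no bytes
#     encode_signed_integer(-1)   -> bytearray(b'\xff')
#     encode_signed_integer(-256) -> bytearray(b'\xff\x00')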
def encode_float(float_, length=None):
"""
    Encodes a floating point value. If length is not None, float_ will be encoded in that many bytes; otherwise, float_ will be encoded in 0 bytes if float_ is None or 0, and 8 bytes in all other cases. If float_ is neither None nor 0 and length is 0, ValueError will be raised.
:arg float_: the floating point value
:type float_: float
:arg length: the length of the encoded representation, or None (defaults to None)
:type length: int or None
:returns: the encoded representation bytes
:rtype: bytearray
"""
if length not in (None, 0, 4, 8):
raise ValueError('Cannot encode floating point values with lengths other than 0, 4, or 8 bytes.')
if float_ is None:
float_ = 0.0
if float_ == 0.0:
if length is None:
length = 0
else:
if length is None:
length = 8
elif length == 0:
raise ValueError('Cannot encode floating point value %f as it would have an encoded representation longer than 0 bytes.' % float_)
if length in (4, 8):
data = bytearray(struct.pack({
4: '>f',
8: '>d'
}[length], float_))
else:
data = bytearray()
return data
def encode_string(string, length=None):
"""
Encodes an ASCII string value. If length is not None, string will be encoded in that many bytes by padding with zero bytes at the end if necessary; otherwise, string will be encoded in the minimum number of bytes required. If string is None or empty, the minimum number of bytes required is 0.
:arg string: the ASCII string value
:type string: str
:arg length: the length of the encoded representation, or None for the minimum length required (defaults to None)
:type length: int or None
:returns: the encoded representation bytes
:rtype: bytearray
"""
if string is None:
string = ''
if length is None:
length = len(string)
else:
if length < len(string):
raise ValueError('Cannot encode ASCII string value \'%s\' as it would have an encoded representation longer than %i bytes.' % (string, length))
        elif length > len(string):
            string = string.ljust(length, chr(0))
return bytearray(string)
def encode_unicode_string(string, length=None):
"""
Encodes a unicode string value. If length is not None, string will be encoded in that many bytes by padding with zero bytes at the end if necessary; otherwise, string will be encoded in the minimum number of bytes required. If string is None or empty, the minimum number of bytes required is 0.
:arg string: the unicode string value
:type string: unicode
:arg length: the length of the encoded representation, or None for the minimum length required (defaults to None)
:type length: int or None
:returns: the encoded representation bytes
:rtype: bytearray
"""
if string is None:
string = u''
return encode_string(string.encode('utf_8'), length)
def encode_date(date, length=None):
"""
Encodes a date (and time) value. If length is not None, it must be 8. If date is None, the current date (and time) will be encoded.
:arg date: the date (and time) value
    :type date: datetime.datetime
:arg length: the length of the encoded representation (must be 8), or None
:type length: int or None
:returns: the encoded representation bytes
:rtype: bytearray
"""
if date is None:
date = datetime.datetime.utcnow()
    else:
        # Only normalize aware datetimes to UTC; utcoffset() is None for
        # naive values and would make the subtraction fail.
        if date.utcoffset() is not None:
            date = (date - date.utcoffset()).replace(tzinfo=None)
if length is None:
length = 8
elif length != 8:
        raise ValueError('Cannot encode date value %s with any length other than 8 bytes.' % date)
delta = date - datetime.datetime(2001, 1, 1, tzinfo=None)
nanoseconds = (delta.microseconds + ((delta.seconds + (delta.days * 24 * 60 * 60)) * 10**6)) * 10**3
return encode_signed_integer(nanoseconds, length)
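# Hedged usage sketch (not part of the original module): exercises the
# encoders defined above; the expected bytes follow from their big-endian,
# two's-complement layout.
def _demo_encoders():
    assert bytes(encode_unsigned_integer(513)) == b'\x02\x01'  # minimal length
    assert bytes(encode_signed_integer(-1, length=2)) == b'\xff\xff'  # two's complement
    assert len(encode_float(1.5)) == 8  # IEEE-754 double by default
    assert bytes(encode_string('hi', length=4)) == b'hi\x00\x00'  # zero-padded
    assert len(encode_date(None)) == 8  # signed ns since 2001-01-01 UTC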
|
|
"""Component to interface with an alarm control panel."""
from __future__ import annotations
from dataclasses import dataclass
from datetime import timedelta
import logging
from typing import Any, Final, final
import voluptuous as vol
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
ATTR_CODE,
ATTR_CODE_FORMAT,
SERVICE_ALARM_ARM_AWAY,
SERVICE_ALARM_ARM_CUSTOM_BYPASS,
SERVICE_ALARM_ARM_HOME,
SERVICE_ALARM_ARM_NIGHT,
SERVICE_ALARM_ARM_VACATION,
SERVICE_ALARM_DISARM,
SERVICE_ALARM_TRIGGER,
)
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.config_validation import make_entity_service_schema
from homeassistant.helpers.entity import Entity, EntityDescription
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.typing import ConfigType
from .const import (
SUPPORT_ALARM_ARM_AWAY,
SUPPORT_ALARM_ARM_CUSTOM_BYPASS,
SUPPORT_ALARM_ARM_HOME,
SUPPORT_ALARM_ARM_NIGHT,
SUPPORT_ALARM_ARM_VACATION,
SUPPORT_ALARM_TRIGGER,
)
_LOGGER: Final = logging.getLogger(__name__)
DOMAIN: Final = "alarm_control_panel"
SCAN_INTERVAL: Final = timedelta(seconds=30)
ATTR_CHANGED_BY: Final = "changed_by"
FORMAT_TEXT: Final = "text"
FORMAT_NUMBER: Final = "number"
ATTR_CODE_ARM_REQUIRED: Final = "code_arm_required"
ENTITY_ID_FORMAT: Final = DOMAIN + ".{}"
ALARM_SERVICE_SCHEMA: Final = make_entity_service_schema(
{vol.Optional(ATTR_CODE): cv.string}
)
PLATFORM_SCHEMA: Final = cv.PLATFORM_SCHEMA
PLATFORM_SCHEMA_BASE: Final = cv.PLATFORM_SCHEMA_BASE
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Track states and offer events for sensors."""
component = hass.data[DOMAIN] = EntityComponent(
logging.getLogger(__name__), DOMAIN, hass, SCAN_INTERVAL
)
await component.async_setup(config)
component.async_register_entity_service(
SERVICE_ALARM_DISARM, ALARM_SERVICE_SCHEMA, "async_alarm_disarm"
)
component.async_register_entity_service(
SERVICE_ALARM_ARM_HOME,
ALARM_SERVICE_SCHEMA,
"async_alarm_arm_home",
[SUPPORT_ALARM_ARM_HOME],
)
component.async_register_entity_service(
SERVICE_ALARM_ARM_AWAY,
ALARM_SERVICE_SCHEMA,
"async_alarm_arm_away",
[SUPPORT_ALARM_ARM_AWAY],
)
component.async_register_entity_service(
SERVICE_ALARM_ARM_NIGHT,
ALARM_SERVICE_SCHEMA,
"async_alarm_arm_night",
[SUPPORT_ALARM_ARM_NIGHT],
)
component.async_register_entity_service(
SERVICE_ALARM_ARM_VACATION,
ALARM_SERVICE_SCHEMA,
"async_alarm_arm_vacation",
[SUPPORT_ALARM_ARM_VACATION],
)
component.async_register_entity_service(
SERVICE_ALARM_ARM_CUSTOM_BYPASS,
ALARM_SERVICE_SCHEMA,
"async_alarm_arm_custom_bypass",
[SUPPORT_ALARM_ARM_CUSTOM_BYPASS],
)
component.async_register_entity_service(
SERVICE_ALARM_TRIGGER,
ALARM_SERVICE_SCHEMA,
"async_alarm_trigger",
[SUPPORT_ALARM_TRIGGER],
)
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up a config entry."""
component: EntityComponent = hass.data[DOMAIN]
return await component.async_setup_entry(entry)
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
component: EntityComponent = hass.data[DOMAIN]
return await component.async_unload_entry(entry)
@dataclass
class AlarmControlPanelEntityDescription(EntityDescription):
"""A class that describes alarm control panel entities."""
class AlarmControlPanelEntity(Entity):
"""An abstract class for alarm control entities."""
entity_description: AlarmControlPanelEntityDescription
_attr_changed_by: str | None = None
_attr_code_arm_required: bool = True
_attr_code_format: str | None = None
_attr_supported_features: int
@property
def code_format(self) -> str | None:
"""Regex for code format or None if no code is required."""
return self._attr_code_format
@property
def changed_by(self) -> str | None:
"""Last change triggered by."""
return self._attr_changed_by
@property
def code_arm_required(self) -> bool:
"""Whether the code is required for arm actions."""
return self._attr_code_arm_required
def alarm_disarm(self, code: str | None = None) -> None:
"""Send disarm command."""
raise NotImplementedError()
async def async_alarm_disarm(self, code: str | None = None) -> None:
"""Send disarm command."""
await self.hass.async_add_executor_job(self.alarm_disarm, code)
def alarm_arm_home(self, code: str | None = None) -> None:
"""Send arm home command."""
raise NotImplementedError()
async def async_alarm_arm_home(self, code: str | None = None) -> None:
"""Send arm home command."""
await self.hass.async_add_executor_job(self.alarm_arm_home, code)
def alarm_arm_away(self, code: str | None = None) -> None:
"""Send arm away command."""
raise NotImplementedError()
async def async_alarm_arm_away(self, code: str | None = None) -> None:
"""Send arm away command."""
await self.hass.async_add_executor_job(self.alarm_arm_away, code)
def alarm_arm_night(self, code: str | None = None) -> None:
"""Send arm night command."""
raise NotImplementedError()
async def async_alarm_arm_night(self, code: str | None = None) -> None:
"""Send arm night command."""
await self.hass.async_add_executor_job(self.alarm_arm_night, code)
def alarm_arm_vacation(self, code: str | None = None) -> None:
"""Send arm vacation command."""
raise NotImplementedError()
async def async_alarm_arm_vacation(self, code: str | None = None) -> None:
"""Send arm vacation command."""
await self.hass.async_add_executor_job(self.alarm_arm_vacation, code)
def alarm_trigger(self, code: str | None = None) -> None:
"""Send alarm trigger command."""
raise NotImplementedError()
async def async_alarm_trigger(self, code: str | None = None) -> None:
"""Send alarm trigger command."""
await self.hass.async_add_executor_job(self.alarm_trigger, code)
def alarm_arm_custom_bypass(self, code: str | None = None) -> None:
"""Send arm custom bypass command."""
raise NotImplementedError()
async def async_alarm_arm_custom_bypass(self, code: str | None = None) -> None:
"""Send arm custom bypass command."""
await self.hass.async_add_executor_job(self.alarm_arm_custom_bypass, code)
@property
def supported_features(self) -> int:
"""Return the list of supported features."""
return self._attr_supported_features
@final
@property
def state_attributes(self) -> dict[str, Any] | None:
"""Return the state attributes."""
return {
ATTR_CODE_FORMAT: self.code_format,
ATTR_CHANGED_BY: self.changed_by,
ATTR_CODE_ARM_REQUIRED: self.code_arm_required,
}
class AlarmControlPanel(AlarmControlPanelEntity):
"""An abstract class for alarm control entities (for backwards compatibility)."""
def __init_subclass__(cls, **kwargs: Any) -> None:
"""Print deprecation warning."""
super().__init_subclass__(**kwargs) # type: ignore[call-arg]
_LOGGER.warning(
"AlarmControlPanel is deprecated, modify %s to extend AlarmControlPanelEntity",
cls.__name__,
)
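# Hedged example (not part of the component above): a minimal in-memory
# panel built on AlarmControlPanelEntity. The state strings follow Home
# Assistant's STATE_ALARM_* values; the class name and behaviour are
# purely illustrative.
class DemoAlarmPanel(AlarmControlPanelEntity):
    """A toy panel that arms and disarms in memory."""
    _attr_name = "Demo panel"
    _attr_code_arm_required = False
    _attr_supported_features = SUPPORT_ALARM_ARM_HOME | SUPPORT_ALARM_ARM_AWAY
    def __init__(self) -> None:
        """Start disarmed."""
        self._state = "disarmed"
    @property
    def state(self) -> str:
        """Return the current alarm state."""
        return self._state
    def alarm_disarm(self, code: str | None = None) -> None:
        """Disarm immediately; no code is required."""
        self._state = "disarmed"
    def alarm_arm_home(self, code: str | None = None) -> None:
        """Arm in home mode."""
        self._state = "armed_home"
    def alarm_arm_away(self, code: str | None = None) -> None:
        """Arm in away mode."""
        self._state = "armed_away"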
|
|
import cPickle as pickle
import os, sys
from sqlalchemy.types import BLOB
from sqlalchemy import create_engine, Table, Column, Integer, String, MetaData, ForeignKey, UniqueConstraint
from sqlalchemy.exc import OperationalError
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relation, backref, sessionmaker
from sqlalchemy.orm.exc import NoResultFound
import logging
logger = logging.getLogger('busmap.urbsweb.mapa')
dbg = logger.debug
Base = declarative_base()
class MiscKeyVal(Base):
"""Useful for simple key->value storage
Don't abuse it. Use just for quickly testing stuff
"""
__tablename__ = 'misc_keyval'
key = Column(String, primary_key=True)
type = Column(String) # a type, to allow easier filtering
value = Column(BLOB)
class Database:
def __init__(self, url):
self.engine = create_engine(url)
self.session = sessionmaker(bind=self.engine)()
def create_tables(self):
Base.metadata.create_all(self.engine)
# session shortcuts:
def add(self, *args):
return self.session.add(*args)
def commit(self):
return self.session.commit()
def query(self, *a, **kw):
return self.session.query(*a, **kw)
def _keyval_query(self, key):
return self.query(MiscKeyVal).filter_by(key=key)
# Misc key/val funcs:
def _put_keyval(self, type, key, value):
dbg('storing keyval: type: %r, key: %r, value: %r', type, key, value)
q = self._keyval_query(key)
existing = q.first()
if existing is not None:
kv = existing
else:
kv = MiscKeyVal(key=key, type=type)
self.add(kv)
kv.value = value
#self.commit()
return kv
def put_keyval(self, key, obj):
val = pickle.dumps(obj)
type = key.split('.')[0]
self._put_keyval(type, key, val)
def _get_keyval(self, key, default=None):
dbg('fetching keyval: key: %r', key)
kv = self._keyval_query(key).first()
if kv is not None:
dbg('keyval found: %r', kv)
return kv.value
dbg('keyval not found')
return default
def has_keyval(self, key):
return self._keyval_query(key).count() > 0
def get_keyval(self, key, default=None):
v = self._get_keyval(key)
if v is None:
return default
obj = pickle.loads(str(v))
return obj
def check_keyval(self, key, fn):
v = self.get_keyval(key)
if v is None:
v = fn()
self.put_keyval(key, v)
return v
def dump_keyvals(self, outdir, pattern=None, unpickle=True):
q = self.query(MiscKeyVal)
if pattern:
            q = q.filter(MiscKeyVal.key.like(pattern))
for kv in q:
path = os.path.join(outdir, kv.key)
v = kv.value
if unpickle:
d = pickle.loads(v)
if type(d) is str:
v = d
print repr(path), len(v)
open(path, 'w').write(v)
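# Hedged usage sketch (not part of the original module): exercises the
# key/value API above against an in-memory SQLite database. The keys and
# values are made up for illustration.
def _example_keyval_usage():
    db = Database('sqlite://')  # in-memory SQLite engine
    db.create_tables()
    db.put_keyval('linha.101', {'nome': 'Centro'})
    db.commit()
    assert db.has_keyval('linha.101')
    assert db.get_keyval('linha.101') == {'nome': 'Centro'}
    # check_keyval() computes and stores the value on a miss:
    horarios = db.check_keyval('horarios.101', lambda: ['06:00', '06:30'])
    assert horarios == ['06:00', '06:30']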
if __name__ == '__main__':
    db = Database(sys.argv[1])
    pattern = sys.argv[3] if len(sys.argv) > 3 else None
    db.dump_keyvals(sys.argv[2], pattern)
### OLD DEPRECATED DB CODE BEGIN
# ### I reinvented the wheel. I will use sqlalchemy instead
#
# import dbutil.db
# from dbutil.defs import *
#
#
# def new_db(**kwargs):
# return dbutil.db.new_dbsession(kwargs)
#
#
#
# f_linhas = [
# ('id', 'int(20)', PRIMARY_KEY|AUTOINC),
# ('iditi', 'varchar(16)', CAN_NULL|INDEXME),
# ('idhor', 'varchar(16)', CAN_NULL|INDEXME),
# ('shortname','varchar(32)', CAN_NULL|INDEXME),
# ('nome', 'varchar(32)', INDEXME),
# ]
# # 'KEY `locateimport` (`uso`, `tipo`, `bairro`)'
#
# f_pontos = [
# ('id', 'int(20)', PRIMARY_KEY|AUTOINC),
# ('idmapa', 'varchar(16)', CAN_NULL),
# ('nome', 'varchar(32)', INDEXME),
# ]
#
# f_ruas = [
# ('id', 'int(20)', PRIMARY_KEY|AUTOINC),
# ('iditi', 'varchar(16)', INDEXME|CAN_NULL),
# ('nome', 'varchar(64)', 0),
# ]
#
# f_horsets = [
# ('id', 'int(20)', PRIMARY_KEY|AUTOINC),
# ('idlinha', 'int(20)', 0),
# ('idponto', 'int(20)', 0),
# ('dia', 'int(20)', 0),
# ('apartir', 'varchar(16)', 0),
# ]
#
# f_horarios = [
# ('idset', 'int(20)', 0),
# ('hora', 'char(5)', 0),
# ('special', 'tinyint(1)', 0),
# ]
#
# i_horarios = [
# ('sethora', ('idset', 'hora'))
# ]
#
# f_itinerarios = [
# ('idlinha', 'int(20)', 0),
# ('seq', 'int(20)', 0),
# ('idrua', 'int(20)', 0),
# ]
#
# i_itinerarios = [
# ('linhaseq', ('idlinha', 'seq')),
# ]
#
# tables = [
# ('linhas', f_linhas, []),
# ('pontos', f_pontos, []),
# ('ruas', f_ruas, []),
# ('horsets', f_horsets, []),
# ('horarios', f_horarios, i_horarios),
# ('itinerarios', f_itinerarios, i_itinerarios),
# ]
#
# def create_tables(c):
# for name,fields,indexes in tables:
# c.def_table(name, fields, indexes)
#
#
# if __name__ == '__main__':
# import dbutil.creator
# import busmap.env
# import sys
# db = busmap.env.db
# destruct = False
# if len(sys.argv) > 1 and sys.argv[1] == '-f':
# destruct = True
# c = dbutil.creator.DatabaseCreator(db, destruct)
# c.prepare()
# create_tables(c)
#
### OLD DEPRECATED DB CODE END
|
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import mock
import pprint
import sqlalchemy as sa
from twisted.trial import unittest
from twisted.internet import defer, task
from buildbot.changes.changes import Change
from buildbot.db import changes
from buildbot.test.util import connector_component
from buildbot.test.fake import fakedb
from buildbot.util import epoch2datetime
class TestChangesConnectorComponent(
connector_component.ConnectorComponentMixin,
unittest.TestCase):
def setUp(self):
d = self.setUpConnectorComponent(
table_names=['changes', 'change_links', 'change_files',
'change_properties', 'scheduler_changes', 'schedulers',
'sourcestamps', 'sourcestamp_changes', 'patches' ])
def finish_setup(_):
self.db.changes = changes.ChangesConnectorComponent(self.db)
d.addCallback(finish_setup)
return d
def tearDown(self):
return self.tearDownConnectorComponent()
# common sample data
change13_rows = [
fakedb.Change(changeid=13, author="dustin", comments="fix spelling",
is_dir=0, branch="master", revision="deadbeef",
when_timestamp=266738400, revlink=None, category=None,
repository='', project=''),
fakedb.ChangeLink(changeid=13, link='http://buildbot.net'),
fakedb.ChangeLink(changeid=13, link='http://sf.net/projects/buildbot'),
fakedb.ChangeFile(changeid=13, filename='master/README.txt'),
fakedb.ChangeFile(changeid=13, filename='slave/README.txt'),
fakedb.ChangeProperty(changeid=13, property_name='notest',
property_value='["no","Change"]'),
]
change14_rows = [
fakedb.Change(changeid=14, author="warner", comments="fix whitespace",
is_dir=0, branch="warnerdb", revision="0e92a098b",
when_timestamp=266738404, revlink='http://warner/0e92a098b',
category='devel', repository='git://warner', project='Buildbot'),
fakedb.ChangeFile(changeid=14, filename='master/buildbot/__init__.py'),
]
change14_dict = {
'changeid': 14,
'author': u'warner',
'branch': u'warnerdb',
'category': u'devel',
'comments': u'fix whitespace',
'files': [u'master/buildbot/__init__.py'],
'is_dir': 0,
'links': [],
'project': u'Buildbot',
'properties': {},
'repository': u'git://warner',
'revision': u'0e92a098b',
'revlink': u'http://warner/0e92a098b',
'when_timestamp': epoch2datetime(266738404),
}
def change14(self):
c = Change(**dict(
category='devel',
isdir=0,
repository=u'git://warner',
links=[],
who=u'warner',
when=266738404,
comments=u'fix whitespace',
project=u'Buildbot',
branch=u'warnerdb',
revlink=u'http://warner/0e92a098b',
properties={},
files=[u'master/buildbot/__init__.py'],
revision=u'0e92a098b'))
c.number = 14
return c
# assertions
def assertChangesEqual(self, ca, cb):
ok = True
ok = ok and ca.number == cb.number
ok = ok and ca.who == cb.who
ok = ok and sorted(ca.files) == sorted(cb.files)
ok = ok and ca.comments == cb.comments
ok = ok and bool(ca.isdir) == bool(cb.isdir)
ok = ok and sorted(ca.links) == sorted(cb.links)
ok = ok and ca.revision == cb.revision
ok = ok and ca.when == cb.when
ok = ok and ca.branch == cb.branch
ok = ok and ca.category == cb.category
ok = ok and ca.revlink == cb.revlink
ok = ok and ca.properties == cb.properties
ok = ok and ca.repository == cb.repository
ok = ok and ca.project == cb.project
if not ok:
def printable(c):
return pprint.pformat(c.__dict__)
self.fail("changes do not match; expected\n%s\ngot\n%s" %
(printable(ca), printable(cb)))
# tests
def test_getChange(self):
d = self.insertTestData(self.change14_rows)
def get14(_):
return self.db.changes.getChange(14)
d.addCallback(get14)
def check14(chdict):
self.assertEqual(chdict, self.change14_dict)
d.addCallback(check14)
return d
def test_Change_fromChdict_with_chdict(self):
# test that the chdict getChange returns works with Change.fromChdict
d = Change.fromChdict(mock.Mock(), self.change14_dict)
def check(c):
self.assertChangesEqual(c, self.change14())
d.addCallback(check)
return d
def test_getChange_missing(self):
d = defer.succeed(None)
def get14(_):
return self.db.changes.getChange(14)
d.addCallback(get14)
def check14(chdict):
self.failUnless(chdict is None)
d.addCallback(check14)
return d
def test_getLatestChangeid(self):
d = self.insertTestData(self.change13_rows)
def get(_):
return self.db.changes.getLatestChangeid()
d.addCallback(get)
def check(changeid):
self.assertEqual(changeid, 13)
d.addCallback(check)
return d
def test_getLatestChangeid_empty(self):
d = defer.succeed(None)
def get(_):
return self.db.changes.getLatestChangeid()
d.addCallback(get)
def check(changeid):
self.assertEqual(changeid, None)
d.addCallback(check)
return d
def test_addChange(self):
d = self.db.changes.addChange(
author=u'dustin',
files=[u'master/LICENSING.txt', u'slave/LICENSING.txt'],
comments=u'fix spelling',
is_dir=0,
links=[u'http://slashdot.org', u'http://wired.com/g'],
revision=u'2d6caa52',
when_timestamp=epoch2datetime(266738400),
branch=u'master',
category=None,
revlink=None,
properties={u'platform': (u'linux', 'Change')},
repository=u'',
project=u'')
# check all of the columns of the four relevant tables
def check_change(changeid):
def thd(conn):
self.assertEqual(changeid, 1)
r = conn.execute(self.db.model.changes.select())
r = r.fetchall()
self.assertEqual(len(r), 1)
self.assertEqual(r[0].changeid, changeid)
self.assertEqual(r[0].author, 'dustin')
self.assertEqual(r[0].comments, 'fix spelling')
self.assertFalse(r[0].is_dir)
self.assertEqual(r[0].branch, 'master')
self.assertEqual(r[0].revision, '2d6caa52')
self.assertEqual(r[0].when_timestamp, 266738400)
self.assertEqual(r[0].category, None)
self.assertEqual(r[0].repository, '')
self.assertEqual(r[0].project, '')
return self.db.pool.do(thd)
d.addCallback(check_change)
def check_change_links(_):
def thd(conn):
query = self.db.model.change_links.select()
                query = query.where(self.db.model.change_links.c.changeid == 1)
                query = query.order_by(self.db.model.change_links.c.link)
r = conn.execute(query)
r = r.fetchall()
self.assertEqual(len(r), 2)
self.assertEqual(r[0].link, 'http://slashdot.org')
self.assertEqual(r[1].link, 'http://wired.com/g')
return self.db.pool.do(thd)
d.addCallback(check_change_links)
def check_change_files(_):
def thd(conn):
query = self.db.model.change_files.select()
                query = query.where(self.db.model.change_files.c.changeid == 1)
                query = query.order_by(self.db.model.change_files.c.filename)
r = conn.execute(query)
r = r.fetchall()
self.assertEqual(len(r), 2)
self.assertEqual(r[0].filename, 'master/LICENSING.txt')
self.assertEqual(r[1].filename, 'slave/LICENSING.txt')
return self.db.pool.do(thd)
d.addCallback(check_change_files)
def check_change_properties(_):
def thd(conn):
query = self.db.model.change_properties.select()
                query = query.where(self.db.model.change_properties.c.changeid == 1)
                query = query.order_by(self.db.model.change_properties.c.property_name)
r = conn.execute(query)
r = r.fetchall()
self.assertEqual(len(r), 1)
self.assertEqual(r[0].property_name, 'platform')
self.assertEqual(r[0].property_value, '["linux", "Change"]')
return self.db.pool.do(thd)
d.addCallback(check_change_properties)
return d
def test_addChange_when_timestamp_None(self):
clock = task.Clock()
clock.advance(1239898353)
d = self.db.changes.addChange(
author=u'dustin',
files=[],
comments=u'fix spelling',
is_dir=0,
links=[],
revision=u'2d6caa52',
when_timestamp=None,
branch=u'master',
category=None,
revlink=None,
properties={},
repository=u'',
project=u'',
_reactor=clock)
# check all of the columns of the four relevant tables
def check_change(changeid):
def thd(conn):
r = conn.execute(self.db.model.changes.select())
r = r.fetchall()
self.assertEqual(len(r), 1)
self.assertEqual(r[0].changeid, changeid)
self.assertEqual(r[0].when_timestamp, 1239898353)
return self.db.pool.do(thd)
d.addCallback(check_change)
def check_change_links(_):
def thd(conn):
query = self.db.model.change_links.select()
r = conn.execute(query)
r = r.fetchall()
self.assertEqual(len(r), 0)
return self.db.pool.do(thd)
d.addCallback(check_change_links)
def check_change_files(_):
def thd(conn):
query = self.db.model.change_files.select()
r = conn.execute(query)
r = r.fetchall()
self.assertEqual(len(r), 0)
return self.db.pool.do(thd)
d.addCallback(check_change_files)
def check_change_properties(_):
def thd(conn):
query = self.db.model.change_properties.select()
r = conn.execute(query)
r = r.fetchall()
self.assertEqual(len(r), 0)
return self.db.pool.do(thd)
d.addCallback(check_change_properties)
return d
def test_pruneChanges(self):
d = self.insertTestData([
fakedb.Scheduler(schedulerid=29),
fakedb.SourceStamp(id=234),
fakedb.Change(changeid=11),
fakedb.Change(changeid=12),
fakedb.SchedulerChange(schedulerid=29, changeid=12),
fakedb.SourceStampChange(sourcestampid=234, changeid=12),
] +
self.change13_rows + [
fakedb.SchedulerChange(schedulerid=29, changeid=13),
] +
self.change14_rows + [
fakedb.SchedulerChange(schedulerid=29, changeid=14),
fakedb.Change(changeid=15),
fakedb.SourceStampChange(sourcestampid=234, changeid=15),
]
)
# pruning with a horizon of 2 should delete changes 11, 12 and 13
d.addCallback(lambda _ : self.db.changes.pruneChanges(2))
def check(_):
def thd(conn):
results = {}
for tbl_name in ('scheduler_changes', 'sourcestamp_changes',
'change_files', 'change_links',
'change_properties', 'changes'):
tbl = self.db.model.metadata.tables[tbl_name]
r = conn.execute(sa.select([tbl.c.changeid]))
                    results[tbl_name] = sorted([row[0] for row in r.fetchall()])
self.assertEqual(results, {
'scheduler_changes': [14],
'sourcestamp_changes': [15],
'change_files': [14],
'change_links': [],
'change_properties': [],
'changes': [14, 15],
})
return self.db.pool.do(thd)
d.addCallback(check)
return d
def test_pruneChanges_None(self):
d = self.insertTestData(self.change13_rows)
d.addCallback(lambda _ : self.db.changes.pruneChanges(None))
def check(_):
def thd(conn):
tbl = self.db.model.changes
r = conn.execute(tbl.select())
self.assertEqual([ row.changeid for row in r.fetchall() ],
[ 13 ])
return self.db.pool.do(thd)
d.addCallback(check)
return d
def test_getRecentChanges_subset(self):
d = self.insertTestData([
fakedb.Change(changeid=8),
fakedb.Change(changeid=9),
fakedb.Change(changeid=10),
fakedb.Change(changeid=11),
fakedb.Change(changeid=12),
] + self.change13_rows + self.change14_rows)
d.addCallback(lambda _ :
self.db.changes.getRecentChanges(5))
def check(changes):
changeids = [ c['changeid'] for c in changes ]
self.assertEqual(changeids, [10, 11, 12, 13, 14])
d.addCallback(check)
return d
def test_getRecentChanges_empty(self):
d = defer.succeed(None)
d.addCallback(lambda _ :
self.db.changes.getRecentChanges(5))
def check(changes):
changeids = [ c['changeid'] for c in changes ]
self.assertEqual(changeids, [])
d.addCallback(check)
return d
def test_getRecentChanges_missing(self):
d = self.insertTestData(self.change13_rows + self.change14_rows)
d.addCallback(lambda _ :
self.db.changes.getRecentChanges(5))
def check(changes):
# requested 5, but only got 2
changeids = [ c['changeid'] for c in changes ]
self.assertEqual(changeids, [13, 14])
# double-check that they have .files, etc.
self.assertEqual(sorted(changes[0]['files']),
sorted(['master/README.txt', 'slave/README.txt']))
self.assertEqual(sorted(changes[0]['links']),
sorted(['http://buildbot.net',
'http://sf.net/projects/buildbot']))
self.assertEqual(changes[0]['properties'],
{ 'notest' : ('no', 'Change') })
d.addCallback(check)
return d
|
|
"""
Utilities for fast persistence of big data, with optional compression.
"""
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Copyright (c) 2009 Gael Varoquaux
# License: BSD Style, 3 clauses.
import pickle
import traceback
import sys
import os
import zlib
import warnings
from ._compat import _basestring
from io import BytesIO
if sys.version_info[0] >= 3:
Unpickler = pickle._Unpickler
Pickler = pickle._Pickler
def asbytes(s):
if isinstance(s, bytes):
return s
return s.encode('latin1')
else:
Unpickler = pickle.Unpickler
Pickler = pickle.Pickler
asbytes = str
_MEGA = 2 ** 20
_MAX_LEN = len(hex(2 ** 64))
# To detect file types
_ZFILE_PREFIX = asbytes('ZF')
###############################################################################
# Compressed file with Zlib
def _read_magic(file_handle):
""" Utility to check the magic signature of a file identifying it as a
Zfile
"""
magic = file_handle.read(len(_ZFILE_PREFIX))
# Pickling needs file-handles at the beginning of the file
file_handle.seek(0)
return magic
def read_zfile(file_handle):
"""Read the z-file and return the content as a string
Z-files are raw data compressed with zlib used internally by joblib
for persistence. Backward compatibility is not guaranteed. Do not
use for external purposes.
"""
file_handle.seek(0)
assert _read_magic(file_handle) == _ZFILE_PREFIX, \
"File does not have the right magic"
length = file_handle.read(len(_ZFILE_PREFIX) + _MAX_LEN)
length = length[len(_ZFILE_PREFIX):]
length = int(length, 16)
# We use the known length of the data to tell Zlib the size of the
# buffer to allocate.
data = zlib.decompress(file_handle.read(), 15, length)
assert len(data) == length, (
"Incorrect data length while decompressing %s."
"The file could be corrupted." % file_handle)
return data
def write_zfile(file_handle, data, compress=1):
"""Write the data in the given file as a Z-file.
Z-files are raw data compressed with zlib used internally by joblib
    for persistence. Backward compatibility is not guaranteed. Do not
use for external purposes.
"""
file_handle.write(_ZFILE_PREFIX)
length = hex(len(data))
if sys.version_info[0] < 3 and type(length) is long:
# We need to remove the trailing 'L' in the hex representation
length = length[:-1]
# Store the length of the data
file_handle.write(asbytes(length.ljust(_MAX_LEN)))
file_handle.write(zlib.compress(asbytes(data), compress))
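# Hedged sketch (not part of the original module): a z-file round-trip
# through an in-memory buffer, illustrating the layout written above
# (magic prefix, hex-encoded length, zlib-compressed payload).
def _example_zfile_roundtrip():
    buf = BytesIO()
    write_zfile(buf, asbytes('hello world'), compress=1)
    buf.seek(0)
    assert _read_magic(buf) == _ZFILE_PREFIX
    assert read_zfile(buf) == asbytes('hello world')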
###############################################################################
# Utility objects for persistence.
class NDArrayWrapper(object):
""" An object to be persisted instead of numpy arrays.
The only thing this object does, is to carry the filename in which
the array has been persisted, and the array subclass.
"""
def __init__(self, filename, subclass):
"Store the useful information for later"
self.filename = filename
self.subclass = subclass
def read(self, unpickler):
"Reconstruct the array"
filename = os.path.join(unpickler._dirname, self.filename)
        # Load the array from the disk. Compare the numpy version
        # numerically: a plain string comparison would misorder
        # e.g. '1.10' < '1.3'.
        np_version = tuple(int(x) for x in
                           unpickler.np.__version__.split('.')[:2])
        if np_version >= (1, 3):
            array = unpickler.np.load(filename,
                                      mmap_mode=unpickler.mmap_mode)
else:
# Numpy does not have mmap_mode before 1.3
array = unpickler.np.load(filename)
# Reconstruct subclasses. This does not work with old
# versions of numpy
if (hasattr(array, '__array_prepare__')
and not self.subclass in (unpickler.np.ndarray,
unpickler.np.memmap)):
# We need to reconstruct another subclass
new_array = unpickler.np.core.multiarray._reconstruct(
self.subclass, (0,), 'b')
new_array.__array_prepare__(array)
array = new_array
return array
#def __reduce__(self):
# return None
class ZNDArrayWrapper(NDArrayWrapper):
    """An object to be persisted instead of numpy arrays.
    This object stores the Z-file filename in which the data array has
    been persisted, together with the meta-information needed to
    retrieve it.
    The reason we store the array's raw buffer and its meta-information,
    rather than a serialized representation (tostring), is that it lets
    us fully exploit the strided memory model to avoid copies (a and a.T
    store equally fast). In addition, saving the heavy data separately
    avoids creating large temporary buffers when unpickling objects that
    contain large arrays.
    """
def __init__(self, filename, init_args, state):
"Store the useful information for later"
self.filename = filename
self.state = state
self.init_args = init_args
def read(self, unpickler):
"Reconstruct the array from the meta-information and the z-file"
        # Here we are simply reproducing the unpickling mechanism for numpy
        # arrays
filename = os.path.join(unpickler._dirname, self.filename)
array = unpickler.np.core.multiarray._reconstruct(*self.init_args)
data = read_zfile(open(filename, 'rb'))
state = self.state + (data,)
array.__setstate__(state)
return array
###############################################################################
# Pickler classes
class NumpyPickler(Pickler):
    """A pickler to persist big data efficiently.
    The main features of this object are:
    * persistence of numpy arrays in separate .npy files, for which
    I/O is fast.
    * optional compression using Zlib, taking special care to avoid
    temporaries.
    """
def __init__(self, filename, compress=0, cache_size=10):
self._filename = filename
self._filenames = [filename, ]
self.cache_size = cache_size
self.compress = compress
if not self.compress:
self.file = open(filename, 'wb')
else:
self.file = BytesIO()
# Count the number of npy files that we have created:
self._npy_counter = 0
Pickler.__init__(self, self.file,
protocol=pickle.HIGHEST_PROTOCOL)
# delayed import of numpy, to avoid tight coupling
try:
import numpy as np
except ImportError:
np = None
self.np = np
def _write_array(self, array, filename):
if not self.compress:
self.np.save(filename, array)
container = NDArrayWrapper(os.path.basename(filename),
type(array))
else:
filename += '.z'
# Efficient compressed storage:
# The meta data is stored in the container, and the core
# numerics in a z-file
_, init_args, state = array.__reduce__()
# the last entry of 'state' is the data itself
zfile = open(filename, 'wb')
write_zfile(zfile, state[-1],
compress=self.compress)
zfile.close()
state = state[:-1]
container = ZNDArrayWrapper(os.path.basename(filename),
init_args, state)
return container, filename
def save(self, obj):
""" Subclass the save method, to save ndarray subclasses in npy
files, rather than pickling them. Of course, this is a
total abuse of the Pickler class.
"""
if self.np is not None and type(obj) in (self.np.ndarray,
self.np.matrix, self.np.memmap):
size = obj.size * obj.itemsize
if self.compress and size < self.cache_size * _MEGA:
# When compressing, as we are not writing directly to the
# disk, it is more efficient to use standard pickling
if type(obj) is self.np.memmap:
# Pickling doesn't work with memmaped arrays
obj = self.np.asarray(obj)
return Pickler.save(self, obj)
self._npy_counter += 1
try:
filename = '%s_%02i.npy' % (self._filename,
self._npy_counter)
# This converts the array in a container
obj, filename = self._write_array(obj, filename)
self._filenames.append(filename)
except:
self._npy_counter -= 1
# XXX: We should have a logging mechanism
print('Failed to save %s to .npy file:\n%s' % (
type(obj),
traceback.format_exc()))
return Pickler.save(self, obj)
def close(self):
if self.compress:
zfile = open(self._filename, 'wb')
write_zfile(zfile,
self.file.getvalue(), self.compress)
zfile.close()
class NumpyUnpickler(Unpickler):
"""A subclass of the Unpickler to unpickle our numpy pickles.
"""
dispatch = Unpickler.dispatch.copy()
def __init__(self, filename, file_handle, mmap_mode=None):
self._filename = os.path.basename(filename)
self._dirname = os.path.dirname(filename)
self.mmap_mode = mmap_mode
self.file_handle = self._open_pickle(file_handle)
Unpickler.__init__(self, self.file_handle)
try:
import numpy as np
except ImportError:
np = None
self.np = np
def _open_pickle(self, file_handle):
return file_handle
def load_build(self):
""" This method is called to set the state of a newly created
object.
We capture it to replace our place-holder objects,
NDArrayWrapper, by the array we are interested in. We
replace them directly in the stack of pickler.
"""
Unpickler.load_build(self)
if isinstance(self.stack[-1], NDArrayWrapper):
if self.np is None:
raise ImportError('Trying to unpickle an ndarray, '
"but numpy didn't import correctly")
nd_array_wrapper = self.stack.pop()
array = nd_array_wrapper.read(self)
self.stack.append(array)
# Be careful to register our new method.
if sys.version_info[0] >= 3:
dispatch[pickle.BUILD[0]] = load_build
else:
dispatch[pickle.BUILD] = load_build
class ZipNumpyUnpickler(NumpyUnpickler):
"""A subclass of our Unpickler to unpickle on the fly from
compressed storage."""
def __init__(self, filename, file_handle):
NumpyUnpickler.__init__(self, filename,
file_handle,
mmap_mode=None)
def _open_pickle(self, file_handle):
return BytesIO(read_zfile(file_handle))
###############################################################################
# Utility functions
def dump(value, filename, compress=0, cache_size=100):
"""Fast persistence of an arbitrary Python object into a files, with
dedicated storage for numpy arrays.
Parameters
-----------
value: any Python object
The object to store to disk
filename: string
The name of the file in which it is to be stored
    compress: integer from 0 to 9, optional
Optional compression level for the data. 0 is no compression.
Higher means more compression, but also slower read and
write times. Using a value of 3 is often a good compromise.
See the notes for more details.
cache_size: positive number, optional
Fixes the order of magnitude (in megabytes) of the cache used
for in-memory compression. Note that this is just an order of
magnitude estimate and that for big arrays, the code will go
over this value at dump and at load time.
Returns
-------
filenames: list of strings
The list of file names in which the data is stored. If
compress is false, each array is stored in a different file.
See Also
--------
joblib.load : corresponding loader
Notes
-----
Memmapping on load cannot be used for compressed files. Thus
using compression can significantly slow down loading. In
    addition, compressed files take extra memory during
dump and load.
"""
if compress is True:
# By default, if compress is enabled, we want to be using 3 by
# default
compress = 3
if not isinstance(filename, _basestring):
# People keep inverting arguments, and the resulting error is
# incomprehensible
raise ValueError(
'Second argument should be a filename, %s (type %s) was given'
% (filename, type(filename))
)
try:
pickler = NumpyPickler(filename, compress=compress,
cache_size=cache_size)
pickler.dump(value)
pickler.close()
finally:
if 'pickler' in locals() and hasattr(pickler, 'file'):
pickler.file.flush()
pickler.file.close()
return pickler._filenames
def load(filename, mmap_mode=None):
"""Reconstruct a Python object from a file persisted with joblib.load.
Parameters
-----------
filename: string
The name of the file from which to load the object
mmap_mode: {None, 'r+', 'r', 'w+', 'c'}, optional
If not None, the arrays are memory-mapped from the disk. This
        mode has no effect for compressed files. Note that in this
        case the reconstructed object might no longer match exactly
the originally pickled object.
Returns
-------
result: any Python object
The object stored in the file.
See Also
--------
joblib.dump : function to save an object
Notes
-----
This function can load numpy array files saved separately during the
dump. If the mmap_mode argument is given, it is passed to np.load and
arrays are loaded as memmaps. As a consequence, the reconstructed
object might not match the original pickled object. Note that if the
file was saved with compression, the arrays cannot be memmaped.
"""
file_handle = open(filename, 'rb')
# We are careful to open the file handle early and keep it open to
# avoid race-conditions on renames. That said, if data are stored in
# companion files, moving the directory will create a race when
# joblib tries to access the companion files.
if _read_magic(file_handle) == _ZFILE_PREFIX:
if mmap_mode is not None:
warnings.warn('file "%(filename)s" appears to be a zip, '
'ignoring mmap_mode "%(mmap_mode)s" flag passed'
% locals(), Warning, stacklevel=2)
unpickler = ZipNumpyUnpickler(filename, file_handle=file_handle)
else:
unpickler = NumpyUnpickler(filename,
file_handle=file_handle,
mmap_mode=mmap_mode)
try:
obj = unpickler.load()
finally:
if hasattr(unpickler, 'file_handle'):
unpickler.file_handle.close()
return obj
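# Hedged usage sketch (not part of the original module): round-trips a
# plain Python object through dump()/load(). The target path is created
# with tempfile; no numpy arrays are involved, so a single file results.
def _example_dump_load():
    import tempfile
    target = os.path.join(tempfile.mkdtemp(), 'joblib_demo.pkl')
    data = {'answer': 42, 'items': [1, 2, 3]}
    filenames = dump(data, target, compress=3)  # one z-file when compressed
    assert filenames == [target]
    assert load(target) == data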
|
|
from __future__ import unicode_literals
import codecs
import os
import re
import warnings
from django.conf import settings
from django.core.management.base import CommandError
from django.db import models, router
def sql_create(app, style, connection):
"Returns a list of the CREATE TABLE SQL statements for the given app."
if connection.settings_dict['ENGINE'] == 'django.db.backends.dummy':
# This must be the "dummy" database backend, which means the user
# hasn't set ENGINE for the database.
raise CommandError("Django doesn't know which syntax to use for your SQL statements,\n" +
"because you haven't properly specified the ENGINE setting for the database.\n" +
"see: https://docs.djangoproject.com/en/dev/ref/settings/#databases")
# Get installed models, so we generate REFERENCES right.
# We trim models from the current app so that the sqlreset command does not
# generate invalid SQL (leaving models out of known_models is harmless, so
# we can be conservative).
app_models = models.get_models(app, include_auto_created=True)
final_output = []
tables = connection.introspection.table_names()
known_models = set(model for model in connection.introspection.installed_models(tables) if model not in app_models)
pending_references = {}
for model in router.get_migratable_models(app, connection.alias, include_auto_created=True):
output, references = connection.creation.sql_create_model(model, style, known_models)
final_output.extend(output)
for refto, refs in references.items():
pending_references.setdefault(refto, []).extend(refs)
if refto in known_models:
final_output.extend(connection.creation.sql_for_pending_references(refto, style, pending_references))
final_output.extend(connection.creation.sql_for_pending_references(model, style, pending_references))
# Keep track of the fact that we've created the table for this model.
known_models.add(model)
# Handle references to tables that are from other apps
# but don't exist physically.
not_installed_models = set(pending_references.keys())
if not_installed_models:
alter_sql = []
for model in not_installed_models:
alter_sql.extend(['-- ' + sql for sql in
connection.creation.sql_for_pending_references(model, style, pending_references)])
if alter_sql:
final_output.append('-- The following references should be added but depend on non-existent tables:')
final_output.extend(alter_sql)
return final_output
def sql_delete(app, style, connection):
"Returns a list of the DROP TABLE SQL statements for the given app."
# This should work even if a connection isn't available
try:
cursor = connection.cursor()
except Exception:
cursor = None
# Figure out which tables already exist
if cursor:
table_names = connection.introspection.table_names(cursor)
else:
table_names = []
output = []
# Output DROP TABLE statements for standard application tables.
to_delete = set()
references_to_delete = {}
app_models = router.get_migratable_models(app, connection.alias, include_auto_created=True)
for model in app_models:
if cursor and connection.introspection.table_name_converter(model._meta.db_table) in table_names:
# The table exists, so it needs to be dropped
opts = model._meta
for f in opts.local_fields:
if f.rel and f.rel.to not in to_delete:
references_to_delete.setdefault(f.rel.to, []).append((model, f))
to_delete.add(model)
for model in app_models:
if connection.introspection.table_name_converter(model._meta.db_table) in table_names:
output.extend(connection.creation.sql_destroy_model(model, references_to_delete, style))
# Close database connection explicitly, in case this output is being piped
# directly into a database client, to avoid locking issues.
if cursor:
cursor.close()
connection.close()
return output[::-1] # Reverse it, to deal with table dependencies.
def sql_flush(style, connection, only_django=False, reset_sequences=True, allow_cascade=False):
"""
Returns a list of the SQL statements used to flush the database.
If only_django is True, then only table names that have associated Django
models and are in INSTALLED_APPS will be included.
"""
if only_django:
tables = connection.introspection.django_table_names(only_existing=True)
else:
tables = connection.introspection.table_names()
seqs = connection.introspection.sequence_list() if reset_sequences else ()
statements = connection.ops.sql_flush(style, tables, seqs, allow_cascade)
return statements
def sql_custom(app, style, connection):
"Returns a list of the custom table modifying SQL statements for the given app."
output = []
app_models = router.get_migratable_models(app, connection.alias)
for model in app_models:
output.extend(custom_sql_for_model(model, style, connection))
return output
def sql_indexes(app, style, connection):
"Returns a list of the CREATE INDEX SQL statements for all models in the given app."
output = []
for model in router.get_migratable_models(app, connection.alias, include_auto_created=True):
output.extend(connection.creation.sql_indexes_for_model(model, style))
return output
def sql_destroy_indexes(app, style, connection):
"Returns a list of the DROP INDEX SQL statements for all models in the given app."
output = []
for model in router.get_migratable_models(app, connection.alias, include_auto_created=True):
output.extend(connection.creation.sql_destroy_indexes_for_model(model, style))
return output
def sql_all(app, style, connection):
"Returns a list of CREATE TABLE SQL, initial-data inserts, and CREATE INDEX SQL for the given module."
return sql_create(app, style, connection) + sql_custom(app, style, connection) + sql_indexes(app, style, connection)
def _split_statements(content):
comment_re = re.compile(r"^((?:'[^']*'|[^'])*?)--.*$")
statements = []
statement = []
for line in content.split("\n"):
cleaned_line = comment_re.sub(r"\1", line).strip()
if not cleaned_line:
continue
statement.append(cleaned_line)
if cleaned_line.endswith(";"):
statements.append(" ".join(statement))
statement = []
return statements
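# Hedged sketch (illustrative input, not part of the original module):
# _split_statements() strips "--" comments outside string literals and
# joins multi-line statements on their terminating semicolons.
def _example_split_statements():
    sql = (
        "-- seed data\n"
        "INSERT INTO poll (question)\n"
        "    VALUES ('What''s up?');  -- trailing comment\n"
        "UPDATE poll SET question = 'hi';\n"
    )
    return _split_statements(sql)
    # -> ["INSERT INTO poll (question) VALUES ('What''s up?');",
    #     "UPDATE poll SET question = 'hi';"]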
def custom_sql_for_model(model, style, connection):
opts = model._meta
app_dirs = []
app_dir = models.get_app_path(model._meta.app_label)
app_dirs.append(os.path.normpath(os.path.join(app_dir, 'sql')))
# Deprecated location -- remove in Django 1.9
old_app_dir = os.path.normpath(os.path.join(app_dir, 'models/sql'))
if os.path.exists(old_app_dir):
warnings.warn("Custom SQL location '<app_label>/models/sql' is "
"deprecated, use '<app_label>/sql' instead.",
PendingDeprecationWarning)
app_dirs.append(old_app_dir)
output = []
# Post-creation SQL should come before any initial SQL data is loaded.
# However, this should not be done for models that are unmanaged or
# for fields that are part of a parent model (via model inheritance).
if opts.managed:
post_sql_fields = [f for f in opts.local_fields if hasattr(f, 'post_create_sql')]
for f in post_sql_fields:
output.extend(f.post_create_sql(style, model._meta.db_table))
# Find custom SQL, if it's available.
backend_name = connection.settings_dict['ENGINE'].split('.')[-1]
sql_files = []
for app_dir in app_dirs:
sql_files.append(os.path.join(app_dir, "%s.%s.sql" % (opts.model_name, backend_name)))
sql_files.append(os.path.join(app_dir, "%s.sql" % opts.model_name))
for sql_file in sql_files:
if os.path.exists(sql_file):
with codecs.open(sql_file, 'U', encoding=settings.FILE_CHARSET) as fp:
# Some backends can't execute more than one SQL statement at a time,
# so split into separate statements.
output.extend(_split_statements(fp.read()))
return output
def emit_pre_migrate_signal(create_models, verbosity, interactive, db):
# Emit the pre_migrate signal for every application.
for app in models.get_apps():
app_name = app.__name__.split('.')[-2]
if verbosity >= 2:
print("Running pre-migrate handlers for application %s" % app_name)
models.signals.pre_migrate.send(sender=app, app=app,
create_models=create_models,
verbosity=verbosity,
interactive=interactive,
db=db)
def emit_post_migrate_signal(created_models, verbosity, interactive, db):
# Emit the post_migrate signal for every application.
for app in models.get_apps():
app_name = app.__name__.split('.')[-2]
if verbosity >= 2:
print("Running post-migrate handlers for application %s" % app_name)
models.signals.post_migrate.send(sender=app, app=app,
created_models=created_models, verbosity=verbosity,
interactive=interactive, db=db)
|
|
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from eventlet import greenthread
import mock
from nova.compute import power_state
from nova.compute import task_states
from nova import exception
from nova import test
from nova.tests.virt.xenapi import stubs
from nova.virt import fake
from nova.virt.xenapi import agent as xenapi_agent
from nova.virt.xenapi.client import session as xenapi_session
from nova.virt.xenapi import fake as xenapi_fake
from nova.virt.xenapi import vm_utils
from nova.virt.xenapi import vmops
class VMOpsTestBase(stubs.XenAPITestBaseNoDB):
def setUp(self):
super(VMOpsTestBase, self).setUp()
self._setup_mock_vmops()
self.vms = []
def _setup_mock_vmops(self, product_brand=None, product_version=None):
stubs.stubout_session(self.stubs, xenapi_fake.SessionBase)
self._session = xenapi_session.XenAPISession('test_url', 'root',
'test_pass')
self.vmops = vmops.VMOps(self._session, fake.FakeVirtAPI())
def create_vm(self, name, state="Running"):
vm_ref = xenapi_fake.create_vm(name, state)
self.vms.append(vm_ref)
vm = xenapi_fake.get_record("VM", vm_ref)
return vm, vm_ref
def tearDown(self):
super(VMOpsTestBase, self).tearDown()
for vm in self.vms:
xenapi_fake.destroy_vm(vm)
class VMOpsTestCase(VMOpsTestBase):
def setUp(self):
super(VMOpsTestCase, self).setUp()
self._setup_mock_vmops()
def _setup_mock_vmops(self, product_brand=None, product_version=None):
self._session = self._get_mock_session(product_brand, product_version)
self._vmops = vmops.VMOps(self._session, fake.FakeVirtAPI())
def _get_mock_session(self, product_brand, product_version):
class Mock(object):
pass
mock_session = Mock()
mock_session.product_brand = product_brand
mock_session.product_version = product_version
return mock_session
def _test_finish_revert_migration_after_crash(self, backup_made, new_made,
vm_shutdown=True):
instance = {'name': 'foo',
'task_state': task_states.RESIZE_MIGRATING}
context = 'fake_context'
self.mox.StubOutWithMock(vm_utils, 'lookup')
self.mox.StubOutWithMock(self._vmops, '_destroy')
self.mox.StubOutWithMock(vm_utils, 'set_vm_name_label')
self.mox.StubOutWithMock(self._vmops, '_attach_mapped_block_devices')
self.mox.StubOutWithMock(self._vmops, '_start')
self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown')
vm_utils.lookup(self._session, 'foo-orig').AndReturn(
backup_made and 'foo' or None)
vm_utils.lookup(self._session, 'foo').AndReturn(
(not backup_made or new_made) and 'foo' or None)
if backup_made:
if new_made:
self._vmops._destroy(instance, 'foo')
vm_utils.set_vm_name_label(self._session, 'foo', 'foo')
self._vmops._attach_mapped_block_devices(instance, [])
vm_utils.is_vm_shutdown(self._session, 'foo').AndReturn(vm_shutdown)
if vm_shutdown:
self._vmops._start(instance, 'foo')
self.mox.ReplayAll()
self._vmops.finish_revert_migration(context, instance, [])
def test_finish_revert_migration_after_crash(self):
self._test_finish_revert_migration_after_crash(True, True)
def test_finish_revert_migration_after_crash_before_new(self):
self._test_finish_revert_migration_after_crash(True, False)
def test_finish_revert_migration_after_crash_before_backup(self):
self._test_finish_revert_migration_after_crash(False, False)
def test_xsm_sr_check_relaxed_cached(self):
self.make_plugin_call_count = 0
def fake_make_plugin_call(plugin, method, **args):
self.make_plugin_call_count = self.make_plugin_call_count + 1
return "true"
self.stubs.Set(self._vmops, "_make_plugin_call",
fake_make_plugin_call)
self.assertTrue(self._vmops._is_xsm_sr_check_relaxed())
self.assertTrue(self._vmops._is_xsm_sr_check_relaxed())
self.assertEqual(self.make_plugin_call_count, 1)
def test_get_vm_opaque_ref_raises_instance_not_found(self):
instance = {"name": "dummy"}
self.mox.StubOutWithMock(vm_utils, 'lookup')
vm_utils.lookup(self._session, instance['name'], False).AndReturn(None)
self.mox.ReplayAll()
self.assertRaises(exception.InstanceNotFound,
self._vmops._get_vm_opaque_ref, instance)
class InjectAutoDiskConfigTestCase(VMOpsTestBase):
def setUp(self):
super(InjectAutoDiskConfigTestCase, self).setUp()
def test_inject_auto_disk_config_when_present(self):
vm, vm_ref = self.create_vm("dummy")
instance = {"name": "dummy", "uuid": "1234", "auto_disk_config": True}
self.vmops._inject_auto_disk_config(instance, vm_ref)
xenstore_data = vm['xenstore_data']
self.assertEqual(xenstore_data['vm-data/auto-disk-config'], 'True')
def test_inject_auto_disk_config_none_as_false(self):
vm, vm_ref = self.create_vm("dummy")
instance = {"name": "dummy", "uuid": "1234", "auto_disk_config": None}
self.vmops._inject_auto_disk_config(instance, vm_ref)
xenstore_data = vm['xenstore_data']
self.assertEqual(xenstore_data['vm-data/auto-disk-config'], 'False')
class GetConsoleOutputTestCase(VMOpsTestBase):
def setUp(self):
super(GetConsoleOutputTestCase, self).setUp()
def test_get_console_output_works(self):
self.mox.StubOutWithMock(self.vmops, '_get_dom_id')
instance = {"name": "dummy"}
self.vmops._get_dom_id(instance, check_rescue=True).AndReturn(42)
self.mox.ReplayAll()
self.assertEqual("dom_id: 42", self.vmops.get_console_output(instance))
def test_get_console_output_throws_nova_exception(self):
self.mox.StubOutWithMock(self.vmops, '_get_dom_id')
instance = {"name": "dummy"}
# dom_id=0 used to trigger exception in fake XenAPI
self.vmops._get_dom_id(instance, check_rescue=True).AndReturn(0)
self.mox.ReplayAll()
self.assertRaises(exception.NovaException,
self.vmops.get_console_output, instance)
def test_get_dom_id_works(self):
instance = {"name": "dummy"}
vm, vm_ref = self.create_vm("dummy")
self.assertEqual(vm["domid"], self.vmops._get_dom_id(instance))
def test_get_dom_id_works_with_rescue_vm(self):
instance = {"name": "dummy"}
vm, vm_ref = self.create_vm("dummy-rescue")
self.assertEqual(vm["domid"],
self.vmops._get_dom_id(instance, check_rescue=True))
def test_get_dom_id_raises_not_found(self):
instance = {"name": "dummy"}
self.create_vm("not-dummy")
self.assertRaises(exception.NotFound, self.vmops._get_dom_id, instance)
def test_get_dom_id_works_with_vmref(self):
vm, vm_ref = self.create_vm("dummy")
self.assertEqual(vm["domid"],
self.vmops._get_dom_id(vm_ref=vm_ref))
class SpawnTestCase(VMOpsTestBase):
def _stub_out_common(self):
self.mox.StubOutWithMock(self.vmops, '_ensure_instance_name_unique')
self.mox.StubOutWithMock(self.vmops, '_ensure_enough_free_mem')
self.mox.StubOutWithMock(self.vmops, '_update_instance_progress')
self.mox.StubOutWithMock(vm_utils, 'determine_disk_image_type')
self.mox.StubOutWithMock(vm_utils, 'get_vdis_for_instance')
self.mox.StubOutWithMock(vm_utils, 'safe_destroy_vdis')
self.mox.StubOutWithMock(self.vmops, '_resize_up_vdis')
self.mox.StubOutWithMock(vm_utils,
'create_kernel_and_ramdisk')
self.mox.StubOutWithMock(vm_utils, 'destroy_kernel_ramdisk')
self.mox.StubOutWithMock(self.vmops, '_create_vm_record')
self.mox.StubOutWithMock(self.vmops, '_destroy')
self.mox.StubOutWithMock(self.vmops, '_attach_disks')
self.mox.StubOutWithMock(self.vmops, '_attach_orig_disk_for_rescue')
self.mox.StubOutWithMock(self.vmops, 'inject_network_info')
self.mox.StubOutWithMock(self.vmops, '_inject_hostname')
self.mox.StubOutWithMock(self.vmops, '_inject_instance_metadata')
self.mox.StubOutWithMock(self.vmops, '_inject_auto_disk_config')
self.mox.StubOutWithMock(self.vmops, '_file_inject_vm_settings')
self.mox.StubOutWithMock(self.vmops, '_create_vifs')
self.mox.StubOutWithMock(self.vmops.firewall_driver,
'setup_basic_filtering')
self.mox.StubOutWithMock(self.vmops.firewall_driver,
'prepare_instance_filter')
self.mox.StubOutWithMock(self.vmops, '_start')
self.mox.StubOutWithMock(self.vmops, '_wait_for_instance_to_start')
self.mox.StubOutWithMock(self.vmops,
'_configure_new_instance_with_agent')
self.mox.StubOutWithMock(self.vmops, '_remove_hostname')
self.mox.StubOutWithMock(self.vmops.firewall_driver,
'apply_instance_filter')
def _test_spawn(self, name_label_param=None, block_device_info_param=None,
rescue=False, include_root_vdi=True,
throw_exception=None):
self._stub_out_common()
instance = {"name": "dummy", "uuid": "fake_uuid"}
name_label = name_label_param
if name_label is None:
name_label = "dummy"
image_meta = {"id": "image_id"}
context = "context"
session = self.vmops._session
injected_files = "fake_files"
admin_password = "password"
network_info = "net_info"
steps = 10
if rescue:
steps += 1
block_device_info = block_device_info_param
if block_device_info and not block_device_info['root_device_name']:
block_device_info = dict(block_device_info_param)
block_device_info['root_device_name'] = \
self.vmops.default_root_dev
di_type = "di_type"
vm_utils.determine_disk_image_type(image_meta).AndReturn(di_type)
step = 1
self.vmops._update_instance_progress(context, instance, step, steps)
vdis = {"other": {"ref": "fake_ref_2", "osvol": True}}
if include_root_vdi:
vdis["root"] = {"ref": "fake_ref"}
vm_utils.get_vdis_for_instance(context, session, instance, name_label,
"image_id", di_type,
block_device_info=block_device_info).AndReturn(vdis)
self.vmops._resize_up_vdis(instance, vdis)
step += 1
self.vmops._update_instance_progress(context, instance, step, steps)
kernel_file = "kernel"
ramdisk_file = "ramdisk"
vm_utils.create_kernel_and_ramdisk(context, session,
instance, name_label).AndReturn((kernel_file, ramdisk_file))
step += 1
self.vmops._update_instance_progress(context, instance, step, steps)
vm_ref = "fake_vm_ref"
self.vmops._ensure_instance_name_unique(name_label)
self.vmops._ensure_enough_free_mem(instance)
self.vmops._create_vm_record(context, instance, name_label,
di_type, kernel_file,
ramdisk_file, image_meta).AndReturn(vm_ref)
step += 1
self.vmops._update_instance_progress(context, instance, step, steps)
self.vmops._attach_disks(instance, vm_ref, name_label, vdis, di_type,
network_info, admin_password, injected_files)
step += 1
self.vmops._update_instance_progress(context, instance, step, steps)
self.vmops._inject_instance_metadata(instance, vm_ref)
self.vmops._inject_auto_disk_config(instance, vm_ref)
self.vmops._inject_hostname(instance, vm_ref, rescue)
self.vmops._file_inject_vm_settings(instance, vm_ref, vdis,
network_info)
self.vmops.inject_network_info(instance, network_info, vm_ref)
step += 1
self.vmops._update_instance_progress(context, instance, step, steps)
self.vmops._create_vifs(instance, vm_ref, network_info)
self.vmops.firewall_driver.setup_basic_filtering(instance,
network_info).AndRaise(NotImplementedError)
self.vmops.firewall_driver.prepare_instance_filter(instance,
network_info)
step += 1
self.vmops._update_instance_progress(context, instance, step, steps)
if rescue:
self.vmops._attach_orig_disk_for_rescue(instance, vm_ref)
step += 1
self.vmops._update_instance_progress(context, instance, step,
steps)
self.vmops._start(instance, vm_ref)
self.vmops._wait_for_instance_to_start(instance, vm_ref)
step += 1
self.vmops._update_instance_progress(context, instance, step, steps)
self.vmops._configure_new_instance_with_agent(instance, vm_ref,
injected_files, admin_password)
self.vmops._remove_hostname(instance, vm_ref)
step += 1
self.vmops._update_instance_progress(context, instance, step, steps)
self.vmops.firewall_driver.apply_instance_filter(instance,
network_info)
step += 1
last_call = self.vmops._update_instance_progress(context, instance,
step, steps)
        if throw_exception:
            last_call.AndRaise(throw_exception)
            # the rollback path is only expected when spawn fails
            self.vmops._destroy(instance, vm_ref, network_info=network_info)
            vm_utils.destroy_kernel_ramdisk(self.vmops._session, instance,
                                            kernel_file, ramdisk_file)
            vm_utils.safe_destroy_vdis(self.vmops._session, ["fake_ref"])
self.mox.ReplayAll()
self.vmops.spawn(context, instance, image_meta, injected_files,
admin_password, network_info,
block_device_info_param, name_label_param, rescue)
def test_spawn(self):
self._test_spawn()
def test_spawn_with_alternate_options(self):
self._test_spawn(include_root_vdi=False, rescue=True,
name_label_param="bob",
block_device_info_param={"root_device_name": ""})
def test_spawn_performs_rollback_and_throws_exception(self):
self.assertRaises(test.TestingException, self._test_spawn,
throw_exception=test.TestingException())
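    # Mirrors _test_spawn for VMOps.finish_migration(): records the expected
    # call sequence, with optional power-on, resize and failure injection.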
def _test_finish_migration(self, power_on=True, resize_instance=True,
throw_exception=None):
self._stub_out_common()
self.mox.StubOutWithMock(vm_utils, "import_all_migrated_disks")
self.mox.StubOutWithMock(self.vmops, "_attach_mapped_block_devices")
context = "context"
migration = {}
name_label = "dummy"
instance = {"name": name_label, "uuid": "fake_uuid"}
disk_info = "disk_info"
network_info = "net_info"
image_meta = {"id": "image_id"}
block_device_info = "bdi"
session = self.vmops._session
self.vmops._ensure_instance_name_unique(name_label)
self.vmops._ensure_enough_free_mem(instance)
di_type = "di_type"
vm_utils.determine_disk_image_type(image_meta).AndReturn(di_type)
root_vdi = {"ref": "fake_ref"}
ephemeral_vdi = {"ref": "fake_ref_e"}
vdis = {"root": root_vdi, "ephemerals": {4: ephemeral_vdi}}
vm_utils.import_all_migrated_disks(self.vmops._session,
instance).AndReturn(vdis)
kernel_file = "kernel"
ramdisk_file = "ramdisk"
vm_utils.create_kernel_and_ramdisk(context, session,
instance, name_label).AndReturn((kernel_file, ramdisk_file))
vm_ref = "fake_vm_ref"
self.vmops._create_vm_record(context, instance, name_label,
di_type, kernel_file,
ramdisk_file, image_meta).AndReturn(vm_ref)
if resize_instance:
self.vmops._resize_up_vdis(instance, vdis)
self.vmops._attach_disks(instance, vm_ref, name_label, vdis, di_type,
network_info, None, None)
self.vmops._attach_mapped_block_devices(instance, block_device_info)
self.vmops._inject_instance_metadata(instance, vm_ref)
self.vmops._inject_auto_disk_config(instance, vm_ref)
self.vmops._file_inject_vm_settings(instance, vm_ref, vdis,
network_info)
self.vmops.inject_network_info(instance, network_info, vm_ref)
self.vmops._create_vifs(instance, vm_ref, network_info)
self.vmops.firewall_driver.setup_basic_filtering(instance,
network_info).AndRaise(NotImplementedError)
self.vmops.firewall_driver.prepare_instance_filter(instance,
network_info)
if power_on:
self.vmops._start(instance, vm_ref)
self.vmops._wait_for_instance_to_start(instance, vm_ref)
self.vmops.firewall_driver.apply_instance_filter(instance,
network_info)
last_call = self.vmops._update_instance_progress(context, instance,
step=5, total_steps=5)
        if throw_exception:
            last_call.AndRaise(throw_exception)
            # the rollback path is only expected when finish_migration fails
            self.vmops._destroy(instance, vm_ref, network_info=network_info)
            vm_utils.destroy_kernel_ramdisk(self.vmops._session, instance,
                                            kernel_file, ramdisk_file)
            vm_utils.safe_destroy_vdis(self.vmops._session,
                                       ["fake_ref_e", "fake_ref"])
self.mox.ReplayAll()
self.vmops.finish_migration(context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
block_device_info, power_on)
def test_finish_migration(self):
self._test_finish_migration()
def test_finish_migration_no_power_on(self):
self._test_finish_migration(power_on=False, resize_instance=False)
def test_finish_migrate_performs_rollback_on_error(self):
self.assertRaises(test.TestingException, self._test_finish_migration,
power_on=False, resize_instance=False,
throw_exception=test.TestingException())
def test_remove_hostname(self):
vm, vm_ref = self.create_vm("dummy")
instance = {"name": "dummy", "uuid": "1234", "auto_disk_config": None}
self.mox.StubOutWithMock(self._session, 'call_xenapi')
self._session.call_xenapi("VM.remove_from_xenstore_data", vm_ref,
"vm-data/hostname")
self.mox.ReplayAll()
self.vmops._remove_hostname(instance, vm_ref)
self.mox.VerifyAll()
def test_reset_network(self):
class mock_agent(object):
def __init__(self):
self.called = False
def resetnetwork(self):
self.called = True
vm, vm_ref = self.create_vm("dummy")
instance = {"name": "dummy", "uuid": "1234", "auto_disk_config": None}
agent = mock_agent()
self.mox.StubOutWithMock(self.vmops, 'agent_enabled')
self.mox.StubOutWithMock(self.vmops, '_get_agent')
self.mox.StubOutWithMock(self.vmops, '_inject_hostname')
self.mox.StubOutWithMock(self.vmops, '_remove_hostname')
self.vmops.agent_enabled(instance).AndReturn(True)
self.vmops._get_agent(instance, vm_ref).AndReturn(agent)
self.vmops._inject_hostname(instance, vm_ref, False)
self.vmops._remove_hostname(instance, vm_ref)
self.mox.ReplayAll()
self.vmops.reset_network(instance)
self.assertTrue(agent.called)
self.mox.VerifyAll()
def test_inject_hostname(self):
instance = {"hostname": "dummy", "os_type": "fake", "uuid": "uuid"}
vm_ref = "vm_ref"
self.mox.StubOutWithMock(self.vmops, '_add_to_param_xenstore')
self.vmops._add_to_param_xenstore(vm_ref, 'vm-data/hostname', 'dummy')
self.mox.ReplayAll()
self.vmops._inject_hostname(instance, vm_ref, rescue=False)
def test_inject_hostname_with_rescue_prefix(self):
instance = {"hostname": "dummy", "os_type": "fake", "uuid": "uuid"}
vm_ref = "vm_ref"
self.mox.StubOutWithMock(self.vmops, '_add_to_param_xenstore')
self.vmops._add_to_param_xenstore(vm_ref, 'vm-data/hostname',
'RESCUE-dummy')
self.mox.ReplayAll()
self.vmops._inject_hostname(instance, vm_ref, rescue=True)
def test_inject_hostname_with_windows_name_truncation(self):
instance = {"hostname": "dummydummydummydummydummy",
"os_type": "windows", "uuid": "uuid"}
vm_ref = "vm_ref"
self.mox.StubOutWithMock(self.vmops, '_add_to_param_xenstore')
self.vmops._add_to_param_xenstore(vm_ref, 'vm-data/hostname',
'RESCUE-dummydum')
self.mox.ReplayAll()
self.vmops._inject_hostname(instance, vm_ref, rescue=True)
def test_wait_for_instance_to_start(self):
instance = {"uuid": "uuid"}
vm_ref = "vm_ref"
self.mox.StubOutWithMock(vm_utils, 'get_power_state')
self.mox.StubOutWithMock(greenthread, 'sleep')
vm_utils.get_power_state(self._session, vm_ref).AndReturn(
power_state.SHUTDOWN)
greenthread.sleep(0.5)
vm_utils.get_power_state(self._session, vm_ref).AndReturn(
power_state.RUNNING)
self.mox.ReplayAll()
self.vmops._wait_for_instance_to_start(instance, vm_ref)
def test_attach_orig_disk_for_rescue(self):
instance = {"name": "dummy"}
vm_ref = "vm_ref"
self.mox.StubOutWithMock(vm_utils, 'lookup')
self.mox.StubOutWithMock(self.vmops, '_find_root_vdi_ref')
self.mox.StubOutWithMock(vm_utils, 'create_vbd')
vm_utils.lookup(self.vmops._session, "dummy").AndReturn("ref")
self.vmops._find_root_vdi_ref("ref").AndReturn("vdi_ref")
vm_utils.create_vbd(self.vmops._session, vm_ref, "vdi_ref",
vmops.DEVICE_RESCUE, bootable=False)
self.mox.ReplayAll()
self.vmops._attach_orig_disk_for_rescue(instance, vm_ref)
def test_agent_update_setup(self):
# agent updates need to occur after networking is configured
instance = {'name': 'betelgeuse',
'uuid': '1-2-3-4-5-6'}
vm_ref = 'vm_ref'
agent = xenapi_agent.XenAPIBasedAgent(self.vmops._session,
self.vmops._virtapi, instance, vm_ref)
self.mox.StubOutWithMock(xenapi_agent, 'should_use_agent')
self.mox.StubOutWithMock(self.vmops, '_get_agent')
self.mox.StubOutWithMock(agent, 'get_version')
self.mox.StubOutWithMock(agent, 'resetnetwork')
self.mox.StubOutWithMock(agent, 'update_if_needed')
xenapi_agent.should_use_agent(instance).AndReturn(True)
self.vmops._get_agent(instance, vm_ref).AndReturn(agent)
agent.get_version().AndReturn('1.2.3')
agent.resetnetwork()
agent.update_if_needed('1.2.3')
self.mox.ReplayAll()
self.vmops._configure_new_instance_with_agent(instance, vm_ref,
None, None)
@mock.patch.object(vmops.VMOps, '_update_instance_progress')
@mock.patch.object(vmops.VMOps, '_get_vm_opaque_ref')
@mock.patch.object(vm_utils, 'get_sr_path')
@mock.patch.object(vmops.VMOps, '_detach_block_devices_from_orig_vm')
@mock.patch.object(vmops.VMOps, '_migrate_disk_resizing_down')
@mock.patch.object(vmops.VMOps, '_migrate_disk_resizing_up')
class MigrateDiskAndPowerOffTestCase(VMOpsTestBase):
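    # Class-level patches are applied bottom-up, so each test method receives
    # (migrate_up, migrate_down, <remaining mocks>) in that order.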
def test_migrate_disk_and_power_off_works_down(self,
migrate_up, migrate_down, *mocks):
instance = {"root_gb": 2, "ephemeral_gb": 0, "uuid": "uuid"}
flavor = {"root_gb": 1, "ephemeral_gb": 0}
self.vmops.migrate_disk_and_power_off(None, instance, None,
flavor, None)
self.assertFalse(migrate_up.called)
self.assertTrue(migrate_down.called)
def test_migrate_disk_and_power_off_works_up(self,
migrate_up, migrate_down, *mocks):
instance = {"root_gb": 1, "ephemeral_gb": 1, "uuid": "uuid"}
flavor = {"root_gb": 2, "ephemeral_gb": 2}
self.vmops.migrate_disk_and_power_off(None, instance, None,
flavor, None)
self.assertFalse(migrate_down.called)
self.assertTrue(migrate_up.called)
def test_migrate_disk_and_power_off_resize_down_ephemeral_fails(self,
migrate_up, migrate_down, *mocks):
instance = {"ephemeral_gb": 2}
flavor = {"ephemeral_gb": 1}
self.assertRaises(exception.ResizeError,
self.vmops.migrate_disk_and_power_off,
None, instance, None, flavor, None)
@mock.patch.object(vm_utils, 'migrate_vhd')
@mock.patch.object(vmops.VMOps, '_resize_ensure_vm_is_shutdown')
@mock.patch.object(vm_utils, 'get_all_vdi_uuids_for_vm')
@mock.patch.object(vmops.VMOps, '_update_instance_progress')
@mock.patch.object(vmops.VMOps, '_apply_orig_vm_name_label')
class MigrateDiskResizingUpTestCase(VMOpsTestBase):
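    # Stand-in for vm_utils._snapshot_attached_here_impl: a generator used as
    # a context manager that yields the fake VHD chain for each userdevice.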
def _fake_snapshot_attached_here(self, session, instance, vm_ref, label,
userdevice, post_snapshot_callback):
self.assertIsInstance(instance, dict)
if userdevice == '0':
self.assertEqual("vm_ref", vm_ref)
self.assertEqual("fake-snapshot", label)
yield ["leaf", "parent", "grandp"]
else:
leaf = userdevice + "-leaf"
parent = userdevice + "-parent"
yield [leaf, parent]
def test_migrate_disk_resizing_up_works_no_ephemeral(self,
mock_apply_orig, mock_update_progress, mock_get_all_vdi_uuids,
mock_shutdown, mock_migrate_vhd):
context = "ctxt"
instance = {"name": "fake", "uuid": "uuid"}
dest = "dest"
vm_ref = "vm_ref"
sr_path = "sr_path"
mock_get_all_vdi_uuids.return_value = None
with mock.patch.object(vm_utils, '_snapshot_attached_here_impl',
self._fake_snapshot_attached_here):
self.vmops._migrate_disk_resizing_up(context, instance, dest,
vm_ref, sr_path)
mock_get_all_vdi_uuids.assert_called_once_with(self.vmops._session,
vm_ref, min_userdevice=4)
mock_apply_orig.assert_called_once_with(instance, vm_ref)
mock_shutdown.assert_called_once_with(instance, vm_ref)
m_vhd_expected = [mock.call(self.vmops._session, instance, "parent",
dest, sr_path, 1),
mock.call(self.vmops._session, instance, "grandp",
dest, sr_path, 2),
mock.call(self.vmops._session, instance, "leaf",
dest, sr_path, 0)]
self.assertEqual(m_vhd_expected, mock_migrate_vhd.call_args_list)
prog_expected = [
mock.call(context, instance, 1, 5),
mock.call(context, instance, 2, 5),
mock.call(context, instance, 3, 5),
mock.call(context, instance, 4, 5)
            # step 5/5 is executed later by finish_migration()
]
self.assertEqual(prog_expected, mock_update_progress.call_args_list)
def test_migrate_disk_resizing_up_works_with_two_ephemeral(self,
mock_apply_orig, mock_update_progress, mock_get_all_vdi_uuids,
mock_shutdown, mock_migrate_vhd):
context = "ctxt"
instance = {"name": "fake", "uuid": "uuid"}
dest = "dest"
vm_ref = "vm_ref"
sr_path = "sr_path"
mock_get_all_vdi_uuids.return_value = ["vdi-eph1", "vdi-eph2"]
with mock.patch.object(vm_utils, '_snapshot_attached_here_impl',
self._fake_snapshot_attached_here):
self.vmops._migrate_disk_resizing_up(context, instance, dest,
vm_ref, sr_path)
mock_get_all_vdi_uuids.assert_called_once_with(self.vmops._session,
vm_ref, min_userdevice=4)
mock_apply_orig.assert_called_once_with(instance, vm_ref)
mock_shutdown.assert_called_once_with(instance, vm_ref)
m_vhd_expected = [mock.call(self.vmops._session, instance,
"parent", dest, sr_path, 1),
mock.call(self.vmops._session, instance,
"grandp", dest, sr_path, 2),
mock.call(self.vmops._session, instance,
"4-parent", dest, sr_path, 1, 1),
mock.call(self.vmops._session, instance,
"5-parent", dest, sr_path, 1, 2),
mock.call(self.vmops._session, instance,
"leaf", dest, sr_path, 0),
mock.call(self.vmops._session, instance,
"4-leaf", dest, sr_path, 0, 1),
mock.call(self.vmops._session, instance,
"5-leaf", dest, sr_path, 0, 2)]
self.assertEqual(m_vhd_expected, mock_migrate_vhd.call_args_list)
prog_expected = [
mock.call(context, instance, 1, 5),
mock.call(context, instance, 2, 5),
mock.call(context, instance, 3, 5),
mock.call(context, instance, 4, 5)
            # step 5/5 is executed later by finish_migration()
]
self.assertEqual(prog_expected, mock_update_progress.call_args_list)
@mock.patch.object(vmops.VMOps, '_restore_orig_vm_and_cleanup_orphan')
def test_migrate_disk_resizing_up_rollback(self,
mock_restore,
mock_apply_orig, mock_update_progress, mock_get_all_vdi_uuids,
mock_shutdown, mock_migrate_vhd):
context = "ctxt"
instance = {"name": "fake", "uuid": "fake"}
dest = "dest"
vm_ref = "vm_ref"
sr_path = "sr_path"
mock_migrate_vhd.side_effect = test.TestingException
mock_restore.side_effect = test.TestingException
with mock.patch.object(vm_utils, '_snapshot_attached_here_impl',
self._fake_snapshot_attached_here):
self.assertRaises(exception.InstanceFaultRollback,
self.vmops._migrate_disk_resizing_up,
context, instance, dest, vm_ref, sr_path)
mock_apply_orig.assert_called_once_with(instance, vm_ref)
mock_restore.assert_called_once_with(instance)
mock_migrate_vhd.assert_called_once_with(self.vmops._session,
instance, "parent", dest, sr_path, 1)
class CreateVMRecordTestCase(VMOpsTestBase):
@mock.patch.object(vm_utils, 'determine_vm_mode')
@mock.patch.object(vm_utils, 'get_vm_device_id')
@mock.patch.object(vm_utils, 'create_vm')
def test_create_vm_record_with_vm_device_id(self, mock_create_vm,
mock_get_vm_device_id, mock_determine_vm_mode):
context = "context"
instance = {"vm_mode": "vm_mode", "uuid": "uuid123"}
name_label = "dummy"
disk_image_type = "vhd"
kernel_file = "kernel"
ramdisk_file = "ram"
device_id = "0002"
image_properties = {"xenapi_device_id": device_id}
image_meta = {"properties": image_properties}
session = "session"
self.vmops._session = session
mock_get_vm_device_id.return_value = device_id
mock_determine_vm_mode.return_value = "vm_mode"
self.vmops._create_vm_record(context, instance, name_label,
disk_image_type, kernel_file, ramdisk_file, image_meta)
mock_get_vm_device_id.assert_called_with(session, image_properties)
mock_create_vm.assert_called_with(session, instance, name_label,
kernel_file, ramdisk_file, False, device_id)
class BootableTestCase(VMOpsTestBase):
def setUp(self):
super(BootableTestCase, self).setUp()
self.instance = {"name": "test", "uuid": "fake"}
vm_rec, self.vm_ref = self.create_vm('test')
# sanity check bootlock is initially disabled:
self.assertEqual({}, vm_rec['blocked_operations'])
def _get_blocked(self):
vm_rec = self._session.call_xenapi("VM.get_record", self.vm_ref)
return vm_rec['blocked_operations']
def test_acquire_bootlock(self):
self.vmops._acquire_bootlock(self.vm_ref)
blocked = self._get_blocked()
self.assertIn('start', blocked)
def test_release_bootlock(self):
self.vmops._acquire_bootlock(self.vm_ref)
self.vmops._release_bootlock(self.vm_ref)
blocked = self._get_blocked()
self.assertNotIn('start', blocked)
def test_set_bootable(self):
self.vmops.set_bootable(self.instance, True)
blocked = self._get_blocked()
self.assertNotIn('start', blocked)
def test_set_not_bootable(self):
self.vmops.set_bootable(self.instance, False)
blocked = self._get_blocked()
self.assertIn('start', blocked)
@mock.patch.object(vm_utils, 'update_vdi_virtual_size')
class ResizeVdisTestCase(VMOpsTestBase):
def test_resize_up_vdis_root(self, mock_resize):
instance = {"root_gb": 20, "ephemeral_gb": 0}
self.vmops._resize_up_vdis(instance, {"root": {"ref": "vdi_ref"}})
mock_resize.assert_called_once_with(self.vmops._session, instance,
"vdi_ref", 20)
def test_resize_up_vdis_zero_disks(self, mock_resize):
instance = {"root_gb": 0, "ephemeral_gb": 0}
self.vmops._resize_up_vdis(instance, {"root": {}})
self.assertFalse(mock_resize.called)
@mock.patch.object(vm_utils, 'get_ephemeral_disk_sizes')
def test_resize_up_vdis_ephemeral(self, mock_sizes, mock_resize):
mock_sizes.return_value = [2000, 1000]
instance = {"root_gb": 0, "ephemeral_gb": 3000}
ephemerals = {"4": {"ref": 4}, "5": {"ref": 5}}
vdis = {"ephemerals": ephemerals}
self.vmops._resize_up_vdis(instance, vdis)
mock_sizes.assert_called_once_with(3000)
expected = [mock.call(self.vmops._session, instance, 4, 2000),
mock.call(self.vmops._session, instance, 5, 1000)]
self.assertEqual(expected, mock_resize.call_args_list)
@mock.patch.object(vm_utils, 'generate_single_ephemeral')
@mock.patch.object(vm_utils, 'get_ephemeral_disk_sizes')
def test_resize_up_vdis_ephemeral_with_generate(self, mock_sizes,
mock_generate,
mock_resize):
mock_sizes.return_value = [2000, 1000]
instance = {"root_gb": 0, "ephemeral_gb": 3000, "uuid": "a"}
ephemerals = {"4": {"ref": 4}}
vdis = {"ephemerals": ephemerals}
self.vmops._resize_up_vdis(instance, vdis)
mock_sizes.assert_called_once_with(3000)
mock_resize.assert_called_once_with(self.vmops._session, instance,
4, 2000)
mock_generate.assert_called_once_with(self.vmops._session, instance,
None, 5, 1000)
@mock.patch.object(vmops.VMOps, '_resize_ensure_vm_is_shutdown')
@mock.patch.object(vmops.VMOps, '_apply_orig_vm_name_label')
@mock.patch.object(vmops.VMOps, '_update_instance_progress')
@mock.patch.object(vm_utils, 'get_vdi_for_vm_safely')
@mock.patch.object(vm_utils, 'resize_disk')
@mock.patch.object(vm_utils, 'migrate_vhd')
@mock.patch.object(vm_utils, 'destroy_vdi')
class MigrateDiskResizingDownTestCase(VMOpsTestBase):
def test_migrate_disk_resizing_down_works_no_ephemeral(
self,
mock_destroy_vdi,
mock_migrate_vhd,
mock_resize_disk,
mock_get_vdi_for_vm_safely,
mock_update_instance_progress,
mock_apply_orig_vm_name_label,
mock_resize_ensure_vm_is_shutdown):
context = "ctx"
instance = {"name": "fake", "uuid": "uuid"}
dest = "dest"
vm_ref = "vm_ref"
sr_path = "sr_path"
instance_type = dict(root_gb=1)
old_vdi_ref = "old_ref"
new_vdi_ref = "new_ref"
new_vdi_uuid = "new_uuid"
mock_get_vdi_for_vm_safely.return_value = (old_vdi_ref, None)
mock_resize_disk.return_value = (new_vdi_ref, new_vdi_uuid)
self.vmops._migrate_disk_resizing_down(context, instance, dest,
instance_type, vm_ref, sr_path)
mock_get_vdi_for_vm_safely.assert_called_once_with(
self.vmops._session,
vm_ref)
mock_resize_ensure_vm_is_shutdown.assert_called_once_with(
instance, vm_ref)
mock_apply_orig_vm_name_label.assert_called_once_with(
instance, vm_ref)
mock_resize_disk.assert_called_once_with(
self.vmops._session,
instance,
old_vdi_ref,
instance_type)
mock_migrate_vhd.assert_called_once_with(
self.vmops._session,
instance,
new_vdi_uuid,
dest,
sr_path, 0)
mock_destroy_vdi.assert_called_once_with(
self.vmops._session,
new_vdi_ref)
prog_expected = [
mock.call(context, instance, 1, 5),
mock.call(context, instance, 2, 5),
mock.call(context, instance, 3, 5),
mock.call(context, instance, 4, 5)
            # step 5/5 is executed later by finish_migration()
]
self.assertEqual(prog_expected,
mock_update_instance_progress.call_args_list)
from __future__ import absolute_import, print_function
import logging
from django.conf import settings
from django.core.urlresolvers import reverse
from django.contrib import messages
from django.contrib.auth import login
from django.db import transaction
from django.http import HttpResponseRedirect
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from hashlib import md5
from uuid import uuid4
from sentry.models import (
AuditLogEntry, AuditLogEntryEvent, AuthIdentity, AuthProvider, Organization,
OrganizationMember, OrganizationMemberTeam, User
)
from sentry.utils.auth import get_login_redirect
from sentry.web.helpers import render_to_response
from . import manager
OK_LINK_IDENTITY = _('You have successfully linked your account to your SSO provider.')
OK_SETUP_SSO = _('SSO has been configured for your organization and any existing members have been sent an email to link their accounts.')
ERR_UID_MISMATCH = _('There was an error encountered during authentication.')
ERR_NOT_AUTHED = _('You must be authenticated to link accounts.')
class AuthHelper(object):
"""
    Helper class which is passed into each AuthView.
    It links the provider to its views and manages the pipeline and its
    session state.
Auth has several flows:
1. The user is going through provider setup, thus enforcing that they link
their current account to the new auth identity.
2. The user is anonymous and creating a brand new account.
3. The user is anonymous and logging into an existing account.
4. The user is anonymous and creating a brand new account, but may have an
existing account that should/could be merged.
5. The user is authenticated and creating a new identity, thus associating
it with their current account.
6. The user is authenticated and creating a new identity, but not linking
it with their account (thus creating a new account).
"""
# logging in or registering
FLOW_LOGIN = 1
# configuring the provider
FLOW_SETUP_PROVIDER = 2
# linking an identity to an existing account
FLOW_LINK_IDENTITY = 3
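    # Illustrative sketch (not part of the original code): a login flow is
    # typically kicked off from a Django view roughly like this, where the
    # 'dummy' provider key is an assumption for illustration:
    #
    #   helper = AuthHelper(request, organization, AuthHelper.FLOW_LOGIN,
    #                       provider_key='dummy')
    #   helper.init_pipeline()
    #   return helper.next_step()
    #
    # Each AuthView in the pipeline then re-enters via current_step() until
    # the index reaches the end and finish_pipeline() takes over.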
@classmethod
def get_for_request(cls, request):
session = request.session.get('auth', {})
organization_id = session.get('org')
if not organization_id:
logging.info('Invalid SSO data found')
return None
flow = session['flow']
auth_provider_id = session.get('ap')
provider_key = session.get('p')
if auth_provider_id:
auth_provider = AuthProvider.objects.get(
id=auth_provider_id
)
        else:
            auth_provider = None
organization = Organization.objects.get(
id=session['org'],
)
return cls(request, organization, flow,
auth_provider=auth_provider, provider_key=provider_key)
def __init__(self, request, organization, flow, auth_provider=None,
provider_key=None):
assert provider_key or auth_provider
self.request = request
self.auth_provider = auth_provider
self.organization = organization
self.flow = flow
if auth_provider:
provider = auth_provider.get_provider()
elif provider_key:
provider = manager.get(provider_key)
else:
raise NotImplementedError
self.provider = provider
if flow in (self.FLOW_LOGIN, self.FLOW_LINK_IDENTITY):
self.pipeline = provider.get_auth_pipeline()
elif flow == self.FLOW_SETUP_PROVIDER:
self.pipeline = provider.get_setup_pipeline()
else:
raise NotImplementedError
# we serialize the pipeline to be [AuthView().get_ident(), ...] which
# allows us to determine if the pipeline has changed during the auth
# flow or if the user is somehow circumventing a chunk of it
self.signature = md5(
' '.join(av.get_ident() for av in self.pipeline)
).hexdigest()
def pipeline_is_valid(self):
session = self.request.session.get('auth', {})
if not session:
return False
return session.get('sig') == self.signature
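    # Assumed resume pattern for the SSO endpoint (illustrative only):
    #
    #   helper = AuthHelper.get_for_request(request)
    #   if helper is None or not helper.pipeline_is_valid():
    #       return HttpResponseRedirect('/')  # restart the flow
    #   return helper.current_step()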
def init_pipeline(self):
session = {
'uid': self.request.user.id if self.request.user.is_authenticated() else None,
'ap': self.auth_provider.id if self.auth_provider else None,
'p': self.provider.key,
'org': self.organization.id,
'idx': -1,
'sig': self.signature,
'flow': self.flow,
'state': {},
}
self.request.session['auth'] = session
self.request.session.modified = True
def get_redirect_url(self):
return self.request.build_absolute_uri(reverse('sentry-auth-sso'))
def clear_session(self):
if 'auth' in self.request.session:
del self.request.session['auth']
self.request.session.modified = True
def current_step(self):
"""
Render the current step.
"""
session = self.request.session['auth']
idx = session['idx']
if idx == len(self.pipeline):
return self.finish_pipeline()
return self.pipeline[idx].dispatch(self.request, self)
def next_step(self):
"""
        Advance the pipeline and render the resulting step.
"""
self.request.session['auth']['idx'] += 1
self.request.session.modified = True
return self.current_step()
def finish_pipeline(self):
session = self.request.session['auth']
state = session['state']
identity = self.provider.build_identity(state)
if session['flow'] == self.FLOW_LOGIN:
# create identity and authenticate the user
response = self._finish_login_pipeline(identity)
elif session['flow'] == self.FLOW_SETUP_PROVIDER:
response = self._finish_setup_pipeline(identity)
elif session['flow'] == self.FLOW_LINK_IDENTITY:
            # link the identity to an existing or newly created account
response = self._finish_link_pipeline(identity)
return response
@transaction.atomic
def _handle_attach_identity(self, identity, member=None):
"""
        Given an already authenticated user, attach or re-attach an identity.
"""
auth_provider = self.auth_provider
request = self.request
user = request.user
organization = self.organization
try:
auth_identity = AuthIdentity.objects.get(
auth_provider=auth_provider,
ident=identity['id'],
)
except AuthIdentity.DoesNotExist:
auth_identity = AuthIdentity.objects.create(
auth_provider=auth_provider,
user=user,
ident=identity['id'],
data=identity.get('data', {}),
)
auth_is_new = True
else:
now = timezone.now()
auth_identity.update(
user=user,
data=identity.get('data', {}),
last_verified=now,
last_synced=now,
)
auth_is_new = False
if member is None:
try:
member = OrganizationMember.objects.get(
user=user,
organization=organization,
)
except OrganizationMember.DoesNotExist:
member = OrganizationMember.objects.create(
organization=organization,
type=auth_provider.default_role,
has_global_access=auth_provider.default_global_access,
user=user,
flags=getattr(OrganizationMember.flags, 'sso:linked'),
)
default_teams = auth_provider.default_teams.all()
for team in default_teams:
OrganizationMemberTeam.objects.create(
team=team,
organizationmember=member,
)
AuditLogEntry.objects.create(
organization=organization,
actor=user,
ip_address=request.META['REMOTE_ADDR'],
target_object=member.id,
target_user=user,
event=AuditLogEntryEvent.MEMBER_ADD,
data=member.get_audit_log_data(),
)
if getattr(member.flags, 'sso:invalid') or not getattr(member.flags, 'sso:linked'):
setattr(member.flags, 'sso:invalid', False)
setattr(member.flags, 'sso:linked', True)
member.save()
if auth_is_new:
AuditLogEntry.objects.create(
organization=organization,
actor=user,
ip_address=request.META['REMOTE_ADDR'],
target_object=auth_identity.id,
event=AuditLogEntryEvent.SSO_IDENTITY_LINK,
data=auth_identity.get_audit_log_data(),
)
messages.add_message(
request, messages.SUCCESS,
OK_LINK_IDENTITY,
)
return auth_identity
def _handle_new_user(self, identity):
auth_provider = self.auth_provider
organization = self.organization
request = self.request
user = User.objects.create(
username=uuid4().hex,
email=identity['email'],
first_name=identity.get('name', ''),
is_managed=True,
)
auth_identity = AuthIdentity.objects.create(
auth_provider=auth_provider,
user=user,
ident=identity['id'],
data=identity.get('data', {}),
)
om = OrganizationMember.objects.create(
organization=organization,
type=auth_provider.default_role,
has_global_access=auth_provider.default_global_access,
user=user,
flags=getattr(OrganizationMember.flags, 'sso:linked'),
)
default_teams = auth_provider.default_teams.all()
for team in default_teams:
om.teams.add(team)
AuditLogEntry.objects.create(
organization=organization,
actor=user,
ip_address=request.META['REMOTE_ADDR'],
target_object=om.id,
target_user=om.user,
event=AuditLogEntryEvent.MEMBER_ADD,
data=om.get_audit_log_data(),
)
return auth_identity
@transaction.atomic
def _finish_login_pipeline(self, identity):
"""
        The login flow executes for both anonymous and authenticated users.
        Upon completion a few branches exist:
        If the identity is already linked, the user should be logged in
        and redirected immediately.
        Otherwise, the user is presented with a confirmation window showing
        the new account that will be created; if they're already
        authenticated, it also offers a button to associate the identity
        with their existing account.
"""
auth_provider = self.auth_provider
request = self.request
try:
auth_identity = AuthIdentity.objects.get(
auth_provider=auth_provider,
ident=identity['id'],
)
except AuthIdentity.DoesNotExist:
if request.POST.get('op') == 'confirm' and request.user.is_authenticated():
auth_identity = self._handle_attach_identity(identity)
elif request.POST.get('op') == 'newuser':
auth_identity = self._handle_new_user(identity)
else:
if request.user.is_authenticated():
return self.respond('sentry/auth-confirm-link.html', {
'identity': identity,
})
return self.respond('sentry/auth-confirm-identity.html', {
'identity': identity,
})
else:
# TODO(dcramer): this is very similar to attach
now = timezone.now()
auth_identity.update(
data=identity.get('data', {}),
last_verified=now,
last_synced=now,
)
member = OrganizationMember.objects.get(
user=auth_identity.user,
organization=self.organization,
)
if getattr(member.flags, 'sso:invalid') or not getattr(member.flags, 'sso:linked'):
setattr(member.flags, 'sso:invalid', False)
setattr(member.flags, 'sso:linked', True)
member.save()
user = auth_identity.user
user.backend = settings.AUTHENTICATION_BACKENDS[0]
login(self.request, user)
self.clear_session()
return HttpResponseRedirect(get_login_redirect(self.request))
@transaction.atomic
def _finish_setup_pipeline(self, identity):
"""
The setup flow creates the auth provider as well as an identity linked
to the active user.
"""
request = self.request
if not request.user.is_authenticated():
return self.error(ERR_NOT_AUTHED)
if request.user.id != request.session['auth']['uid']:
return self.error(ERR_UID_MISMATCH)
state = request.session['auth']['state']
config = self.provider.build_config(state)
try:
om = OrganizationMember.objects.get(
user=request.user,
organization=self.organization,
)
except OrganizationMember.DoesNotExist:
return self.error(ERR_UID_MISMATCH)
self.auth_provider = AuthProvider.objects.create(
organization=self.organization,
provider=self.provider.key,
config=config,
)
self._handle_attach_identity(identity, om)
AuditLogEntry.objects.create(
organization=self.organization,
actor=request.user,
ip_address=request.META['REMOTE_ADDR'],
target_object=self.auth_provider.id,
event=AuditLogEntryEvent.SSO_ENABLE,
data=self.auth_provider.get_audit_log_data(),
)
member_list = OrganizationMember.objects.filter(
organization=self.organization,
flags=~getattr(OrganizationMember.flags, 'sso:linked'),
)
for member in member_list:
member.send_sso_link_email()
messages.add_message(
self.request, messages.SUCCESS,
OK_SETUP_SSO,
)
self.clear_session()
next_uri = reverse('sentry-organization-auth-settings', args=[
self.organization.slug,
])
return HttpResponseRedirect(next_uri)
@transaction.atomic
def _finish_link_pipeline(self, identity):
"""
The link flow shows the user a confirmation of the link that is about
to be created, and upon confirmation associates the identity.
"""
request = self.request
if not request.user.is_authenticated():
return self.error(ERR_NOT_AUTHED)
if request.user.id != request.session['auth']['uid']:
return self.error(ERR_UID_MISMATCH)
if request.POST.get('op') == 'confirm':
self._handle_attach_identity(identity)
elif request.POST.get('op') == 'newuser':
auth_identity = self._handle_new_user(identity)
user = auth_identity.user
user.backend = settings.AUTHENTICATION_BACKENDS[0]
login(self.request, user)
else:
return self.respond('sentry/auth-confirm-link.html', {
'identity': identity,
})
self.clear_session()
next_uri = reverse('sentry-organization-home', args=[
self.organization.slug,
])
return HttpResponseRedirect(next_uri)
def respond(self, template, context=None, status=200):
default_context = {
'organization': self.organization,
}
if context:
default_context.update(context)
return render_to_response(template, default_context, self.request,
status=status)
def error(self, message):
session = self.request.session['auth']
if session['flow'] == self.FLOW_LOGIN:
redirect_uri = reverse('sentry-auth-organization', args=[self.organization.slug])
elif session['flow'] == self.FLOW_SETUP_PROVIDER:
redirect_uri = reverse('sentry-organization-auth-settings', args=[self.organization.slug])
elif session['flow'] == self.FLOW_LINK_IDENTITY:
redirect_uri = reverse('sentry-auth-organization', args=[self.organization.slug])
messages.add_message(
self.request, messages.ERROR,
u'Authentication error: {}'.format(message),
)
return HttpResponseRedirect(redirect_uri)
def bind_state(self, key, value):
self.request.session['auth']['state'][key] = value
self.request.session.modified = True
def fetch_state(self, key):
return self.request.session['auth']['state'].get(key)
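    # Hypothetical AuthView step showing how the state helpers are used; the
    # view name and payload below are assumptions for illustration only:
    #
    #   class FetchIdentity(AuthView):
    #       def dispatch(self, request, helper):
    #           helper.bind_state('identity', {'id': '123', 'email': 'a@b.c'})
    #           return helper.next_step()
    #
    # The provider's build_identity(state) later reads what was bound here.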
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import re
import django
from django.conf import settings
from django.core import exceptions
from django.core.urlresolvers import reverse
from django import http
from django.test.utils import override_settings
from django.utils import html
from mox3.mox import IsA
import six
from heatclient.common import template_format as hc_format
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
from openstack_dashboard.dashboards.project.stacks import api as project_api
from openstack_dashboard.dashboards.project.stacks import forms
from openstack_dashboard.dashboards.project.stacks import mappings
from openstack_dashboard.dashboards.project.stacks import tables
INDEX_TEMPLATE = 'horizon/common/_data_table_view.html'
INDEX_URL = reverse('horizon:project:stacks:index')
DETAIL_URL = 'horizon:project:stacks:detail'
class MockResource(object):
def __init__(self, resource_type, physical_resource_id):
self.resource_type = resource_type
self.physical_resource_id = physical_resource_id
class MappingsTests(test.TestCase):
def test_mappings(self):
def assertMappingUrl(url, resource_type, physical_resource_id):
mock = MockResource(resource_type, physical_resource_id)
mock_url = mappings.resource_to_url(mock)
self.assertEqual(url, mock_url)
assertMappingUrl(
'/project/networks/subnets/aaa/detail',
'OS::Neutron::Subnet',
'aaa')
assertMappingUrl(
None,
'OS::Neutron::Subnet',
None)
assertMappingUrl(
None,
None,
None)
assertMappingUrl(
None,
'AWS::AutoScaling::LaunchConfiguration',
'aaa')
assertMappingUrl(
'/project/instances/aaa/',
'AWS::EC2::Instance',
'aaa')
assertMappingUrl(
'/project/containers/container/aaa/',
'OS::Swift::Container',
'aaa')
assertMappingUrl(
None,
'Foo::Bar::Baz',
'aaa')
assertMappingUrl(
'/project/instances/aaa/',
'OS::Nova::Server',
'aaa')
assertMappingUrl(
'/project/stacks/stack/aaa/',
'OS::Heat::ResourceGroup',
'aaa')
def test_stack_output(self):
self.assertEqual(u'<pre>foo</pre>', mappings.stack_output('foo'))
self.assertEqual(u'', mappings.stack_output(None))
outputs = ['one', 'two', 'three']
# On Python 3, the pretty JSON output doesn't add space before newline
if six.PY3:
expected_text = """[\n "one",\n "two",\n "three"\n]"""
else:
expected_text = """[\n "one", \n "two", \n "three"\n]"""
self.assertEqual(u'<pre>%s</pre>' % html.escape(expected_text),
mappings.stack_output(outputs))
outputs = {'foo': 'bar'}
expected_text = """{\n "foo": "bar"\n}"""
self.assertEqual(u'<pre>%s</pre>' % html.escape(expected_text),
mappings.stack_output(outputs))
self.assertEqual(
u'<a href="http://www.example.com/foo" target="_blank">'
'http://www.example.com/foo</a>',
mappings.stack_output('http://www.example.com/foo'))
class StackTests(test.TestCase):
@override_settings(API_RESULT_PAGE_SIZE=2)
@test.create_stubs({api.heat: ('stacks_list',)})
def test_index_paginated(self):
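        # Four stubbed stacks_list() calls mirror the four GETs below: the
        # initial full listing, then pages of API_RESULT_PAGE_SIZE=2 items.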
stacks = self.stacks.list()[:5]
filters = {}
api.heat.stacks_list(IsA(http.HttpRequest),
marker=None,
paginate=True,
sort_dir='desc',
filters=filters) \
.AndReturn([stacks, True, True])
api.heat.stacks_list(IsA(http.HttpRequest),
marker=None,
paginate=True,
sort_dir='desc',
filters=filters) \
.AndReturn([stacks[:2], True, True])
api.heat.stacks_list(IsA(http.HttpRequest),
marker=stacks[2].id,
paginate=True,
sort_dir='desc',
filters=filters) \
.AndReturn([stacks[2:4], True, True])
api.heat.stacks_list(IsA(http.HttpRequest),
marker=stacks[4].id,
paginate=True,
sort_dir='desc',
filters=filters) \
.AndReturn([stacks[4:], True, True])
self.mox.ReplayAll()
url = reverse('horizon:project:stacks:index')
res = self.client.get(url)
# get all
self.assertEqual(len(res.context['stacks_table'].data),
len(stacks))
self.assertTemplateUsed(res, INDEX_TEMPLATE)
res = self.client.get(url)
# get first page with 2 items
self.assertEqual(len(res.context['stacks_table'].data),
settings.API_RESULT_PAGE_SIZE)
url = "%s?%s=%s" % (reverse('horizon:project:stacks:index'),
tables.StacksTable._meta.pagination_param,
stacks[2].id)
res = self.client.get(url)
# get second page (items 2-4)
self.assertEqual(len(res.context['stacks_table'].data),
settings.API_RESULT_PAGE_SIZE)
url = "%s?%s=%s" % (reverse('horizon:project:stacks:index'),
tables.StacksTable._meta.pagination_param,
stacks[4].id)
res = self.client.get(url)
# get third page (item 5)
self.assertEqual(len(res.context['stacks_table'].data),
1)
@override_settings(API_RESULT_PAGE_SIZE=2)
@test.create_stubs({api.heat: ('stacks_list',)})
def test_index_prev_paginated(self):
stacks = self.stacks.list()[:3]
filters = {}
api.heat.stacks_list(IsA(http.HttpRequest),
marker=None,
paginate=True,
sort_dir='desc',
filters=filters) \
.AndReturn([stacks, True, False])
api.heat.stacks_list(IsA(http.HttpRequest),
marker=None,
paginate=True,
sort_dir='desc',
filters=filters) \
.AndReturn([stacks[:2], True, True])
api.heat.stacks_list(IsA(http.HttpRequest),
marker=stacks[2].id,
paginate=True,
sort_dir='desc',
filters=filters) \
.AndReturn([stacks[2:], True, True])
api.heat.stacks_list(IsA(http.HttpRequest),
marker=stacks[2].id,
paginate=True,
sort_dir='asc',
filters=filters) \
.AndReturn([stacks[:2], True, True])
self.mox.ReplayAll()
url = reverse('horizon:project:stacks:index')
res = self.client.get(url)
# get all
self.assertEqual(len(res.context['stacks_table'].data),
len(stacks))
self.assertTemplateUsed(res, INDEX_TEMPLATE)
res = self.client.get(url)
# get first page with 2 items
self.assertEqual(len(res.context['stacks_table'].data),
settings.API_RESULT_PAGE_SIZE)
url = "%s?%s=%s" % (reverse('horizon:project:stacks:index'),
tables.StacksTable._meta.pagination_param,
stacks[2].id)
res = self.client.get(url)
# get second page (item 3)
self.assertEqual(len(res.context['stacks_table'].data), 1)
url = "%s?%s=%s" % (reverse('horizon:project:stacks:index'),
tables.StacksTable._meta.prev_pagination_param,
stacks[2].id)
res = self.client.get(url)
# prev back to get first page with 2 pages
self.assertEqual(len(res.context['stacks_table'].data),
settings.API_RESULT_PAGE_SIZE)
@test.create_stubs({api.heat: ('stack_create', 'template_validate'),
api.neutron: ('network_list_for_tenant', )})
def test_launch_stack(self):
template = self.stack_templates.first()
stack = self.stacks.first()
api.heat.template_validate(IsA(http.HttpRequest),
files={},
template=hc_format.parse(template.data)) \
.AndReturn(json.loads(template.validate))
api.heat.stack_create(IsA(http.HttpRequest),
stack_name=stack.stack_name,
timeout_mins=60,
disable_rollback=True,
template=None,
parameters=IsA(dict),
password='password',
files=None)
api.neutron.network_list_for_tenant(IsA(http.HttpRequest),
self.tenant.id) \
.AndReturn(self.networks.list())
api.neutron.network_list_for_tenant(IsA(http.HttpRequest),
self.tenant.id) \
.AndReturn(self.networks.list())
self.mox.ReplayAll()
url = reverse('horizon:project:stacks:select_template')
res = self.client.get(url)
self.assertTemplateUsed(res, 'project/stacks/select_template.html')
form_data = {'template_source': 'raw',
'template_data': template.data,
'method': forms.TemplateForm.__name__}
res = self.client.post(url, form_data)
self.assertTemplateUsed(res, 'project/stacks/create.html')
url = reverse('horizon:project:stacks:launch')
form_data = {'template_source': 'raw',
'template_data': template.data,
'password': 'password',
'parameters': template.validate,
'stack_name': stack.stack_name,
"timeout_mins": 60,
"disable_rollback": True,
"__param_DBUsername": "admin",
"__param_LinuxDistribution": "F17",
"__param_InstanceType": "m1.small",
"__param_KeyName": "test",
"__param_DBPassword": "admin",
"__param_DBRootPassword": "admin",
"__param_DBName": "wordpress",
"__param_Network": self.networks.list()[0]['id'],
'method': forms.CreateStackForm.__name__}
res = self.client.post(url, form_data)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.heat: ('stack_create', 'template_validate'),
api.neutron: ('network_list_for_tenant', )})
def test_launch_stack_with_environment(self):
template = self.stack_templates.first()
environment = self.stack_environments.first()
stack = self.stacks.first()
api.heat.template_validate(IsA(http.HttpRequest),
files={},
template=hc_format.parse(template.data),
environment=environment.data) \
.AndReturn(json.loads(template.validate))
api.heat.stack_create(IsA(http.HttpRequest),
stack_name=stack.stack_name,
timeout_mins=60,
disable_rollback=True,
template=None,
environment=environment.data,
parameters=IsA(dict),
password='password',
files=None)
api.neutron.network_list_for_tenant(IsA(http.HttpRequest),
self.tenant.id) \
.AndReturn(self.networks.list())
api.neutron.network_list_for_tenant(IsA(http.HttpRequest),
self.tenant.id) \
.AndReturn(self.networks.list())
self.mox.ReplayAll()
url = reverse('horizon:project:stacks:select_template')
res = self.client.get(url)
self.assertTemplateUsed(res, 'project/stacks/select_template.html')
form_data = {'template_source': 'raw',
'template_data': template.data,
'environment_source': 'raw',
'environment_data': environment.data,
'method': forms.TemplateForm.__name__}
res = self.client.post(url, form_data)
self.assertTemplateUsed(res, 'project/stacks/create.html')
url = reverse('horizon:project:stacks:launch')
form_data = {'template_source': 'raw',
'template_data': template.data,
'environment_source': 'raw',
'environment_data': environment.data,
'password': 'password',
'parameters': template.validate,
'stack_name': stack.stack_name,
"timeout_mins": 60,
"disable_rollback": True,
"__param_DBUsername": "admin",
"__param_LinuxDistribution": "F17",
"__param_InstanceType": "m1.small",
"__param_KeyName": "test",
"__param_DBPassword": "admin",
"__param_DBRootPassword": "admin",
"__param_DBName": "wordpress",
"__param_Network": self.networks.list()[0]['id'],
'method': forms.CreateStackForm.__name__}
res = self.client.post(url, form_data)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.heat: ('template_validate',)})
def test_launch_stack_with_hidden_parameters(self):
template = {
'data': ('heat_template_version: 2013-05-23\n'
'parameters:\n'
' public_string:\n'
' type: string\n'
' secret_string:\n'
' type: string\n'
' hidden: true\n'),
'validate': {
'Description': 'No description',
'Parameters': {
'public_string': {
'Label': 'public_string',
'Description': '',
'Type': 'String',
'NoEcho': 'false'
},
'secret_string': {
'Label': 'secret_string',
'Description': '',
'Type': 'String',
'NoEcho': 'true'
}
}
}
}
api.heat.template_validate(IsA(http.HttpRequest),
files={},
template=hc_format.parse(template['data'])) \
.AndReturn(template['validate'])
self.mox.ReplayAll()
url = reverse('horizon:project:stacks:select_template')
res = self.client.get(url)
self.assertTemplateUsed(res, 'project/stacks/select_template.html')
form_data = {'template_source': 'raw',
'template_data': template['data'],
'method': forms.TemplateForm.__name__}
res = self.client.post(url, form_data)
self.assertTemplateUsed(res, 'project/stacks/create.html')
# ensure the fields were rendered correctly
if django.VERSION >= (1, 10):
pattern = ('<input class="form-control" '
'id="id___param_public_string" '
'name="__param_public_string" type="text" required/>')
secret = ('<input class="form-control" '
'id="id___param_secret_string" '
'name="__param_secret_string" '
'type="password" required>')
else:
pattern = ('<input class="form-control" '
'id="id___param_public_string" '
'name="__param_public_string" type="text" />')
secret = ('<input class="form-control" '
'id="id___param_secret_string" '
'name="__param_secret_string" '
'type="password" />')
self.assertContains(res, pattern, html=True)
self.assertContains(res, secret, html=True)
@test.create_stubs({api.heat: ('template_validate',)})
def test_launch_stack_with_parameter_group(self):
template = {
'data': ('heat_template_version: 2013-05-23\n'
'parameters:\n'
' last_param:\n'
' type: string\n'
' first_param:\n'
' type: string\n'
' middle_param:\n'
' type: string\n'
'parameter_groups:\n'
'- parameters:\n'
' - first_param\n'
' - middle_param\n'
' - last_param\n'),
'validate': {
'Description': 'No description',
'Parameters': {
'last_param': {
'Label': 'last_param',
'Description': '',
'Type': 'String',
'NoEcho': 'false'
},
'first_param': {
'Label': 'first_param',
'Description': '',
'Type': 'String',
'NoEcho': 'false'
},
'middle_param': {
'Label': 'middle_param',
'Description': '',
'Type': 'String',
'NoEcho': 'true'
}
},
'ParameterGroups': [
{
'parameters': [
'first_param',
'middle_param',
'last_param'
]
}
]
}
}
api.heat.template_validate(IsA(http.HttpRequest),
files={},
template=hc_format.parse(template['data'])) \
.AndReturn(template['validate'])
self.mox.ReplayAll()
url = reverse('horizon:project:stacks:select_template')
res = self.client.get(url)
self.assertTemplateUsed(res, 'project/stacks/select_template.html')
form_data = {'template_source': 'raw',
'template_data': template['data'],
'method': forms.TemplateForm.__name__}
res = self.client.post(url, form_data)
self.assertTemplateUsed(res, 'project/stacks/create.html')
# ensure the fields were rendered in the correct order
regex = re.compile('^.*>first_param<.*>middle_param<.*>last_param<.*$',
flags=re.DOTALL)
self.assertRegexpMatches(res.content.decode('utf-8'), regex)
@test.create_stubs({api.heat: ('stack_create', 'template_validate')})
def test_launch_stack_parameter_types(self):
template = {
'data': ('heat_template_version: 2013-05-23\n'
'parameters:\n'
' param1:\n'
' type: string\n'
' param2:\n'
' type: number\n'
' param3:\n'
' type: json\n'
' param4:\n'
' type: comma_delimited_list\n'
' param5:\n'
' type: boolean\n'),
'validate': {
"Description": "No description",
"Parameters": {
"param1": {
"Type": "String",
"NoEcho": "false",
"Description": "",
"Label": "param1"
},
"param2": {
"Type": "Number",
"NoEcho": "false",
"Description": "",
"Label": "param2"
},
"param3": {
"Type": "Json",
"NoEcho": "false",
"Description": "",
"Label": "param3"
},
"param4": {
"Type": "CommaDelimitedList",
"NoEcho": "false",
"Description": "",
"Label": "param4"
},
"param5": {
"Type": "Boolean",
"NoEcho": "false",
"Description": "",
"Label": "param5"
}
}
}
}
stack = self.stacks.first()
api.heat.template_validate(IsA(http.HttpRequest),
files={},
template=hc_format.parse(template['data'])) \
.AndReturn(template['validate'])
api.heat.stack_create(IsA(http.HttpRequest),
stack_name=stack.stack_name,
timeout_mins=60,
disable_rollback=True,
template=hc_format.parse(template['data']),
parameters={'param1': 'some string',
'param2': 42,
'param3': '{"key": "value"}',
'param4': 'a,b,c',
'param5': True},
password='password',
files={})
self.mox.ReplayAll()
url = reverse('horizon:project:stacks:select_template')
res = self.client.get(url)
self.assertTemplateUsed(res, 'project/stacks/select_template.html')
form_data = {'template_source': 'raw',
'template_data': template['data'],
'method': forms.TemplateForm.__name__}
res = self.client.post(url, form_data)
self.assertTemplateUsed(res, 'project/stacks/create.html')
# ensure the fields were rendered correctly
if django.VERSION >= (1, 10):
input_str = ('<input class="form-control" '
'id="id___param_param{0}" '
'name="__param_param{0}" type="{1}" required/>')
else:
input_str = ('<input class="form-control" '
'id="id___param_param{0}" '
'name="__param_param{0}" type="{1}"/>')
self.assertContains(res, input_str.format(1, 'text'), html=True)
# the custom number spinner produces an input element
# that doesn't match the input_strs above
# validate with id alone
self.assertContains(res, 'id="id___param_param2"')
self.assertContains(res, input_str.format(3, 'text'), html=True)
self.assertContains(res, input_str.format(4, 'text'), html=True)
self.assertContains(
res,
'<input id="id___param_param5" name="__param_param5" '
'type="checkbox">',
html=True)
# post some sample data and make sure it validates
url = reverse('horizon:project:stacks:launch')
form_data = {'template_source': 'raw',
'template_data': template['data'],
'password': 'password',
'parameters': json.dumps(template['validate']),
'stack_name': stack.stack_name,
"timeout_mins": 60,
"disable_rollback": True,
"__param_param1": "some string",
"__param_param2": 42,
"__param_param3": '{"key": "value"}',
"__param_param4": "a,b,c",
"__param_param5": True,
'method': forms.CreateStackForm.__name__}
res = self.client.post(url, form_data)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.heat: ('stack_update', 'stack_get', 'template_get',
'template_validate'),
api.neutron: ('network_list_for_tenant', )})
def test_edit_stack_template(self):
template = self.stack_templates.first()
stack = self.stacks.first()
# GET to template form
api.heat.stack_get(IsA(http.HttpRequest),
stack.id).AndReturn(stack)
# POST template form, validation
api.heat.template_validate(IsA(http.HttpRequest),
files={},
template=hc_format.parse(template.data)) \
.AndReturn(json.loads(template.validate))
# GET to edit form
api.heat.stack_get(IsA(http.HttpRequest),
stack.id).AndReturn(stack)
api.heat.template_get(IsA(http.HttpRequest),
stack.id) \
.AndReturn(json.loads(template.validate))
# POST to edit form
api.heat.stack_get(IsA(http.HttpRequest),
stack.id).AndReturn(stack)
fields = {
'stack_name': stack.stack_name,
'disable_rollback': True,
'timeout_mins': 61,
'password': 'password',
'template': None,
'parameters': IsA(dict),
'files': None
}
api.heat.stack_update(IsA(http.HttpRequest),
stack_id=stack.id,
**fields)
api.neutron.network_list_for_tenant(IsA(http.HttpRequest),
self.tenant.id) \
.AndReturn(self.networks.list())
self.mox.ReplayAll()
url = reverse('horizon:project:stacks:change_template',
args=[stack.id])
res = self.client.get(url)
self.assertTemplateUsed(res, 'project/stacks/change_template.html')
form_data = {'template_source': 'raw',
'template_data': template.data,
'method': forms.ChangeTemplateForm.__name__}
res = self.client.post(url, form_data)
url = reverse('horizon:project:stacks:edit_stack',
args=[stack.id, ])
form_data = {'template_source': 'raw',
'template_data': template.data,
'password': 'password',
'parameters': template.validate,
'stack_name': stack.stack_name,
'stack_id': stack.id,
"timeout_mins": 61,
"disable_rollback": True,
"__param_DBUsername": "admin",
"__param_LinuxDistribution": "F17",
"__param_InstanceType": "m1.small",
"__param_KeyName": "test",
"__param_DBPassword": "admin",
"__param_DBRootPassword": "admin",
"__param_DBName": "wordpress",
"__param_Network": self.networks.list()[0]['id'],
'method': forms.EditStackForm.__name__}
res = self.client.post(url, form_data)
self.assertRedirectsNoFollow(res, INDEX_URL)
def test_launch_stack_form_invalid_name_digit(self):
self._test_launch_stack_invalid_name('2_StartWithDigit')
def test_launch_stack_form_invalid_name_underscore(self):
self._test_launch_stack_invalid_name('_StartWithUnderscore')
def test_launch_stack_form_invalid_name_point(self):
self._test_launch_stack_invalid_name('.StartWithPoint')
@test.create_stubs({api.neutron: ('network_list_for_tenant', )})
def _test_launch_stack_invalid_name(self, name):
api.neutron.network_list_for_tenant(IsA(http.HttpRequest),
self.tenant.id) \
.AndReturn(self.networks.list())
self.mox.ReplayAll()
template = self.stack_templates.first()
url = reverse('horizon:project:stacks:launch')
form_data = {'template_source': 'raw',
'template_data': template.data,
'password': 'password',
'parameters': template.validate,
'stack_name': name,
"timeout_mins": 60,
"disable_rollback": True,
"__param_DBUsername": "admin",
"__param_LinuxDistribution": "F17",
"__param_InstanceType": "m1.small",
"__param_KeyName": "test",
"__param_DBPassword": "admin",
"__param_DBRootPassword": "admin",
"__param_DBName": "wordpress",
"__param_Network": self.networks.list()[0]['id'],
'method': forms.CreateStackForm.__name__}
res = self.client.post(url, form_data)
error = ('Name must start with a letter and may only contain letters, '
'numbers, underscores, periods and hyphens.')
self.assertFormErrors(res, 1)
self.assertFormError(res, "form", 'stack_name', error)
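    # Shared driver for the check/suspend/resume row-action tests below:
    # records the expected api.heat.action_<name>() call and posts the
    # table action form.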
def _test_stack_action(self, action):
stack = self.stacks.first()
filters = {}
api.heat.stacks_list(IsA(http.HttpRequest),
marker=None,
paginate=True,
sort_dir='desc',
filters=filters) \
.AndReturn([self.stacks.list(), True, True])
getattr(api.heat, 'action_%s' % action)(IsA(http.HttpRequest),
stack.id).AndReturn(stack)
self.mox.ReplayAll()
form_data = {"action": "stacks__%s__%s" % (action, stack.id)}
res = self.client.post(INDEX_URL, form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.heat: ('stacks_list', 'action_check',)})
def test_check_stack(self):
self._test_stack_action('check')
@test.create_stubs({api.heat: ('stacks_list', 'action_suspend',)})
def test_suspend_stack(self):
self._test_stack_action('suspend')
@test.create_stubs({api.heat: ('stacks_list', 'action_resume',)})
def test_resume_stack(self):
self._test_stack_action('resume')
@test.create_stubs({api.heat: ('stack_preview', 'template_validate')})
def test_preview_stack(self):
template = self.stack_templates.first()
stack = self.stacks.first()
api.heat.template_validate(IsA(http.HttpRequest),
files={},
template=hc_format.parse(template.data)) \
.AndReturn(json.loads(template.validate))
api.heat.stack_preview(IsA(http.HttpRequest),
stack_name=stack.stack_name,
timeout_mins=60,
disable_rollback=True,
template=None,
parameters=IsA(dict),
files=None).AndReturn(stack)
self.mox.ReplayAll()
url = reverse('horizon:project:stacks:preview_template')
res = self.client.get(url)
self.assertTemplateUsed(res, 'project/stacks/preview_template.html')
form_data = {'template_source': 'raw',
'template_data': template.data,
'method': forms.PreviewTemplateForm.__name__}
res = self.client.post(url, form_data)
self.assertTemplateUsed(res, 'project/stacks/preview.html')
url = reverse('horizon:project:stacks:preview')
form_data = {'template_source': 'raw',
'template_data': template.data,
'parameters': template.validate,
'stack_name': stack.stack_name,
"timeout_mins": 60,
"disable_rollback": True,
"__param_DBUsername": "admin",
"__param_LinuxDistribution": "F17",
"__param_InstanceType": "m1.small",
"__param_KeyName": "test",
"__param_DBPassword": "admin",
"__param_DBRootPassword": "admin",
"__param_DBName": "wordpress",
'method': forms.PreviewStackForm.__name__}
res = self.client.post(url, form_data)
self.assertTemplateUsed(res, 'project/stacks/preview_details.html')
self.assertEqual(res.context['stack_preview']['stack_name'],
stack.stack_name)
@test.create_stubs({api.heat: ('stack_get', 'template_get',
'resources_list')})
def test_detail_stack_topology(self):
stack = self.stacks.first()
template = self.stack_templates.first()
api.heat.stack_get(IsA(http.HttpRequest), stack.id) \
.MultipleTimes().AndReturn(stack)
api.heat.template_get(IsA(http.HttpRequest), stack.id) \
.AndReturn(json.loads(template.validate))
api.heat.resources_list(IsA(http.HttpRequest), stack.stack_name) \
.AndReturn([])
self.mox.ReplayAll()
url = '?'.join([reverse(DETAIL_URL, args=[stack.id]),
'='.join(['tab', 'stack_details__stack_topology'])])
res = self.client.get(url)
tab = res.context['tab_group'].get_tab('topology')
d3_data = tab.data['d3_data']
self.assertEqual(tab.template_name,
'project/stacks/_detail_topology.html')
# status is CREATE_COMPLETE, so we expect the topology to display it
self.assertIn('info_box', d3_data)
self.assertIn('stack-green.svg', d3_data)
self.assertIn('Create Complete', d3_data)
@test.create_stubs({api.heat: ('stack_get', 'template_get'),
project_api: ('d3_data',)})
def test_detail_stack_overview(self):
stack = self.stacks.first()
template = self.stack_templates.first()
api.heat.stack_get(IsA(http.HttpRequest), stack.id) \
.MultipleTimes().AndReturn(stack)
api.heat.template_get(IsA(http.HttpRequest), stack.id) \
.AndReturn(json.loads(template.validate))
project_api.d3_data(IsA(http.HttpRequest), stack_id=stack.id) \
.AndReturn(json.dumps({"nodes": [], "stack": {}}))
self.mox.ReplayAll()
url = '?'.join([reverse(DETAIL_URL, args=[stack.id]),
'='.join(['tab', 'stack_details__stack_overview'])])
res = self.client.get(url)
tab = res.context['tab_group'].get_tab('overview')
overview_data = tab.data['stack']
self.assertEqual(tab.template_name,
'project/stacks/_detail_overview.html')
self.assertEqual(stack.stack_name, overview_data.stack_name)
@test.create_stubs({api.heat: ('stack_get', 'template_get'),
project_api: ('d3_data',)})
def test_detail_stack_resources(self):
stack = self.stacks.first()
template = self.stack_templates.first()
api.heat.stack_get(IsA(http.HttpRequest), stack.id) \
.MultipleTimes().AndReturn(stack)
api.heat.template_get(IsA(http.HttpRequest), stack.id) \
.AndReturn(json.loads(template.validate))
project_api.d3_data(IsA(http.HttpRequest), stack_id=stack.id) \
.AndReturn(json.dumps({"nodes": [], "stack": {}}))
self.mox.ReplayAll()
url = '?'.join([reverse(DETAIL_URL, args=[stack.id]),
'='.join(['tab', 'stack_details__resource_overview'])])
res = self.client.get(url)
tab = res.context['tab_group'].get_tab('resources')
self.assertEqual(tab.template_name,
'project/stacks/_detail_resources.html')
@test.create_stubs({api.heat: ('stack_get', 'template_get')})
def test_detail_stack_template(self):
stack = self.stacks.first()
template = self.stack_templates.first()
api.heat.stack_get(IsA(http.HttpRequest), stack.id) \
.AndReturn(stack)
api.heat.template_get(IsA(http.HttpRequest), stack.id) \
.AndReturn(json.loads(template.validate))
self.mox.ReplayAll()
url = '?'.join([reverse(DETAIL_URL, args=[stack.id]),
'='.join(['tab', 'stack_details__stack_template'])])
res = self.client.get(url)
tab = res.context['tab_group'].get_tab('stack_template')
template_data = tab.data['stack_template']
self.assertEqual(tab.template_name,
'project/stacks/_stack_template.html')
self.assertIn(json.loads(template.validate)['Description'],
template_data)
@test.create_stubs({api.heat: ('resource_get', 'resource_metadata_get')})
def test_resource_view(self):
stack = self.stacks.first()
resource = self.heat_resources.first()
metadata = {}
api.heat.resource_get(
IsA(http.HttpRequest), stack.id, resource.resource_name) \
.AndReturn(resource)
api.heat.resource_metadata_get(
IsA(http.HttpRequest), stack.id, resource.resource_name) \
.AndReturn(metadata)
self.mox.ReplayAll()
url = reverse('horizon:project:stacks:resource',
args=[stack.id, resource.resource_name])
res = self.client.get(url)
self.assertTemplateUsed(res, 'horizon/common/_detail.html')
self.assertTemplateUsed(res, 'project/stacks/_resource_overview.html')
self.assertEqual(res.context['resource'].logical_resource_id,
resource.logical_resource_id)
class TemplateFormTests(test.TestCase):
class SimpleFile(object):
def __init__(self, name, data):
self.name = name
self.data = data
def read(self):
return self.data
def test_create_upload_form_attributes(self):
attrs = forms.create_upload_form_attributes(
'env', 'url', 'Environment')
self.assertEqual(attrs['data-envsource-url'], 'Environment')
def test_clean_file_upload_form_url(self):
kwargs = {'next_view': 'Launch Stack'}
t = forms.TemplateForm({}, **kwargs)
precleaned = {
'template_url': 'http://templateurl.com',
}
t.clean_uploaded_files('template', 'template', precleaned, {})
self.assertEqual(precleaned['template_url'], 'http://templateurl.com')
def test_clean_file_upload_form_multiple(self):
kwargs = {'next_view': 'Launch Stack'}
t = forms.TemplateForm({}, **kwargs)
precleaned = {
'template_url': 'http://templateurl.com',
'template_data': 'http://templateurl.com',
}
self.assertRaises(
exceptions.ValidationError,
t.clean_uploaded_files,
'template',
'template',
precleaned,
{})
def test_clean_file_upload_form_invalid_json(self):
kwargs = {'next_view': 'Launch Stack'}
t = forms.TemplateForm({}, **kwargs)
precleaned = {
'template_data': 'http://templateurl.com',
}
json_str = '{notvalidjson::::::json/////json'
files = {'template_upload':
self.SimpleFile('template_name', json_str)}
self.assertRaises(
exceptions.ValidationError,
t.clean_uploaded_files,
'template',
'template',
precleaned,
files)
def test_clean_file_upload_form_valid_data(self):
kwargs = {'next_view': 'Launch Stack'}
t = forms.TemplateForm({}, **kwargs)
precleaned = {
'template_data': 'http://templateurl.com',
}
json_str = '{"isvalid":"json"}'
files = {'template_upload':
self.SimpleFile('template_name', json_str)}
t.clean_uploaded_files('template', 'template', precleaned, files)
self.assertEqual(
json_str,
precleaned['template_data'])
|
|
import collections
from supriya import CalculationRate, utils
from supriya.synthdefs import PseudoUGen, UGen
class Mix(PseudoUGen):
"""
A down-to-mono signal mixer.
.. container:: example
::
>>> with supriya.synthdefs.SynthDefBuilder() as builder:
... oscillators = [supriya.ugens.DC.ar(1) for _ in range(5)]
... mix = supriya.ugens.Mix.new(oscillators)
...
>>> synthdef = builder.build(name="mix1", optimize=False)
>>> supriya.graph(synthdef) # doctest: +SKIP
::
>>> print(synthdef)
synthdef:
name: mix1
ugens:
- DC.ar/0:
source: 1.0
- DC.ar/1:
source: 1.0
- DC.ar/2:
source: 1.0
- DC.ar/3:
source: 1.0
- Sum4.ar:
input_four: DC.ar/3[0]
input_one: DC.ar/0[0]
input_three: DC.ar/2[0]
input_two: DC.ar/1[0]
- DC.ar/4:
source: 1.0
- BinaryOpUGen(ADDITION).ar:
left: Sum4.ar[0]
right: DC.ar/4[0]
.. container:: example
::
>>> with supriya.synthdefs.SynthDefBuilder() as builder:
... oscillators = [supriya.ugens.DC.ar(1) for _ in range(15)]
... mix = supriya.ugens.Mix.new(oscillators)
...
>>> synthdef = builder.build("mix2")
>>> supriya.graph(synthdef) # doctest: +SKIP
::
>>> print(synthdef)
synthdef:
name: mix2
ugens:
- DC.ar/0:
source: 1.0
- DC.ar/1:
source: 1.0
- DC.ar/2:
source: 1.0
- DC.ar/3:
source: 1.0
- Sum4.ar/0:
input_four: DC.ar/3[0]
input_one: DC.ar/0[0]
input_three: DC.ar/2[0]
input_two: DC.ar/1[0]
- DC.ar/4:
source: 1.0
- DC.ar/5:
source: 1.0
- DC.ar/6:
source: 1.0
- DC.ar/7:
source: 1.0
- Sum4.ar/1:
input_four: DC.ar/7[0]
input_one: DC.ar/4[0]
input_three: DC.ar/6[0]
input_two: DC.ar/5[0]
- DC.ar/8:
source: 1.0
- DC.ar/9:
source: 1.0
- DC.ar/10:
source: 1.0
- DC.ar/11:
source: 1.0
- Sum4.ar/2:
input_four: DC.ar/11[0]
input_one: DC.ar/8[0]
input_three: DC.ar/10[0]
input_two: DC.ar/9[0]
- DC.ar/12:
source: 1.0
- DC.ar/13:
source: 1.0
- DC.ar/14:
source: 1.0
- Sum3.ar:
input_one: DC.ar/12[0]
input_three: DC.ar/14[0]
input_two: DC.ar/13[0]
- Sum4.ar/3:
input_four: Sum3.ar[0]
input_one: Sum4.ar/0[0]
input_three: Sum4.ar/2[0]
input_two: Sum4.ar/1[0]
"""
### PRIVATE METHODS ###
@classmethod
def _flatten_sources(cls, sources):
import supriya.synthdefs
flattened_sources = []
for source in sources:
if isinstance(source, supriya.synthdefs.UGenArray):
flattened_sources.extend(source)
else:
flattened_sources.append(source)
return supriya.synthdefs.UGenArray(flattened_sources)
### PUBLIC METHODS ###
@classmethod
def new(cls, sources):
import supriya.synthdefs
import supriya.ugens
sources = cls._flatten_sources(sources)
summed_sources = []
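        # Greedily group the sources into chunks of at most four and sum each
        # chunk (Sum4, Sum3, plain addition, or passthrough); recurse until a
        # single summed signal remains.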
for part in utils.group_iterable_by_count(sources, 4):
if len(part) == 4:
summed_sources.append(supriya.ugens.Sum4(*part))
elif len(part) == 3:
summed_sources.append(supriya.ugens.Sum3(*part))
elif len(part) == 2:
summed_sources.append(part[0] + part[1])
else:
summed_sources.append(part[0])
if len(summed_sources) == 1:
return summed_sources[0]
return Mix.new(summed_sources)
@classmethod
def multichannel(cls, sources, channel_count):
"""
Segment by channel count and mix down in parallel.
.. container:: example
Combine panner outputs, first with first, second with second, etc.
::
>>> source = supriya.ugens.SinOsc.ar(frequency=[440, 660, 880])
>>> panner = supriya.ugens.PanAz.ar(
... channel_count=4,
... source=source,
... position=supriya.ugens.LFNoise2.kr(),
... )
>>> mix = supriya.ugens.Mix.multichannel(panner, channel_count=4)
>>> out = supriya.ugens.Out.ar(bus=0, source=mix)
>>> supriya.graph(out) # doctest: +SKIP
::
>>> print(out)
synthdef:
name: ...
ugens:
- SinOsc.ar/0:
frequency: 440.0
phase: 0.0
- LFNoise2.kr:
frequency: 500.0
- PanAz.ar/0:
amplitude: 1.0
orientation: 0.5
position: LFNoise2.kr[0]
source: SinOsc.ar/0[0]
width: 2.0
- SinOsc.ar/1:
frequency: 660.0
phase: 0.0
- PanAz.ar/1:
amplitude: 1.0
orientation: 0.5
position: LFNoise2.kr[0]
source: SinOsc.ar/1[0]
width: 2.0
- SinOsc.ar/2:
frequency: 880.0
phase: 0.0
- PanAz.ar/2:
amplitude: 1.0
orientation: 0.5
position: LFNoise2.kr[0]
source: SinOsc.ar/2[0]
width: 2.0
- Sum3.ar/0:
input_one: PanAz.ar/0[0]
input_three: PanAz.ar/2[0]
input_two: PanAz.ar/1[0]
- Sum3.ar/1:
input_one: PanAz.ar/0[1]
input_three: PanAz.ar/2[1]
input_two: PanAz.ar/1[1]
- Sum3.ar/2:
input_one: PanAz.ar/0[2]
input_three: PanAz.ar/2[2]
input_two: PanAz.ar/1[2]
- Sum3.ar/3:
input_one: PanAz.ar/0[3]
input_three: PanAz.ar/2[3]
input_two: PanAz.ar/1[3]
- Out.ar:
bus: 0.0
source[0]: Sum3.ar/0[0]
source[1]: Sum3.ar/1[0]
source[2]: Sum3.ar/2[0]
source[3]: Sum3.ar/3[0]
Compare with a non-multichannel mixdown:
>>> mix = supriya.ugens.Mix.new(panner)
>>> out = supriya.ugens.Out.ar(bus=0, source=mix)
>>> supriya.graph(out) # doctest: +SKIP
::
>>> print(out)
synthdef:
name: ...
ugens:
- SinOsc.ar/0:
frequency: 440.0
phase: 0.0
- LFNoise2.kr:
frequency: 500.0
- PanAz.ar/0:
amplitude: 1.0
orientation: 0.5
position: LFNoise2.kr[0]
source: SinOsc.ar/0[0]
width: 2.0
- Sum4.ar/0:
input_four: PanAz.ar/0[3]
input_one: PanAz.ar/0[0]
input_three: PanAz.ar/0[2]
input_two: PanAz.ar/0[1]
- SinOsc.ar/1:
frequency: 660.0
phase: 0.0
- PanAz.ar/1:
amplitude: 1.0
orientation: 0.5
position: LFNoise2.kr[0]
source: SinOsc.ar/1[0]
width: 2.0
- Sum4.ar/1:
input_four: PanAz.ar/1[3]
input_one: PanAz.ar/1[0]
input_three: PanAz.ar/1[2]
input_two: PanAz.ar/1[1]
- SinOsc.ar/2:
frequency: 880.0
phase: 0.0
- PanAz.ar/2:
amplitude: 1.0
orientation: 0.5
position: LFNoise2.kr[0]
source: SinOsc.ar/2[0]
width: 2.0
- Sum4.ar/2:
input_four: PanAz.ar/2[3]
input_one: PanAz.ar/2[0]
input_three: PanAz.ar/2[2]
input_two: PanAz.ar/2[1]
- Sum3.ar:
input_one: Sum4.ar/0[0]
input_three: Sum4.ar/2[0]
input_two: Sum4.ar/1[0]
- Out.ar:
bus: 0.0
source[0]: Sum3.ar[0]
"""
import supriya.synthdefs
sources = cls._flatten_sources(sources)
mixes, parts = [], []
for i in range(0, len(sources), channel_count):
parts.append(sources[i : i + channel_count])
for columns in zip(*parts):
mixes.append(cls.new(columns))
return supriya.synthdefs.UGenArray(mixes)
class MulAdd(UGen):
"""
    An optimized multiplication / addition ugen.
::
>>> source = supriya.ugens.SinOsc.ar()
>>> mul_add = supriya.ugens.MulAdd.new(
... addend=0.5,
... multiplier=-1.5,
... source=source,
... )
>>> mul_add
MulAdd.ar()
"""
### CLASS VARIABLES ###
_ordered_input_names = collections.OrderedDict(
[("source", None), ("multiplier", 1.0), ("addend", 0.0)]
)
### INITIALIZER ###
def __init__(self, addend=0.0, multiplier=1.0, calculation_rate=None, source=None):
UGen.__init__(
self,
addend=addend,
multiplier=multiplier,
calculation_rate=calculation_rate,
source=source,
)
### PRIVATE METHODS ###
@staticmethod
def _inputs_are_valid(source, multiplier, addend):
if CalculationRate.from_expr(source) == CalculationRate.AUDIO:
return True
if CalculationRate.from_expr(source) == CalculationRate.CONTROL:
if CalculationRate.from_expr(multiplier) in (
CalculationRate.CONTROL,
CalculationRate.SCALAR,
):
if CalculationRate.from_expr(addend) in (
CalculationRate.CONTROL,
CalculationRate.SCALAR,
):
return True
return False
@classmethod
def _new_single(
cls, addend=None, multiplier=None, calculation_rate=None, source=None
):
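        # Algebraic short-circuits: avoid emitting a MulAdd ugen when the
        # multiplier or addend makes it redundant, e.g. x * 0 + a == a,
        # x * 1 + 0 == x, and x * -1 + a == a - x.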
if multiplier == 0.0:
return addend
minus = multiplier == -1
no_multiplier = multiplier == 1
no_addend = addend == 0
if no_multiplier and no_addend:
return source
if minus and no_addend:
return -source
if no_addend:
return source * multiplier
if minus:
return addend - source
if no_multiplier:
return source + addend
if cls._inputs_are_valid(source, multiplier, addend):
return cls(
addend=addend,
multiplier=multiplier,
calculation_rate=calculation_rate,
source=source,
)
if cls._inputs_are_valid(multiplier, source, addend):
return cls(
addend=addend,
multiplier=source,
calculation_rate=calculation_rate,
source=multiplier,
)
return (source * multiplier) + addend
### PUBLIC METHODS ###
@classmethod
def new(cls, source=None, multiplier=1.0, addend=0.0):
"""
Constructs a multiplication / addition ugen.
::
>>> addend = 0.5
>>> multiplier = 1.5
>>> source = supriya.ugens.SinOsc.ar(frequency=[440, 442])
>>> mul_add = supriya.ugens.MulAdd.new(
... addend=addend,
... multiplier=multiplier,
... source=source,
... )
>>> mul_add
UGenArray({2})
Returns ugen graph.
"""
import supriya.synthdefs
# TODO: handle case of array as source
calculation_rate = supriya.CalculationRate.from_expr(
(source, multiplier, addend)
)
ugen = cls._new_expanded(
addend=addend,
multiplier=multiplier,
calculation_rate=calculation_rate,
source=source,
)
return ugen
class Sum3(UGen):
"""
A three-input summing unit generator.
::
>>> input_one = supriya.ugens.SinOsc.ar()
>>> input_two = supriya.ugens.SinOsc.ar(phase=0.1)
>>> input_three = supriya.ugens.SinOsc.ar(phase=0.2)
>>> supriya.ugens.Sum3.new(
... input_one=input_one,
... input_two=input_two,
... input_three=input_three,
... )
Sum3.ar()
"""
### CLASS VARIABLES ###
_ordered_input_names = collections.OrderedDict(
[("input_one", None), ("input_two", None), ("input_three", None)]
)
_valid_calculation_rates = ()
### INITIALIZER ###
def __init__(self, input_one=None, input_two=None, input_three=None):
inputs = [input_one, input_two, input_three]
calculation_rate = CalculationRate.from_expr(inputs)
inputs.sort(key=lambda x: CalculationRate.from_expr(x), reverse=True)
inputs = tuple(inputs)
UGen.__init__(
self,
calculation_rate=calculation_rate,
input_one=input_one,
input_two=input_two,
input_three=input_three,
)
### PRIVATE METHODS ###
@classmethod
def _new_single(cls, input_one=None, input_two=None, input_three=None, **kwargs):
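        # If any input is the constant zero, collapse to a plain two-input
        # addition instead of allocating a Sum3.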
if input_three == 0:
ugen = input_one + input_two
elif input_two == 0:
ugen = input_one + input_three
elif input_one == 0:
ugen = input_two + input_three
else:
ugen = cls(
input_one=input_one, input_two=input_two, input_three=input_three
)
return ugen
class Sum4(UGen):
"""
A four-input summing unit generator.
::
>>> input_one = supriya.ugens.SinOsc.ar()
>>> input_two = supriya.ugens.SinOsc.ar(phase=0.1)
>>> input_three = supriya.ugens.SinOsc.ar(phase=0.2)
>>> input_four = supriya.ugens.SinOsc.ar(phase=0.3)
>>> supriya.ugens.Sum4.new(
... input_one=input_one,
... input_two=input_two,
... input_three=input_three,
... input_four=input_four,
... )
Sum4.ar()
"""
### CLASS VARIABLES ###
_ordered_input_names = collections.OrderedDict(
[
("input_one", None),
("input_two", None),
("input_three", None),
("input_four", None),
]
)
_valid_calculation_rates = ()
### INITIALIZER ###
def __init__(
self, input_one=None, input_two=None, input_three=None, input_four=None
):
inputs = [input_one, input_two, input_three, input_four]
calculation_rate = CalculationRate.from_expr(inputs)
inputs.sort(key=lambda x: CalculationRate.from_expr(x), reverse=True)
inputs = tuple(inputs)
UGen.__init__(
self,
calculation_rate=calculation_rate,
input_one=input_one,
input_two=input_two,
input_three=input_three,
input_four=input_four,
)
### PRIVATE METHODS ###
@classmethod
def _new_single(
cls, input_one=None, input_two=None, input_three=None, input_four=None, **kwargs
):
import supriya.ugens
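        # If any input is the constant zero, collapse to a Sum3 over the
        # remaining three inputs.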
if input_one == 0:
ugen = supriya.ugens.Sum3.new(
input_one=input_two, input_two=input_three, input_three=input_four
)
elif input_two == 0:
ugen = supriya.ugens.Sum3.new(
input_one=input_one, input_two=input_three, input_three=input_four
)
elif input_three == 0:
ugen = supriya.ugens.Sum3.new(
input_one=input_one, input_two=input_two, input_three=input_four
)
elif input_four == 0:
ugen = supriya.ugens.Sum3.new(
input_one=input_one, input_two=input_two, input_three=input_three
)
else:
ugen = cls(
input_one=input_one,
input_two=input_two,
input_three=input_three,
input_four=input_four,
)
return ugen
|
|
#!/usr/bin/env python
"""
Download all the fic you've saved in Pinboard.
This saves the ebook, PDF and HTML copies available through the "Download" button.
See https://alexwlchan.net/2020/05/downloading-the-ao3-fics-that-i-ve-saved-in-pinboard/
Last updated 17 May 2020.
"""
from __future__ import print_function
import cgi
import errno
import getpass
import json
import os
import re
import sys
try:
from urllib.error import HTTPError
from urllib.request import urlretrieve
except ImportError: # Python 2
from urllib import urlretrieve
from urllib2 import HTTPError
try:
input = raw_input
except NameError:
pass
def get_bookmarks(api_token, filter_tag):
"""
Download your Pinboard bookmarks using the API.
https://pinboard.in/api#posts_all
Get an API token from https://pinboard.in/settings/password
"""
if ":" not in api_token:
sys.exit(
"That doesn't look like a Pinboard API token. It should be username:TOKEN."
)
url = "https://api.pinboard.in/v1/posts/all?format=json&auth_token=%s" % api_token
if filter_tag is not None:
url += "&tag=%s" % filter_tag
local_filename, _ = urlretrieve(url)
return json.load(open(local_filename))
def get_ao3_identifiers(bookmarks):
"""
Given the output from Pinboard's /posts/all API, return all the unique AO3
identifiers. This is the numeric ID after /works.
e.g. The ID in https://archiveofourown.org/works/1160745 is 1160745
"""
AO3_LINK_RE = re.compile(
r"^https?://archiveofourown\.org/works/(?P<work_id>\d+)"
)
ao3_identifiers = set()
for saved_bookmark in bookmarks:
url = saved_bookmark["href"]
match = AO3_LINK_RE.match(url)
if match is None:
continue
ao3_identifiers.add(match.group("work_id"))
return ao3_identifiers
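# Illustrative sketch (values are made up, not from the original script):
# given bookmarks like
#
#     [{"href": "https://archiveofourown.org/works/1160745"},
#      {"href": "https://example.com/not-ao3"}]
#
# get_ao3_identifiers() returns {"1160745"} -- non-AO3 URLs are skipped and
# duplicates collapse into the set.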
def mkdir_p(path):
"""
Create a directory if it doesn't already exist.
"""
# https://stackoverflow.com/a/600612/1558022
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def download_work_exports(ao3_id, download_formats):
"""
Download all the exports for a given work.
"""
# AO3 returns the original filename, including the fic title, in the
# Content-Disposition header.
try:
filename, headers = urlretrieve(
"https://www.archiveofourown.org/downloads/%s/a.azw3" % ao3_id
)
except HTTPError as err:
print(
"Error downloading https://www.archiveofourown.org/works/%s: %s"
% (ao3_id, err),
file=sys.stderr
)
return
# AO3 doesn't return an HTTP 500 status code if there's a server error --
# for example, if a fic you're looking for doesn't exist.
#
# Instead, you'll get an error when the Content-Disposition header fails to
# parse, specifically:
#
# TypeError: can only concatenate str (not "NoneType") to str
#
# We only look for this 500 error if we get the TypeError, because opening
# up the file and doing a string check is moderately slow.
try:
content_disposition_header = headers["Content-Disposition"]
_, params = cgi.parse_header(content_disposition_header)
except TypeError:
with open(filename, "rb") as azw3_file:
if b'<h2 class="heading">Error 500</h2>' in azw3_file.read():
print(
"Error downloading https://www.archiveofourown.org/works/%s" %
ao3_id,
file=sys.stderr
)
return
raise
else:
title = params["filename"][:-len(".azw3")]
print("Downloading %s (%s)" % (title, ao3_id))
dirname = os.path.join("ao3", "%s (%s)" % (title, ao3_id))
mkdir_p(dirname)
# Download all the selected exports for this fic.
for extension in download_formats:
out_path = os.path.join(dirname, "%s.%s" % (title, extension))
if os.path.exists(out_path):
continue
urlretrieve(
"https://www.archiveofourown.org/downloads/%s/a.%s" % (ao3_id, extension),
filename=out_path
)
if __name__ == "__main__":
api_token = getpass.getpass(
"What is your Pinboard API token? "
"Get it from https://pinboard.in/settings/password\n"
"(press enter when done)\n> "
)
print("")
action = input(
"Would you like to:\n"
"1) download all your fic\n"
"2) filter to a specific tag\n"
"Type 1 or 2, and press enter: "
).strip()
if action == "1":
filter_tag = None
description = "Fetching all your bookmarks from Pinboard"
elif action == "2":
filter_tag = input("\nWhat tag do you want to filter by?\n> ").strip()
description = "Fetching all your bookmarks tagged with %s" % filter_tag
else:
sys.exit("Unrecognised action: %s" % action)
available_formats = {"azw3", "epub", "html", "mobi", "pdf"}
print("")
action = input(
"Would you like to:\n"
"1) download every format (%s)\n" % ", ".join(available_formats) +
"2) only download one format\n"
"Type 1 or 2, and press enter: "
).strip()
if action == "1":
download_formats = available_formats
elif action == "2":
selected_format = input(
"\nWhat format do you want to download?\n> "
).strip().lower()
if selected_format not in available_formats:
sys.exit("Unrecognised format: %s" % selected_format)
download_formats = [selected_format]
else:
sys.exit("Unrecognised action: %s" % action)
print("")
print(description)
bookmarks = get_bookmarks(api_token=api_token, filter_tag=filter_tag)
ao3_identifiers = get_ao3_identifiers(bookmarks=bookmarks)
for ao3_id in sorted(ao3_identifiers):
download_work_exports(ao3_id=ao3_id, download_formats=download_formats)
|
|
from __future__ import unicode_literals, division, absolute_import
import urllib
import logging
import re
from flexget import plugin
from flexget.config_schema import one_or_more
from flexget.entry import Entry
from flexget.event import event
log = logging.getLogger('regexp')
class FilterRegexp(object):
"""
All possible forms.
regexp:
[operation]: # operation to perform on matches
- [regexp] # simple regexp
- [regexp]: <path> # override path
- [regexp]:
[path]: <path> # override path
[not]: <regexp> # not match
[from]: <field> # search from given entry field
- [regexp]:
[path]: <path> # override path
[not]: # list of not match regexps
- <regexp>
[from]: # search only from these fields
- <field>
[operation]:
- <regexp>
        [rest]: <operation> # operation to perform on non-matching entries
[from]: # search only from these fields for all regexps
- <field>
Possible operations: accept, reject, accept_excluding, reject_excluding
"""
schema = {
'type': 'object',
'properties': {
'accept': {'$ref': '#/definitions/regex_list'},
'reject': {'$ref': '#/definitions/regex_list'},
'accept_excluding': {'$ref': '#/definitions/regex_list'},
'reject_excluding': {'$ref': '#/definitions/regex_list'},
'rest': {'type': 'string', 'enum': ['accept', 'reject']},
'from': one_or_more({'type': 'string'})
},
'additionalProperties': False,
'definitions': {
# The validator for a list of regexps, each with or without settings
'regex_list': {
'type': 'array',
'items': {
'oneOf': [
# Plain regex string
{'type': 'string', 'format': 'regex'},
# Regex with options (regex is key, options are value)
{
'type': 'object',
'additionalProperties': {
'oneOf': [
# Simple options, just path
{'type': 'string', 'format': 'path'},
# Dict style options
{
'type': 'object',
'properties': {
'path': {'type': 'string', 'format': 'path'},
'set': {'type': 'object'},
'not': one_or_more({'type': 'string', 'format': 'regex'}),
'from': one_or_more({'type': 'string'})
},
'additionalProperties': False
}
]
}
}
]
}
}
}
}
def prepare_config(self, config):
"""Returns the config in standard format.
All regexps are turned into dictionaries in the form of {compiled regexp: options}
:param config: Dict that can optionally contain the following keys
path: will be attached to entries that match
set: a dict of values to be attached to entries that match via set plugin
from: a list of fields in entry for the regexps to match against
            not: a list of regexps that, if matching, will disqualify the main match
:return: New config dictionary
"""
out_config = {}
if 'rest' in config:
out_config['rest'] = config['rest']
# Turn all our regexps into advanced form dicts and compile them
for operation, regexps in config.iteritems():
if operation in ['rest', 'from']:
continue
for regexp_item in regexps:
if not isinstance(regexp_item, dict):
regexp = regexp_item
regexp_item = {regexp: {}}
regexp, opts = regexp_item.items()[0]
# Parse custom settings for this regexp
if not isinstance(opts, dict):
opts = {'path': opts}
else:
# We don't want to modify original config
opts = opts.copy()
# advanced configuration
if config.get('from'):
opts.setdefault('from', config['from'])
# Put plain strings into list form for `from` and `not` options
if 'from' in opts and isinstance(opts['from'], basestring):
opts['from'] = [opts['from']]
if 'not' in opts and isinstance(opts['not'], basestring):
opts['not'] = [opts['not']]
# compile `not` option regexps
if 'not' in opts:
opts['not'] = [re.compile(not_re, re.IGNORECASE | re.UNICODE) for not_re in opts['not']]
# compile regexp and make sure regexp is a string for series like '24'
regexp = re.compile(unicode(regexp), re.IGNORECASE | re.UNICODE)
out_config.setdefault(operation, []).append({regexp: opts})
return out_config
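    # Illustrative sketch (values are made up, not from the original source):
    # a config such as
    #     {'accept': ['foo', {'bar': {'path': '/downloads'}}]}
    # is normalized by prepare_config so that every regexp becomes a
    # single-item dict mapping a compiled pattern to its options:
    #     {'accept': [{<compiled 'foo'>: {}},
    #                 {<compiled 'bar'>: {'path': '/downloads'}}]}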
@plugin.priority(172)
def on_task_filter(self, task, config):
# TODO: what if accept and accept_excluding configured? Should raise error ...
config = self.prepare_config(config)
rest = []
for operation, regexps in config.iteritems():
if operation == 'rest':
continue
leftovers = self.filter(task, operation, regexps)
if not rest:
rest = leftovers
else:
                # If rest already has entries, keep only the intersection with leftovers (entries that no operation matched)
rest = [entry for entry in leftovers if entry in rest]
if 'rest' in config:
rest_method = Entry.accept if config['rest'] == 'accept' else Entry.reject
for entry in rest:
log.debug('Rest method %s for %s' % (config['rest'], entry['title']))
rest_method(entry, 'regexp `rest`')
def matches(self, entry, regexp, find_from=None, not_regexps=None):
"""
Check if :entry: has any string fields or strings in a list field that match :regexp:
:param entry: Entry instance
:param regexp: Compiled regexp
:param find_from: None or a list of fields to search from
:param not_regexps: None or list of regexps that can NOT match
        :return: Name of the matching field, or None if nothing matched
"""
unquote = ['url']
for field in find_from or ['title', 'description']:
# Only evaluate lazy fields if find_from has been explicitly specified
if not entry.get(field, eval_lazy=find_from):
continue
# Make all fields into lists for search purposes
values = entry[field]
if not isinstance(values, list):
values = [values]
for value in values:
if not isinstance(value, basestring):
continue
if field in unquote:
value = urllib.unquote(value)
# If none of the not_regexps match
if regexp.search(value):
# Make sure the not_regexps do not match for this field
for not_regexp in not_regexps or []:
if self.matches(entry, not_regexp, find_from=[field]):
entry.trace('Configured not_regexp %s matched, ignored' % not_regexp)
break
else: # None of the not_regexps matched
return field
def filter(self, task, operation, regexps):
"""
:param task: Task instance
        :param operation: one of 'accept', 'reject', 'accept_excluding' and 'reject_excluding'
                          accept and reject will be called on the entry if any of the regexps match
                          *_excluding operations will be called if any of the regexps don't match
:param regexps: list of {compiled_regexp: options} dictionaries
:return: Return list of entries that didn't match regexps
"""
rest = []
method = Entry.accept if 'accept' in operation else Entry.reject
match_mode = 'excluding' not in operation
for entry in task.entries:
log.trace('testing %i regexps to %s' % (len(regexps), entry['title']))
for regexp_opts in regexps:
regexp, opts = regexp_opts.items()[0]
# check if entry matches given regexp configuration
field = self.matches(entry, regexp, opts.get('from'), opts.get('not'))
# Run if we are in match mode and have a hit, or are in non-match mode and don't have a hit
if match_mode == bool(field):
# Creates the string with the reason for the hit
matchtext = 'regexp \'%s\' ' % regexp.pattern + ('matched field \'%s\'' %
field if match_mode else 'didn\'t match')
log.debug('%s for %s' % (matchtext, entry['title']))
# apply settings to entry and run the method on it
if opts.get('path'):
entry['path'] = opts['path']
if opts.get('set'):
# invoke set plugin with given configuration
log.debug('adding set: info to entry:"%s" %s' % (entry['title'], opts['set']))
set = plugin.get_plugin_by_name('set')
set.instance.modify(entry, opts['set'])
method(entry, matchtext)
# We had a match so break out of the regexp loop.
break
else:
# We didn't run method for any of the regexps, add this entry to rest
entry.trace('None of configured %s regexps matched' % operation)
rest.append(entry)
return rest
@event('plugin.register')
def register_plugin():
plugin.register(FilterRegexp, 'regexp', api_ver=2)
|
|
""" Unit tests for nonlinear solvers
Author: Ondrej Certik
May 2007
"""
from numpy.testing import assert_
import pytest
from scipy.optimize import nonlin, root
from numpy import diag, dot
from numpy.linalg import inv
import numpy as np
from .test_minpack import pressure_network
SOLVERS = {'anderson': nonlin.anderson, 'diagbroyden': nonlin.diagbroyden,
'linearmixing': nonlin.linearmixing, 'excitingmixing': nonlin.excitingmixing,
'broyden1': nonlin.broyden1, 'broyden2': nonlin.broyden2,
'krylov': nonlin.newton_krylov}
MUST_WORK = {'anderson': nonlin.anderson, 'broyden1': nonlin.broyden1,
'broyden2': nonlin.broyden2, 'krylov': nonlin.newton_krylov}
#-------------------------------------------------------------------------------
# Test problems
#-------------------------------------------------------------------------------
def F(x):
x = np.asarray(x).T
d = diag([3,2,1.5,1,0.5])
c = 0.01
f = -d @ x - c * float(x.T @ x) * x
return f
F.xin = [1,1,1,1,1]
F.KNOWN_BAD = {}
def F2(x):
return x
F2.xin = [1,2,3,4,5,6]
F2.KNOWN_BAD = {'linearmixing': nonlin.linearmixing,
'excitingmixing': nonlin.excitingmixing}
def F2_lucky(x):
return x
F2_lucky.xin = [0,0,0,0,0,0]
F2_lucky.KNOWN_BAD = {}
def F3(x):
A = np.array([[-2, 1, 0.], [1, -2, 1], [0, 1, -2]])
b = np.array([1, 2, 3.])
return A @ x - b
F3.xin = [1,2,3]
F3.KNOWN_BAD = {}
def F4_powell(x):
A = 1e4
return [A*x[0]*x[1] - 1, np.exp(-x[0]) + np.exp(-x[1]) - (1 + 1/A)]
F4_powell.xin = [-1, -2]
F4_powell.KNOWN_BAD = {'linearmixing': nonlin.linearmixing,
'excitingmixing': nonlin.excitingmixing,
'diagbroyden': nonlin.diagbroyden}
def F5(x):
return pressure_network(x, 4, np.array([.5, .5, .5, .5]))
F5.xin = [2., 0, 2, 0]
F5.KNOWN_BAD = {'excitingmixing': nonlin.excitingmixing,
'linearmixing': nonlin.linearmixing,
'diagbroyden': nonlin.diagbroyden}
def F6(x):
x1, x2 = x
J0 = np.array([[-4.256, 14.7],
[0.8394989, 0.59964207]])
v = np.array([(x1 + 3) * (x2**5 - 7) + 3*6,
np.sin(x2 * np.exp(x1) - 1)])
return -np.linalg.solve(J0, v)
F6.xin = [-0.5, 1.4]
F6.KNOWN_BAD = {'excitingmixing': nonlin.excitingmixing,
'linearmixing': nonlin.linearmixing,
'diagbroyden': nonlin.diagbroyden}
#-------------------------------------------------------------------------------
# Tests
#-------------------------------------------------------------------------------
class TestNonlin(object):
"""
Check the Broyden methods for a few test problems.
broyden1, broyden2, and newton_krylov must succeed for
all functions. Some of the others don't -- tests in KNOWN_BAD are skipped.
"""
def _check_nonlin_func(self, f, func, f_tol=1e-2):
x = func(f, f.xin, f_tol=f_tol, maxiter=200, verbose=0)
assert_(np.absolute(f(x)).max() < f_tol)
def _check_root(self, f, method, f_tol=1e-2):
res = root(f, f.xin, method=method,
options={'ftol': f_tol, 'maxiter': 200, 'disp': 0})
assert_(np.absolute(res.fun).max() < f_tol)
@pytest.mark.xfail
def _check_func_fail(self, *a, **kw):
pass
def test_problem_nonlin(self):
for f in [F, F2, F2_lucky, F3, F4_powell, F5, F6]:
for func in SOLVERS.values():
if func in f.KNOWN_BAD.values():
if func in MUST_WORK.values():
self._check_func_fail(f, func)
continue
self._check_nonlin_func(f, func)
def test_tol_norm_called(self):
# Check that supplying tol_norm keyword to nonlin_solve works
self._tol_norm_used = False
def local_norm_func(x):
self._tol_norm_used = True
return np.absolute(x).max()
nonlin.newton_krylov(F, F.xin, f_tol=1e-2, maxiter=200, verbose=0,
tol_norm=local_norm_func)
assert_(self._tol_norm_used)
def test_problem_root(self):
for f in [F, F2, F2_lucky, F3, F4_powell, F5, F6]:
for meth in SOLVERS:
if meth in f.KNOWN_BAD:
if meth in MUST_WORK:
self._check_func_fail(f, meth)
continue
self._check_root(f, meth)
class TestSecant(object):
"""Check that some Jacobian approximations satisfy the secant condition"""
xs = [np.array([1,2,3,4,5], float),
np.array([2,3,4,5,1], float),
np.array([3,4,5,1,2], float),
np.array([4,5,1,2,3], float),
np.array([9,1,9,1,3], float),
np.array([0,1,9,1,3], float),
np.array([5,5,7,1,1], float),
np.array([1,2,7,5,1], float),]
fs = [x**2 - 1 for x in xs]
def _check_secant(self, jac_cls, npoints=1, **kw):
"""
Check that the given Jacobian approximation satisfies secant
conditions for last `npoints` points.
"""
jac = jac_cls(**kw)
jac.setup(self.xs[0], self.fs[0], None)
for j, (x, f) in enumerate(zip(self.xs[1:], self.fs[1:])):
jac.update(x, f)
for k in range(min(npoints, j+1)):
dx = self.xs[j-k+1] - self.xs[j-k]
df = self.fs[j-k+1] - self.fs[j-k]
assert_(np.allclose(dx, jac.solve(df)))
# Check that the `npoints` secant bound is strict
if j >= npoints:
dx = self.xs[j-npoints+1] - self.xs[j-npoints]
df = self.fs[j-npoints+1] - self.fs[j-npoints]
assert_(not np.allclose(dx, jac.solve(df)))
def test_broyden1(self):
self._check_secant(nonlin.BroydenFirst)
def test_broyden2(self):
self._check_secant(nonlin.BroydenSecond)
def test_broyden1_update(self):
# Check that BroydenFirst update works as for a dense matrix
jac = nonlin.BroydenFirst(alpha=0.1)
jac.setup(self.xs[0], self.fs[0], None)
B = np.identity(5) * (-1/0.1)
for last_j, (x, f) in enumerate(zip(self.xs[1:], self.fs[1:])):
df = f - self.fs[last_j]
dx = x - self.xs[last_j]
B += (df - dot(B, dx))[:,None] * dx[None,:] / dot(dx, dx)
jac.update(x, f)
assert_(np.allclose(jac.todense(), B, rtol=1e-10, atol=1e-13))
def test_broyden2_update(self):
# Check that BroydenSecond update works as for a dense matrix
jac = nonlin.BroydenSecond(alpha=0.1)
jac.setup(self.xs[0], self.fs[0], None)
H = np.identity(5) * (-0.1)
for last_j, (x, f) in enumerate(zip(self.xs[1:], self.fs[1:])):
df = f - self.fs[last_j]
dx = x - self.xs[last_j]
H += (dx - dot(H, df))[:,None] * df[None,:] / dot(df, df)
jac.update(x, f)
assert_(np.allclose(jac.todense(), inv(H), rtol=1e-10, atol=1e-13))
def test_anderson(self):
# Anderson mixing (with w0=0) satisfies secant conditions
# for the last M iterates, see [Ey]_
#
# .. [Ey] V. Eyert, J. Comp. Phys., 124, 271 (1996).
self._check_secant(nonlin.Anderson, M=3, w0=0, npoints=3)
class TestLinear(object):
"""Solve a linear equation;
some methods find the exact solution in a finite number of steps"""
def _check(self, jac, N, maxiter, complex=False, **kw):
np.random.seed(123)
A = np.random.randn(N, N)
if complex:
A = A + 1j*np.random.randn(N, N)
b = np.random.randn(N)
if complex:
b = b + 1j*np.random.randn(N)
def func(x):
return dot(A, x) - b
sol = nonlin.nonlin_solve(func, np.zeros(N), jac, maxiter=maxiter,
f_tol=1e-6, line_search=None, verbose=0)
assert_(np.allclose(dot(A, sol), b, atol=1e-6))
def test_broyden1(self):
# Broyden methods solve linear systems exactly in 2*N steps
self._check(nonlin.BroydenFirst(alpha=1.0), 20, 41, False)
self._check(nonlin.BroydenFirst(alpha=1.0), 20, 41, True)
def test_broyden2(self):
# Broyden methods solve linear systems exactly in 2*N steps
self._check(nonlin.BroydenSecond(alpha=1.0), 20, 41, False)
self._check(nonlin.BroydenSecond(alpha=1.0), 20, 41, True)
def test_anderson(self):
# Anderson is rather similar to Broyden, if given enough storage space
self._check(nonlin.Anderson(M=50, alpha=1.0), 20, 29, False)
self._check(nonlin.Anderson(M=50, alpha=1.0), 20, 29, True)
def test_krylov(self):
# Krylov methods solve linear systems exactly in N inner steps
self._check(nonlin.KrylovJacobian, 20, 2, False, inner_m=10)
self._check(nonlin.KrylovJacobian, 20, 2, True, inner_m=10)
class TestJacobianDotSolve(object):
"""Check that solve/dot methods in Jacobian approximations are consistent"""
def _func(self, x):
return x**2 - 1 + np.dot(self.A, x)
def _check_dot(self, jac_cls, complex=False, tol=1e-6, **kw):
np.random.seed(123)
N = 7
def rand(*a):
q = np.random.rand(*a)
if complex:
q = q + 1j*np.random.rand(*a)
return q
def assert_close(a, b, msg):
d = abs(a - b).max()
f = tol + abs(b).max()*tol
if d > f:
raise AssertionError('%s: err %g' % (msg, d))
self.A = rand(N, N)
# initialize
x0 = np.random.rand(N)
jac = jac_cls(**kw)
jac.setup(x0, self._func(x0), self._func)
# check consistency
for k in range(2*N):
v = rand(N)
if hasattr(jac, '__array__'):
Jd = np.array(jac)
if hasattr(jac, 'solve'):
Gv = jac.solve(v)
Gv2 = np.linalg.solve(Jd, v)
assert_close(Gv, Gv2, 'solve vs array')
if hasattr(jac, 'rsolve'):
Gv = jac.rsolve(v)
Gv2 = np.linalg.solve(Jd.T.conj(), v)
assert_close(Gv, Gv2, 'rsolve vs array')
if hasattr(jac, 'matvec'):
Jv = jac.matvec(v)
Jv2 = np.dot(Jd, v)
assert_close(Jv, Jv2, 'dot vs array')
if hasattr(jac, 'rmatvec'):
Jv = jac.rmatvec(v)
Jv2 = np.dot(Jd.T.conj(), v)
assert_close(Jv, Jv2, 'rmatvec vs array')
if hasattr(jac, 'matvec') and hasattr(jac, 'solve'):
Jv = jac.matvec(v)
Jv2 = jac.solve(jac.matvec(Jv))
assert_close(Jv, Jv2, 'dot vs solve')
if hasattr(jac, 'rmatvec') and hasattr(jac, 'rsolve'):
Jv = jac.rmatvec(v)
Jv2 = jac.rmatvec(jac.rsolve(Jv))
assert_close(Jv, Jv2, 'rmatvec vs rsolve')
x = rand(N)
jac.update(x, self._func(x))
def test_broyden1(self):
self._check_dot(nonlin.BroydenFirst, complex=False)
self._check_dot(nonlin.BroydenFirst, complex=True)
def test_broyden2(self):
self._check_dot(nonlin.BroydenSecond, complex=False)
self._check_dot(nonlin.BroydenSecond, complex=True)
def test_anderson(self):
self._check_dot(nonlin.Anderson, complex=False)
self._check_dot(nonlin.Anderson, complex=True)
def test_diagbroyden(self):
self._check_dot(nonlin.DiagBroyden, complex=False)
self._check_dot(nonlin.DiagBroyden, complex=True)
def test_linearmixing(self):
self._check_dot(nonlin.LinearMixing, complex=False)
self._check_dot(nonlin.LinearMixing, complex=True)
def test_excitingmixing(self):
self._check_dot(nonlin.ExcitingMixing, complex=False)
self._check_dot(nonlin.ExcitingMixing, complex=True)
def test_krylov(self):
self._check_dot(nonlin.KrylovJacobian, complex=False, tol=1e-3)
self._check_dot(nonlin.KrylovJacobian, complex=True, tol=1e-3)
class TestNonlinOldTests(object):
""" Test case for a simple constrained entropy maximization problem
(the machine translation example of Berger et al in
Computational Linguistics, vol 22, num 1, pp 39--72, 1996.)
"""
def test_broyden1(self):
x = nonlin.broyden1(F,F.xin,iter=12,alpha=1)
assert_(nonlin.norm(x) < 1e-9)
assert_(nonlin.norm(F(x)) < 1e-9)
def test_broyden2(self):
x = nonlin.broyden2(F,F.xin,iter=12,alpha=1)
assert_(nonlin.norm(x) < 1e-9)
assert_(nonlin.norm(F(x)) < 1e-9)
def test_anderson(self):
x = nonlin.anderson(F,F.xin,iter=12,alpha=0.03,M=5)
assert_(nonlin.norm(x) < 0.33)
def test_linearmixing(self):
x = nonlin.linearmixing(F,F.xin,iter=60,alpha=0.5)
assert_(nonlin.norm(x) < 1e-7)
assert_(nonlin.norm(F(x)) < 1e-7)
def test_exciting(self):
x = nonlin.excitingmixing(F,F.xin,iter=20,alpha=0.5)
assert_(nonlin.norm(x) < 1e-5)
assert_(nonlin.norm(F(x)) < 1e-5)
def test_diagbroyden(self):
x = nonlin.diagbroyden(F,F.xin,iter=11,alpha=1)
assert_(nonlin.norm(x) < 1e-8)
assert_(nonlin.norm(F(x)) < 1e-8)
def test_root_broyden1(self):
res = root(F, F.xin, method='broyden1',
options={'nit': 12, 'jac_options': {'alpha': 1}})
assert_(nonlin.norm(res.x) < 1e-9)
assert_(nonlin.norm(res.fun) < 1e-9)
def test_root_broyden2(self):
res = root(F, F.xin, method='broyden2',
options={'nit': 12, 'jac_options': {'alpha': 1}})
assert_(nonlin.norm(res.x) < 1e-9)
assert_(nonlin.norm(res.fun) < 1e-9)
def test_root_anderson(self):
res = root(F, F.xin, method='anderson',
options={'nit': 12,
'jac_options': {'alpha': 0.03, 'M': 5}})
assert_(nonlin.norm(res.x) < 0.33)
def test_root_linearmixing(self):
res = root(F, F.xin, method='linearmixing',
options={'nit': 60,
'jac_options': {'alpha': 0.5}})
assert_(nonlin.norm(res.x) < 1e-7)
assert_(nonlin.norm(res.fun) < 1e-7)
def test_root_excitingmixing(self):
res = root(F, F.xin, method='excitingmixing',
options={'nit': 20,
'jac_options': {'alpha': 0.5}})
assert_(nonlin.norm(res.x) < 1e-5)
assert_(nonlin.norm(res.fun) < 1e-5)
def test_root_diagbroyden(self):
res = root(F, F.xin, method='diagbroyden',
options={'nit': 11,
'jac_options': {'alpha': 1}})
assert_(nonlin.norm(res.x) < 1e-8)
assert_(nonlin.norm(res.fun) < 1e-8)
|
|
import re
import sys
import urllib.parse
import bs4
from .browser import Browser
from .form import Form
from .utils import LinkNotFoundError
from requests.structures import CaseInsensitiveDict
class _BrowserState:
def __init__(self, page=None, url=None, form=None, request=None):
self.page = page
self.url = url
self.form = form
self.request = request
class StatefulBrowser(Browser):
"""An extension of :class:`Browser` that stores the browser's state
and provides many convenient functions for interacting with HTML elements.
It is the primary tool in MechanicalSoup for interfacing with websites.
:param session: Attach a pre-existing requests Session instead of
constructing a new one.
:param soup_config: Configuration passed to BeautifulSoup to affect
the way HTML is parsed. Defaults to ``{'features': 'lxml'}``.
        If overridden, it is highly recommended to `specify a parser
<https://www.crummy.com/software/BeautifulSoup/bs4/doc/#specifying-the-parser-to-use>`__.
Otherwise, BeautifulSoup will issue a warning and pick one for
you, but the parser it chooses may be different on different
machines.
:param requests_adapters: Configuration passed to requests, to affect
the way HTTP requests are performed.
:param raise_on_404: If True, raise :class:`LinkNotFoundError`
when visiting a page triggers a 404 Not Found error.
:param user_agent: Set the user agent header to this value.
All arguments are forwarded to :func:`Browser`.
Examples ::
browser = mechanicalsoup.StatefulBrowser(
soup_config={'features': 'lxml'}, # Use the lxml HTML parser
raise_on_404=True,
user_agent='MyBot/0.1: mysite.example.com/bot_info',
)
browser.open(url)
# ...
browser.close()
Once not used anymore, the browser can be closed
using :func:`~Browser.close`.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.__debug = False
self.__verbose = 0
self.__state = _BrowserState()
# Aliases for backwards compatibility
# (Included specifically in __init__ to suppress them in Sphinx docs)
self.get_current_page = lambda: self.page
# Almost same as self.form, but don't raise an error if no
# form was selected for backward compatibility.
self.get_current_form = lambda: self.__state.form
self.get_url = lambda: self.url
def set_debug(self, debug):
"""Set the debug mode (off by default).
Set to True to enable debug mode. When active, some actions
will launch a browser on the current page on failure to let
you inspect the page content.
"""
self.__debug = debug
def get_debug(self):
"""Get the debug mode (off by default)."""
return self.__debug
def set_verbose(self, verbose):
"""Set the verbosity level (an integer).
* 0 means no verbose output.
* 1 shows one dot per visited page (looks like a progress bar)
* >= 2 shows each visited URL.
"""
self.__verbose = verbose
def get_verbose(self):
"""Get the verbosity level. See :func:`set_verbose()`."""
return self.__verbose
@property
def page(self):
"""Get the current page as a soup object."""
return self.__state.page
@property
def url(self):
"""Get the URL of the currently visited page."""
return self.__state.url
@property
def form(self):
"""Get the currently selected form as a :class:`Form` object.
See :func:`select_form`.
"""
if self.__state.form is None:
raise AttributeError("No form has been selected yet on this page.")
return self.__state.form
def __setitem__(self, name, value):
"""Call item assignment on the currently selected form.
See :func:`Form.__setitem__`.
"""
self.form[name] = value
def new_control(self, type, name, value, **kwargs):
"""Call :func:`Form.new_control` on the currently selected form."""
return self.form.new_control(type, name, value, **kwargs)
def absolute_url(self, url):
"""Return the absolute URL made from the current URL and ``url``.
The current URL is only used to provide any missing components of
``url``, as in the `.urljoin() method of urllib.parse
<https://docs.python.org/3/library/urllib.parse.html#urllib.parse.urljoin>`__.
"""
return urllib.parse.urljoin(self.url, url)
def open(self, url, *args, **kwargs):
"""Open the URL and store the Browser's state in this object.
All arguments are forwarded to :func:`Browser.get`.
:return: Forwarded from :func:`Browser.get`.
"""
if self.__verbose == 1:
sys.stdout.write('.')
sys.stdout.flush()
elif self.__verbose >= 2:
print(url)
resp = self.get(url, *args, **kwargs)
self.__state = _BrowserState(page=resp.soup, url=resp.url,
request=resp.request)
return resp
def open_fake_page(self, page_text, url=None, soup_config=None):
"""Mock version of :func:`open`.
Behave as if opening a page whose text is ``page_text``, but do not
perform any network access. If ``url`` is set, pretend it is the page's
URL. Useful mainly for testing.
"""
soup_config = soup_config or self.soup_config
self.__state = _BrowserState(
page=bs4.BeautifulSoup(page_text, **soup_config),
url=url)
def open_relative(self, url, *args, **kwargs):
"""Like :func:`open`, but ``url`` can be relative to the currently
visited page.
"""
return self.open(self.absolute_url(url), *args, **kwargs)
def refresh(self):
"""Reload the current page with the same request as originally done.
        Any change (`select_form`, or any value filled in on the form) made to
the current page before refresh is discarded.
:raise ValueError: Raised if no refreshable page is loaded, e.g., when
using the shallow ``Browser`` wrapper functions.
:return: Response of the request."""
old_request = self.__state.request
if old_request is None:
raise ValueError('The current page is not refreshable. Either no '
'page is opened or low-level browser methods '
'were used to do so')
resp = self.session.send(old_request)
Browser.add_soup(resp, self.soup_config)
self.__state = _BrowserState(page=resp.soup, url=resp.url,
request=resp.request)
return resp
def select_form(self, selector="form", nr=0):
"""Select a form in the current page.
:param selector: CSS selector or a bs4.element.Tag object to identify
the form to select.
If not specified, ``selector`` defaults to "form", which is
useful if, e.g., there is only one form on the page.
For ``selector`` syntax, see the `.select() method in BeautifulSoup
<https://www.crummy.com/software/BeautifulSoup/bs4/doc/#css-selectors>`__.
:param nr: A zero-based index specifying which form among those that
match ``selector`` will be selected. Useful when one or more forms
have the same attributes as the form you want to select, and its
position on the page is the only way to uniquely identify it.
Default is the first matching form (``nr=0``).
:return: The selected form as a soup object. It can also be
retrieved later with the :attr:`form` attribute.
"""
if isinstance(selector, bs4.element.Tag):
if selector.name != "form":
raise LinkNotFoundError
self.__state.form = Form(selector)
else:
# nr is a 0-based index for consistency with mechanize
found_forms = self.page.select(selector,
limit=nr + 1)
if len(found_forms) != nr + 1:
if self.__debug:
print('select_form failed for', selector)
self.launch_browser()
raise LinkNotFoundError()
self.__state.form = Form(found_forms[-1])
return self.form
def _merge_referer(self, **kwargs):
"""Helper function to set the Referer header in kwargs passed to
        requests, if it has not already been overridden by the user."""
referer = self.url
headers = CaseInsensitiveDict(kwargs.get('headers', {}))
if referer is not None and 'Referer' not in headers:
headers['Referer'] = referer
kwargs['headers'] = headers
return kwargs
def submit_selected(self, btnName=None, update_state=True,
*args, **kwargs):
"""Submit the form that was selected with :func:`select_form`.
:return: Forwarded from :func:`Browser.submit`.
If there are multiple submit input/button elements, passes ``btnName``
to :func:`Form.choose_submit` on the current form to choose between
them. If `update_state` is False, form will be submited but the browser
state will remain unchanged. This is useful for forms that result in
a download of a file. All other arguments are forwarded to
:func:`Browser.submit`.
"""
self.form.choose_submit(btnName)
kwargs = self._merge_referer(**kwargs)
resp = self.submit(self.__state.form, url=self.__state.url,
*args, **kwargs)
if update_state:
self.__state = _BrowserState(page=resp.soup, url=resp.url,
request=resp.request)
return resp
def list_links(self, *args, **kwargs):
"""Display the list of links in the current page. Arguments are
forwarded to :func:`links`.
"""
print("Links in the current page:")
for link in self.links(*args, **kwargs):
print(" ", link)
def links(self, url_regex=None, link_text=None, *args, **kwargs):
"""Return links in the page, as a list of bs4.element.Tag objects.
To return links matching specific criteria, specify ``url_regex``
to match the *href*-attribute, or ``link_text`` to match the
*text*-attribute of the Tag. All other arguments are forwarded to
the `.find_all() method in BeautifulSoup
<https://www.crummy.com/software/BeautifulSoup/bs4/doc/#find-all>`__.
"""
all_links = self.page.find_all(
'a', href=True, *args, **kwargs)
if url_regex is not None:
all_links = [a for a in all_links
if re.search(url_regex, a['href'])]
if link_text is not None:
all_links = [a for a in all_links
if a.text == link_text]
return all_links
def find_link(self, *args, **kwargs):
"""Find and return a link, as a bs4.element.Tag object.
The search can be refined by specifying any argument that is accepted
by :func:`links`. If several links match, return the first one found.
If no link is found, raise :class:`LinkNotFoundError`.
"""
links = self.links(*args, **kwargs)
if len(links) == 0:
raise LinkNotFoundError()
else:
return links[0]
def _find_link_internal(self, link, args, kwargs):
"""Wrapper around find_link that deals with convenience special-cases:
* If ``link`` has an *href*-attribute, then return it. If not,
consider it as a ``url_regex`` argument.
* If searching for the link fails and debug is active, launch
a browser.
"""
if hasattr(link, 'attrs') and 'href' in link.attrs:
return link
# Check if "link" parameter should be treated as "url_regex"
# but reject obtaining it from both places.
if link and 'url_regex' in kwargs:
raise ValueError('link parameter cannot be treated as '
'url_regex because url_regex is already '
'present in keyword arguments')
elif link:
kwargs['url_regex'] = link
try:
return self.find_link(*args, **kwargs)
except LinkNotFoundError:
if self.get_debug():
print('find_link failed for', kwargs)
self.list_links()
self.launch_browser()
raise
def follow_link(self, link=None, *args, **kwargs):
"""Follow a link.
If ``link`` is a bs4.element.Tag (i.e. from a previous call to
:func:`links` or :func:`find_link`), then follow the link.
If ``link`` doesn't have a *href*-attribute or is None, treat
``link`` as a url_regex and look it up with :func:`find_link`.
Any additional arguments specified are forwarded to this function.
If the link is not found, raise :class:`LinkNotFoundError`.
Before raising, if debug is activated, list available links in the
page and launch a browser.
:return: Forwarded from :func:`open_relative`.
"""
link = self._find_link_internal(link, args, kwargs)
referer = self.url
headers = {'Referer': referer} if referer else None
return self.open_relative(link['href'], headers=headers)
def download_link(self, link=None, file=None, *args, **kwargs):
"""Downloads the contents of a link to a file. This function behaves
similarly to :func:`follow_link`, but the browser state will
not change when calling this function.
:param file: Filesystem path where the page contents will be
downloaded. If the file already exists, it will be overwritten.
Other arguments are the same as :func:`follow_link` (``link``
can either be a bs4.element.Tag or a URL regex, other
arguments are forwarded to :func:`find_link`).
:return: `requests.Response
<http://docs.python-requests.org/en/master/api/#requests.Response>`__
object.
"""
link = self._find_link_internal(link, args, kwargs)
url = self.absolute_url(link['href'])
referer = self.url
headers = {'Referer': referer} if referer else None
response = self.session.get(url, headers=headers)
if self.raise_on_404 and response.status_code == 404:
raise LinkNotFoundError()
# Save the response content to file
if file is not None:
with open(file, 'wb') as f:
f.write(response.content)
return response
def launch_browser(self, soup=None):
"""Launch a browser to display a page, for debugging purposes.
        :param soup: Page contents to display, supplied as a bs4 soup object.
Defaults to the current page of the ``StatefulBrowser`` instance.
"""
if soup is None:
soup = self.page
super().launch_browser(soup)
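# Minimal usage sketch (illustrative: the URL and form field names below are
# placeholders, not part of this module):
#
#     browser = StatefulBrowser()
#     browser.open("https://example.com/login")
#     browser.select_form('form[action="/login"]')
#     browser["username"] = "alice"
#     browser["password"] = "secret"
#     browser.submit_selected()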
|
|
# $Id$
#
# Copyright (C) 2002-2008 Greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
"""unit testing code for the signatures
"""
from rdkit import Chem
import unittest
from rdkit.Chem.Pharm2D import Generate,SigFactory,Utils
from rdkit.Chem import ChemicalFeatures
import os.path
from rdkit import RDConfig
class TestCase(unittest.TestCase):
def setUp(self):
fdefFile = os.path.join(RDConfig.RDCodeDir,'Chem','Pharm2D','test_data','BaseFeatures.fdef')
featFactory = ChemicalFeatures.BuildFeatureFactory(fdefFile)
self.factory = SigFactory.SigFactory(featFactory,minPointCount=2,maxPointCount=3,
trianglePruneBins=False)
self.factory.SetBins([(0,2),(2,5),(5,8)])
self.factory.Init()
SigFactory._verbose=False
def test1Sizes(self):
self.factory.maxPointCount=2
self.factory.Init()
sig = self.factory.GetSignature()
self.assertEqual(len(sig),45)
self.factory.maxPointCount=3
self.factory.Init()
sig = self.factory.GetSignature()
self.assertEqual(len(sig),990)
self.factory.maxPointCount=4
self.factory.Init()
sig = self.factory.GetSignature()
self.assertEqual(len(sig),18000)
def test2BitIdx(self):
data = [
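      # Each entry is (feature type indices, binned distances,
      # expected bit index).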
( (0,0),[0],0 ),
( (0,0),[2],1 ),
( (0,0),[5],2 ),
( (0,1),[5],5 ),
( (1,1),[4],16 ),
( (1,1),[7],17 ),
( (0,0,0),[1,1,1],45),
( (0,0,1),[1,1,1],72),
( (0,0,1),[1,1,3],75),
( (0,0,1),[3,1,1],81),
( (0,0,1),[3,3,1],84),
]
for tpl in data:
patts,dists,bit = tpl
idx = self.factory.GetBitIdx(patts,dists)
self.assertEqual(bit,idx)
cnt,feats,bins = self.factory.GetBitInfo(bit)
self.assertEqual(cnt,len(patts))
self.assertEqual(feats,patts)
def test3BitIdx(self):
""" test 3 point p'cophore ids,
you can never have too much of this stuff
"""
self.factory.SetBins(((0,2),(2,4),(4,8)))
self.factory.Init()
self.assertEqual(self.factory.GetSigSize(),990)
probes = [((0,0,0),(1,3,1),54),
((0,0,0),(3,1,1),54),
((0,0,0),(1,1,3),54),
((0,0,0),(1,3,3),57),
((0,0,1),(1,3,1),75),
]
for patts,dists,ans in probes:
idx = self.factory.GetBitIdx(patts,dists)
self.assertEqual(idx,ans)
cnt,feats,bins = self.factory.GetBitInfo(ans)
self.assertEqual(cnt,len(patts))
self.assertEqual(feats,patts)
def test4BitIdx(self):
self.factory.trianglePruneBins=True
self.factory.Init()
sig = self.factory.GetSignature()
self.assertEqual(len(sig),885)
probes = [((0,0,0),(1,3,1),52),
((0,0,0),(1,1,3),52),
((0,0,0),(3,1,1),52),
((0,0,0),(1,3,3),55),
((0,0,1),(1,3,1),71),
]
for patts,dists,ans in probes:
idx = self.factory.GetBitIdx(patts,dists)
self.assertEqual(idx,ans)
cnt,feats,bins = self.factory.GetBitInfo(ans)
self.assertEqual(cnt,len(patts))
self.assertEqual(feats,patts)
def test5SimpleSig(self):
factory = self.factory
factory.SetBins([(1,3),(3,7),(7,10)])
factory.minPointCount=2
factory.maxPointCount=3
factory.Init()
mol = Chem.MolFromSmiles('O=CCC=O')
sig=Generate.Gen2DFingerprint(mol,factory)
self.assertEqual(len(sig),990)
bs = tuple(sig.GetOnBits())
self.assertEqual(bs,(1,))
mol = Chem.MolFromSmiles('O=CC(CC=O)CCC=O')
sig=Generate.Gen2DFingerprint(mol,factory)
self.assertEqual(len(sig),990)
bs = tuple(sig.GetOnBits())
self.assertEqual(bs,(1,2,67))
def test6SimpleSigCounts(self):
factory = self.factory
factory.SetBins([(1,3),(3,7),(7,10)])
factory.minPointCount=2
factory.maxPointCount=3
factory.useCounts=True
factory.Init()
mol = Chem.MolFromSmiles('O=CCC=O')
sig=Generate.Gen2DFingerprint(mol,factory)
self.assertEqual(sig.GetLength(),990)
cs = tuple(sig.GetNonzeroElements().items())
self.assertEqual(cs,((1,1),))
mol = Chem.MolFromSmiles('O=CC(CC=O)CCC=O')
sig=Generate.Gen2DFingerprint(mol,factory)
self.assertEqual(sig.GetLength(),990)
elems = sig.GetNonzeroElements()
bs = list(elems.keys())
bs.sort()
cs = [(x,elems[x]) for x in bs]
self.assertEqual(tuple(cs),((1,2),(2,1),(67,1)))
def test7SimpleSigSkip(self):
factory = self.factory
factory.SetBins([(1,3),(3,7),(7,10)])
factory.minPointCount=2
factory.maxPointCount=3
factory.skipFeats='Acceptor'
factory.Init()
mol = Chem.MolFromSmiles('O=CCC=O')
sig=Generate.Gen2DFingerprint(mol,factory)
self.assertEqual(len(sig),570)
bs = tuple(sig.GetOnBits())
self.assertEqual(bs,())
def test8MultiPointMatches(self):
factory = self.factory
factory.SetBins([(1,3),(3,7),(7,10)])
factory.minPointCount=2
factory.maxPointCount=3
factory.Init()
mol = Chem.MolFromSmiles('O=Cc1ccccc1')
sig=Generate.Gen2DFingerprint(mol,factory)
self.assertEqual(len(sig),990)
bs = tuple(sig.GetOnBits())
self.assertEqual(bs,(3,))
mol = Chem.MolFromSmiles('O=CCCCCCCCCc1ccccc1')
sig=Generate.Gen2DFingerprint(mol,factory)
self.assertEqual(len(sig),990)
bs = tuple(sig.GetOnBits())
self.assertEqual(bs,())
# FIX: add test for perms argument to Gen2DFingerprint
def test9BondOrderSigs(self):
""" test sigs where bond order is used
"""
factory = self.factory
factory.SetBins([(1,4),(4,7),(7,10)])
factory.minPointCount=2
factory.maxPointCount=3
factory.Init()
mol = Chem.MolFromSmiles('[O-]CCC(=O)')
sig=Generate.Gen2DFingerprint(mol,self.factory)
self.assertEqual(len(sig),990)
bs = tuple(sig.GetOnBits())
self.assertEqual(bs,(1,))
self.factory.includeBondOrder=True
sig=Generate.Gen2DFingerprint(mol,self.factory)
self.assertEqual(len(sig),990)
bs = tuple(sig.GetOnBits())
self.assertEqual(bs,(0,))
def testDefaultFactory(self):
from rdkit.Chem import Pharm2D
factory = Pharm2D.DefaultSigFactory()
#Generate._verbose=True
mol = Chem.MolFromSmiles('OCCC(=O)')
sig=Generate.Gen2DFingerprint(mol,factory)
self.assertEqual(len(sig),19355)
self.assertEqual(tuple(sig.GetOnBits()),(2,16,21,84,1274,4361,))
nPts,combo,scaffold,labels,dMat=factory._GetBitSummaryData(21)
self.assertEqual(nPts,2)
self.assertEqual(labels,['Acceptor','Hydrophobe'])
self.assertEqual(list(dMat[0]),[0,0])
self.assertEqual(list(dMat[1]),[0,0])
txt=factory.GetBitDescription(21)
self.assertEqual(txt,'Acceptor Hydrophobe |0 0|0 0|')
nPts,combo,scaffold,labels,dMat=factory._GetBitSummaryData(2)
self.assertEqual(nPts,2)
self.assertEqual(labels,['Acceptor','Acceptor'])
self.assertEqual(list(dMat[0]),[0,2])
self.assertEqual(list(dMat[1]),[2,0])
nPts,combo,scaffold,labels,dMat=factory._GetBitSummaryData(4361)
self.assertEqual(nPts,3)
self.assertEqual(labels,['Acceptor','Donor','Hydrophobe'])
self.assertEqual(list(dMat[0]),[0,2,0])
self.assertEqual(list(dMat[1]),[2,0,0])
self.assertEqual(list(dMat[2]),[0,0,0])
self.assertEqual(factory.GetBitDescription(4361),
'Acceptor Donor Hydrophobe |0 2 0|2 0 0|0 0 0|')
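# Illustrative sketch (not part of the test suite): building the default
# factory and inspecting one bit, mirroring testDefaultFactory above.
def _exampleDescribeBit(smiles='OCCC(=O)', bit=21):
  from rdkit.Chem import Pharm2D
  factory = Pharm2D.DefaultSigFactory()
  mol = Chem.MolFromSmiles(smiles)
  sig = Generate.Gen2DFingerprint(mol, factory)
  return tuple(sig.GetOnBits()), factory.GetBitDescription(bit)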
if __name__ == '__main__':
unittest.main()
|
|
import hashlib
import logging
from lxml import etree
from mock import Mock, patch
import os
import requests
import unittest
from six.moves.urllib.request import url2pathname
from eulfedora.models import DigitalObject
from eulfedora.server import Repository
from eulfedora.syncutil import ArchiveExport, endswith_partial, \
binarycontent_sections
from eulfedora.util import md5sum
from test.test_fedora.base import FIXTURE_ROOT
logger = logging.getLogger(__name__)
FIXTURES = {
'sync1_export': os.path.join(FIXTURE_ROOT, 'synctest1-export.xml'),
'sync2_export': os.path.join(FIXTURE_ROOT, 'synctest2-export.xml')
}
class ArchiveExportTest(unittest.TestCase):
def setUp(self):
# todo: use mocks?
self.repo = Mock(spec=Repository)
self.obj = Mock() #spec=DigitalObject)
self.obj.pid = 'synctest:1'
self.archex = ArchiveExport(self.obj, self.repo)
# set up a request session that can load file uris, so
# fixtures can be used as export data
self.session = requests.session()
self.session.mount('file://', LocalFileAdapter())
def test_get_datastream_info(self):
dsinfo = self.archex.get_datastream_info('''<foxml:datastreamVersion ID="DC.2" LABEL="Dublin Core" CREATED="2012-10-11T14:13:03.658Z" MIMETYPE="text/xml" FORMAT_URI="http://www.openarchives.org/OAI/2.0/oai_dc/" SIZE="771">
<foxml:contentDigest TYPE="MD5" DIGEST="f53aec07f2607f536bac7ee03dbbfe7c"/>''')
self.assertEqual('DC.2', dsinfo['id'])
self.assertEqual('text/xml', dsinfo['mimetype'])
self.assertEqual('771', dsinfo['size'])
self.assertEqual('MD5', dsinfo['type'])
self.assertEqual('f53aec07f2607f536bac7ee03dbbfe7c', dsinfo['digest'])
self.assertEqual('2012-10-11T14:13:03.658Z', dsinfo['created'])
# datastream info split across chunks
self.archex.end_of_last_chunk = '''<foxml:datastreamVersion ID="DC.2" LABEL="Dublin Core" CREATED="2012-10-11T14:13:03.658Z" MIMETYPE="te'''
dsinfo = self.archex.get_datastream_info('''xt/xml" FORMAT_URI="http://www.openarchives.org/OAI/2.0/oai_dc/" SIZE="771">
<foxml:contentDigest TYPE="MD5" DIGEST="f53aec07f2607f536bac7ee03dbbfe7c"/>''')
self.assertEqual('DC.2', dsinfo['id'])
self.assertEqual('text/xml', dsinfo['mimetype'])
self.assertEqual('f53aec07f2607f536bac7ee03dbbfe7c', dsinfo['digest'])
def test_object_data(self):
        # mock api to read export data from a local fixture file
response = self.session.get('file://%s' % FIXTURES['sync1_export'])
mockapi = Mock()
def mock_upload(data, *args, **kwargs):
list(data) # consume the generator so datastream processing happens
return 'uploaded://1'
mockapi.upload = mock_upload
mockapi.export.return_value = response
mockapi.base_url = 'http://fedora.example.co/fedora'
self.obj.api = self.repo.api = mockapi
data = self.archex.object_data()
foxml = data.getvalue()
self.assert_(etree.XML(foxml) is not None,
'object data should be valid xml')
self.assert_(b'foxml:binaryContent' not in foxml,
'object data for ingest should not include binaryContent tags')
self.assert_(b'<foxml:contentLocation REF="uploaded://1" TYPE="URL"/>' in foxml,
'object data for ingest should include upload id as content location')
# other tests?
# set read block size artificially low to test chunked handling
self.archex = ArchiveExport(self.obj, self.repo)
self.archex.read_block_size = 1024
data = self.archex.object_data()
foxml = data.getvalue()
self.assert_(etree.XML(foxml) is not None,
'object data should be valid xml')
self.assert_(b'foxml:binaryContent' not in foxml,
'object data for ingest should not include binaryContent tags')
self.assert_(b'<foxml:contentLocation REF="uploaded://1" TYPE="URL"/>' in foxml,
'object data for ingest should include upload id as content location')
# test with second fixture - multiple small encoded datastreams
self.archex = ArchiveExport(self.obj, self.repo)
self.archex.read_block_size = 1024
response = self.session.get('file://%s' % FIXTURES['sync2_export'])
mockapi.export.return_value = response
data = self.archex.object_data()
foxml = data.getvalue()
self.assert_(etree.XML(foxml) is not None,
'object data should be valid xml')
self.assert_(b'foxml:binaryContent' not in foxml,
'object data for ingest should not include binaryContent tags')
self.assert_(b'<foxml:contentLocation REF="uploaded://1" TYPE="URL"/>' in foxml,
'object data for ingest should include upload id as content location')
def test_object_data_split_bincontent(self):
        # explicitly test handling of binary content tag split over
# chunk boundaries
response = self.session.get('file://%s' % FIXTURES['sync1_export'])
mockapi = Mock()
def mock_upload(data, *args, **kwargs):
list(data) # consume the generator so datastream processing happens
return 'uploaded://1'
mockapi.upload = mock_upload
mockapi.export.return_value = response
self.obj.api = self.repo.api = mockapi
# test binary content tag split across chunks
self.archex = ArchiveExport(self.obj, self.repo)
# use a block size that will split the fixture in the middle of
# the first binary content tag
self.archex.read_block_size = 2688
data = self.archex.object_data()
foxml = data.getvalue()
self.assert_(etree.XML(foxml) is not None,
'object data should be valid xml')
self.assert_(b'foxml:binaryContent' not in foxml,
'object data for ingest should not include binaryContent tags')
self.archex = ArchiveExport(self.obj, self.repo)
# this blocksize ends with just the < in foxml:binaryContent
self.archex.read_block_size = 2680
data = self.archex.object_data()
foxml = data.getvalue()
self.assert_(etree.XML(foxml) is not None,
'object data should be valid xml')
self.assert_(b'foxml:binaryContent' not in foxml,
'object data for ingest should not include binaryContent tags')
self.archex = ArchiveExport(self.obj, self.repo)
# this blocksize ends with an unrelated close tag </
self.archex.read_block_size = 1526
data = self.archex.object_data()
foxml = data.getvalue()
self.assert_(etree.XML(foxml) is not None,
'object data should be valid xml')
self.assert_(b'foxml:binaryContent' not in foxml,
'object data for ingest should not include binaryContent tags')
def test_encoded_datastream(self):
# data content within a single chunk of data
mockapi = Mock()
mockapi.export.return_value = self.session.get('file://%s' % FIXTURES['sync1_export'])
mockapi.upload.return_value = 'uploaded://1'
self.obj.api = self.repo.api = mockapi
section = self.archex.get_next_section()
# get binary datastream info from first section
dsinfo = self.archex.get_datastream_info(section)
# fixture only has one binary content block
# get binarycontent tag out of the way
self.archex.get_next_section()
# next section will be file contents
self.archex.within_file = True
dscontent = b''.join(self.archex.encoded_datastream())
# check decoded size and MD5 match data from fixture
self.assertEqual(int(dsinfo['size']), len(dscontent))
self.assertEqual(dsinfo['digest'], md5sum(dscontent))
# data content across multiple chunks
mockapi.export.return_value = self.session.get('file://%s' % FIXTURES['sync1_export'])
self.obj.api = self.repo.api = mockapi
# set read block size artificially low to ensure
# datastream content is spread across multiple chunks
self.archex.read_block_size = 1024
finished = False
# iterate through the data, similar to object_data method,
# but only handle binary content
while not finished:
try:
section = self.archex.get_next_section()
except StopIteration:
finished = True
# find the section with starting binary content
            if section == b'<foxml:binaryContent>':
                # then decode the subsequent content
                self.archex.within_file = True
                dscontent = b''.join(self.archex.encoded_datastream())
self.assertEqual(int(dsinfo['size']), len(dscontent))
self.assertEqual(dsinfo['digest'], md5sum(dscontent))
# stop processing
finished = True
class UtilsTest(unittest.TestCase):
def test_endswith_partial(self):
test_string = '<foxml:binaryContent>'
test_len = 19
txt = 'some content %s' % test_string[:test_len]
len_overlap = endswith_partial(txt, test_string)
self.assertEqual(test_len, len_overlap)
test_len = 5
txt = 'some content %s' % test_string[:test_len]
len_overlap = endswith_partial(txt, test_string)
self.assertEqual(test_len, len_overlap)
test_len = 1
txt = 'some content %s' % test_string[:test_len]
len_overlap = endswith_partial(txt, test_string)
self.assertEqual(test_len, len_overlap)
# no overlap
self.assertFalse(endswith_partial('some content', test_string))
def test_binarycontent_sections(self):
with open(FIXTURES['sync1_export'], 'rb') as sync1data:
sections = list(binarycontent_sections(sync1data.read()))
self.assertEqual(5, len(sections))
self.assertEqual(b'<foxml:binaryContent>', sections[1])
self.assertEqual(b'</foxml:binaryContent>', sections[3])
with open(FIXTURES['sync2_export'], 'rb') as sync1data:
sections = list(binarycontent_sections(sync1data.read()))
# second fixture should break into 17 sections
self.assertEqual(17, len(sections))
self.assertEqual(b'<foxml:binaryContent>', sections[1])
self.assertEqual(b'</foxml:binaryContent>', sections[3])
self.assertEqual(b'<foxml:binaryContent>', sections[5])
self.assertEqual(b'</foxml:binaryContent>', sections[7])
self.assertEqual(b'<foxml:binaryContent>', sections[9])
self.assertEqual(b'</foxml:binaryContent>', sections[11])
self.assertEqual(b'<foxml:binaryContent>', sections[13])
self.assertEqual(b'</foxml:binaryContent>', sections[15])
# requests file uri adapter, thanks to
# http://stackoverflow.com/questions/10123929/python-requests-fetch-a-file-from-a-local-url
class LocalFileAdapter(requests.adapters.BaseAdapter):
"""Protocol Adapter to allow Requests to GET file:// URLs
@todo: Properly handle non-empty hostname portions.
"""
@staticmethod
def _chkpath(method, path):
"""Return an HTTP status for the given filesystem path."""
if method.lower() in ('put', 'delete'):
return 501, "Not Implemented" # TODO
elif method.lower() not in ('get', 'head'):
return 405, "Method Not Allowed"
elif os.path.isdir(path):
return 400, "Path Not A File"
elif not os.path.isfile(path):
return 404, "File Not Found"
elif not os.access(path, os.R_OK):
return 403, "Access Denied"
else:
return 200, "OK"
def send(self, req, **kwargs): # pylint: disable=unused-argument
"""Return the file specified by the given request
@type req: C{PreparedRequest}
@todo: Should I bother filling `response.headers` and processing
If-Modified-Since and friends using `os.stat`?
"""
path = os.path.normcase(os.path.normpath(url2pathname(req.path_url)))
response = requests.Response()
response.status_code, response.reason = self._chkpath(req.method, path)
if response.status_code == 200 and req.method.lower() != 'head':
try:
response.raw = open(path, 'rb')
except (OSError, IOError) as err:
response.status_code = 500
response.reason = str(err)
if isinstance(req.url, bytes):
response.url = req.url.decode('utf-8')
else:
response.url = req.url
response.request = req
response.connection = self
return response
def close(self):
pass
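# Illustrative helper (not used by the tests above): fetch a local file via
# requests by mounting LocalFileAdapter, exactly as setUp() does.
def _example_local_file_get(path):
    session = requests.session()
    session.mount('file://', LocalFileAdapter())
    return session.get('file://%s' % path)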
|
|
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Base classes for storage engines
"""
import abc
from oslo_config import cfg
from oslo_db import api as db_api
import six
from magnum.common import profiler
_BACKEND_MAPPING = {'sqlalchemy': 'magnum.db.sqlalchemy.api'}
IMPL = db_api.DBAPI.from_config(cfg.CONF, backend_mapping=_BACKEND_MAPPING,
lazy=True)
def get_instance():
"""Return a DB API instance."""
return IMPL
@profiler.trace_cls("db")
@six.add_metaclass(abc.ABCMeta)
class Connection(object):
"""Base class for storage system connections."""
@abc.abstractmethod
def __init__(self):
"""Constructor."""
@abc.abstractmethod
def get_cluster_list(self, context, filters=None, limit=None,
marker=None, sort_key=None, sort_dir=None):
"""Get matching clusters.
Return a list of the specified columns for all clusters that match the
specified filters.
:param context: The security context
:param filters: Filters to apply. Defaults to None.
:param limit: Maximum number of clusters to return.
:param marker: the last item of the previous page; we return the next
result set.
:param sort_key: Attribute by which results should be sorted.
:param sort_dir: direction in which results should be sorted.
(asc, desc)
:returns: A list of tuples of the specified columns.
"""
@abc.abstractmethod
def create_cluster(self, values):
"""Create a new cluster.
:param values: A dict containing several items used to identify
and track the cluster, and several dicts which are
passed into the Drivers when managing this cluster.
For example:
::
{
'uuid': uuidutils.generate_uuid(),
'name': 'example',
'type': 'virt'
}
:returns: A cluster.
"""
@abc.abstractmethod
def get_cluster_by_id(self, context, cluster_id):
"""Return a cluster.
:param context: The security context
:param cluster_id: The id of a cluster.
:returns: A cluster.
"""
@abc.abstractmethod
def get_cluster_by_uuid(self, context, cluster_uuid):
"""Return a cluster.
:param context: The security context
:param cluster_uuid: The uuid of a cluster.
:returns: A cluster.
"""
@abc.abstractmethod
def get_cluster_by_name(self, context, cluster_name):
"""Return a cluster.
:param context: The security context
:param cluster_name: The name of a cluster.
:returns: A cluster.
"""
@abc.abstractmethod
def get_cluster_stats(self, context, project_id):
"""Return clusters stats for the given project.
:param context: The security context
:param project_id: The project id.
:returns: clusters, nodes count.
"""
@abc.abstractmethod
def get_cluster_count_all(self, context, filters=None):
"""Get count of matching clusters.
:param context: The security context
:param filters: Filters to apply. Defaults to None.
:returns: Count of matching clusters.
"""
@abc.abstractmethod
def destroy_cluster(self, cluster_id):
"""Destroy a cluster and all associated interfaces.
:param cluster_id: The id or uuid of a cluster.
"""
@abc.abstractmethod
def update_cluster(self, cluster_id, values):
"""Update properties of a cluster.
:param cluster_id: The id or uuid of a cluster.
:returns: A cluster.
:raises: ClusterNotFound
"""
@abc.abstractmethod
def get_cluster_template_list(self, context, filters=None,
limit=None, marker=None, sort_key=None,
sort_dir=None):
"""Get matching ClusterTemplates.
Return a list of the specified columns for all ClusterTemplates that
match the specified filters.
:param context: The security context
:param filters: Filters to apply. Defaults to None.
:param limit: Maximum number of ClusterTemplates to return.
:param marker: the last item of the previous page; we return the next
result set.
:param sort_key: Attribute by which results should be sorted.
:param sort_dir: direction in which results should be sorted.
(asc, desc)
:returns: A list of tuples of the specified columns.
"""
@abc.abstractmethod
def create_cluster_template(self, values):
"""Create a new ClusterTemplate.
:param values: A dict containing several items used to identify
and track the ClusterTemplate, and several dicts which
are passed into the Drivers when managing this
ClusterTemplate.
For example:
::
{
'uuid': uuidutils.generate_uuid(),
'name': 'example',
'type': 'virt'
}
:returns: A ClusterTemplate.
"""
@abc.abstractmethod
def get_cluster_template_by_id(self, context, cluster_template_id):
"""Return a ClusterTemplate.
:param context: The security context
:param cluster_template_id: The id of a ClusterTemplate.
:returns: A ClusterTemplate.
"""
@abc.abstractmethod
def get_cluster_template_by_uuid(self, context, cluster_template_uuid):
"""Return a ClusterTemplate.
:param context: The security context
:param cluster_template_uuid: The uuid of a ClusterTemplate.
:returns: A ClusterTemplate.
"""
@abc.abstractmethod
def get_cluster_template_by_name(self, context, cluster_template_name):
"""Return a ClusterTemplate.
:param context: The security context
:param cluster_template_name: The name of a ClusterTemplate.
:returns: A ClusterTemplate.
"""
@abc.abstractmethod
def destroy_cluster_template(self, cluster_template_id):
"""Destroy a ClusterTemplate and all associated interfaces.
:param cluster_template_id: The id or uuid of a ClusterTemplate.
"""
@abc.abstractmethod
def update_cluster_template(self, cluster_template_id, values):
"""Update properties of a ClusterTemplate.
:param cluster_template_id: The id or uuid of a ClusterTemplate.
:returns: A ClusterTemplate.
:raises: ClusterTemplateNotFound
"""
@abc.abstractmethod
def create_x509keypair(self, values):
"""Create a new x509keypair.
:param values: A dict containing several items used to identify
and track the x509keypair, and several dicts which
are passed into the Drivers when managing this
x509keypair. For example:
::
{
'uuid': uuidutils.generate_uuid(),
'certificate': 'AAA...',
'private_key': 'BBB...',
'private_key_passphrase': 'CCC...',
'intermediates': 'DDD...',
}
:returns: A X509KeyPair.
"""
@abc.abstractmethod
def get_x509keypair_by_id(self, context, x509keypair_id):
"""Return a x509keypair.
:param context: The security context
:param x509keypair_id: The id of a x509keypair.
:returns: A x509keypair.
"""
@abc.abstractmethod
def get_x509keypair_by_uuid(self, context, x509keypair_uuid):
"""Return a x509keypair.
:param context: The security context
:param x509keypair_uuid: The uuid of a x509keypair.
:returns: A x509keypair.
"""
@abc.abstractmethod
def destroy_x509keypair(self, x509keypair_id):
"""Destroy a x509keypair.
:param x509keypair_id: The id or uuid of a x509keypair.
"""
@abc.abstractmethod
def update_x509keypair(self, x509keypair_id, values):
"""Update properties of a X509KeyPair.
:param x509keypair_id: The id or uuid of a X509KeyPair.
:returns: A X509KeyPair.
"""
@abc.abstractmethod
def get_x509keypair_list(self, context, filters=None, limit=None,
marker=None, sort_key=None, sort_dir=None):
"""Get matching x509keypairs.
Return a list of the specified columns for all x509keypairs
that match the specified filters.
:param context: The security context
:param filters: Filters to apply. Defaults to None.
:param limit: Maximum number of x509keypairs to return.
:param marker: the last item of the previous page; we return the next
result set.
:param sort_key: Attribute by which results should be sorted.
:param sort_dir: direction in which results should be sorted.
(asc, desc)
:returns: A list of tuples of the specified columns.
"""
@abc.abstractmethod
def destroy_magnum_service(self, magnum_service_id):
"""Destroys a magnum_service record.
:param magnum_service_id: The id of a magnum_service.
"""
@abc.abstractmethod
def update_magnum_service(self, magnum_service_id, values):
"""Update properties of a magnum_service.
:param magnum_service_id: The id of a magnum_service record.
"""
@abc.abstractmethod
def get_magnum_service_by_host_and_binary(self, host, binary):
"""Return a magnum_service record.
:param host: The host where the binary is located.
:param binary: The name of the binary.
:returns: A magnum_service record.
"""
@abc.abstractmethod
def create_magnum_service(self, values):
"""Create a new magnum_service record.
:param values: A dict containing several items used to identify
and define the magnum_service record.
:returns: A magnum_service record.
"""
@abc.abstractmethod
def get_magnum_service_list(self, disabled=None, limit=None,
marker=None, sort_key=None, sort_dir=None):
"""Get matching magnum_service records.
Return a list of the specified columns for all magnum_services
        that match the specified filters.
        :param disabled: Filters disabled services. Defaults to None.
:param limit: Maximum number of magnum_services to return.
:param marker: the last item of the previous page; we return the next
result set.
:param sort_key: Attribute by which results should be sorted.
:param sort_dir: direction in which results should be sorted.
(asc, desc)
:returns: A list of tuples of the specified columns.
"""
@abc.abstractmethod
def create_quota(self, values):
"""Create a new Quota record for a resource in a project.
:param values: A dict containing several items used to identify
and track quota for a resource in a project.
::
{
'id': uuidutils.generate_uuid(),
'project_id': 'fake_project',
'resource': 'fake_resource',
'hard_limit': 'fake_hardlimit',
}
:returns: A quota record.
"""
@abc.abstractmethod
def update_quota(self, project_id, values):
"""Update quota record.
:param project_id: The project id.
:param values: A dict containing several items used to identify
and track quota for a resource in a project.
::
{
'id': uuidutils.generate_uuid(),
'project_id': 'fake_project',
'resource': 'fake_resource',
'hard_limit': 'fake_hardlimit',
}
:returns: A quota record.
"""
@abc.abstractmethod
def delete_quota(self, project_id, resource):
"""Delete a quota.
:param project_id: Project id.
:param resource: resource name.
"""
@abc.abstractmethod
def get_quota_by_id(self, context, quota_id):
"""Return a quota.
:param context: The security context
:param quota_id: The id of a quota.
:returns: A quota.
"""
@abc.abstractmethod
def get_quota_list(self, context, filters=None, limit=None,
marker=None, sort_key=None, sort_dir=None):
"""Get quota list.
Return a list of the specified columns for all quotas that match the
specified filters.
:param context: The security context
:param filters: Filters to apply. Defaults to None.
:param limit: Maximum number of clusters to return.
:param marker: the last item of the previous page; we return the next
result set.
:param sort_key: Attribute by which results should be sorted.
:param sort_dir: direction in which results should be sorted.
(asc, desc)
:returns: A list of tuples of the specified columns.
"""
@abc.abstractmethod
def quota_get_all_by_project_id(self, project_id):
"""Gets Quota record for all the resources in a project.
:param project_id: Project identifier of the project.
:returns: Quota record for all resources in a project.
"""
@abc.abstractmethod
def get_quota_by_project_id_resource(self, project_id, resource):
"""Gets quota record for the given quota id.
:param project_id: project id.
:param resource: resource name.
:returns: Quota record.
"""
@abc.abstractmethod
def get_federation_by_id(self, context, federation_id):
"""Return a federation for a given federation id.
:param context: The security context
:param federation_id: The id of a federation
:returns: A federation
"""
@abc.abstractmethod
def get_federation_by_uuid(self, context, federation_uuid):
"""Return a federation for a given federation uuid.
:param context: The security context
:param federation_uuid: The uuid of a federation
:returns: A federation
"""
@abc.abstractmethod
def get_federation_by_name(self, context, federation_name):
"""Return a federation for a given federation name.
:param context: The security context
:param federation_name: The name of a federation
:returns: A federation
"""
@abc.abstractmethod
def get_federation_list(self, context, limit=None, marker=None,
sort_key=None, sort_dir=None, filters=None):
"""Get matching federations.
Return a list of the specified columns for all federations that
match the specified filters.
:param context: The security context
:param filters: Filters to apply. Defaults to None.
:param limit: Maximum number of federations to return.
:param marker: the last item of the previous page; we return the next
result set.
:param sort_key: Attribute by which results should be sorted.
:param sort_dir: direction in which results should be sorted.
(asc, desc)
:returns: A list of tuples of the specified columns.
"""
@abc.abstractmethod
def create_federation(self, values):
"""Create a new federation.
:param values: A dict containing several items used to identify
and track the federation.
For example:
::
{
'uuid': uuidutils.generate_uuid(),
'name': 'example',
'hostcluster_id': '91c8dd07-14a2-4fd8-b084-915fa53552fd',
'properties': 'dns-zone:example.com.'
}
:returns: A federation.
"""
@abc.abstractmethod
def destroy_federation(self, federation_id):
"""Destroy a federation.
        This action *will not* destroy the host cluster or the member
        clusters.
:param federation_id: The id or uuid of a federation.
"""
@abc.abstractmethod
def update_federation(self, federation_id, values):
"""Update properties of a federation.
:param federation_id: The id or uuid of a federation.
:param values: A dict containing several items used to identify
and track the federation.
For example:
::
{
'uuid': uuidutils.generate_uuid(),
'name': 'example',
'hostcluster_id': '91c8dd07-14a2-4fd8-b084-915fa53552fd',
'properties': 'dns-zone:example.com.'
}
:returns: A federation.
:raises: FederationNotFound
"""
@abc.abstractmethod
def create_nodegroup(self, values):
"""Create a new nodegroup in cluster.
:param values: A dict containing several items used to identify
and track the nodegroup.
For example:
::
{
'uuid': uuidutils.generate_uuid(),
'name': 'example',
...
}
:returns: A nodegroup record.
"""
@abc.abstractmethod
def destroy_nodegroup(self, cluster_id, nodegroup_id):
"""Destroy a nodegroup.
:param cluster_id: The uuid of the cluster where the nodegroup
belongs to.
:param nodegroup_id: The id or uuid of the nodegroup
"""
@abc.abstractmethod
def update_nodegroup(self, cluster_id, nodegroup_id, values):
"""Update properties of a nodegroup.
:param cluster_id: The uuid of the cluster where the nodegroup
belongs to.
:param nodegroup_id: The id or uuid of a nodegroup.
:param values: A dict containing several items used to identify
and track the nodegroup.
For example:
::
{
'uuid': uuidutils.generate_uuid(),
'name': 'example',
...
}
:returns: A nodegroup record.
:raises: NodeGroupNotFound
"""
@abc.abstractmethod
def get_nodegroup_by_id(self, context, cluster_id, nodegroup_id):
"""Return a nodegroup for a given cluster uuid and nodegroup id.
:param cluster_id: The uuid of the cluster where the nodegroup
belongs to.
:param nodegroup_id: The id of a nodegroup.
:returns: A nodegroup record.
:raises: NodeGroupNotFound
"""
@abc.abstractmethod
def get_nodegroup_by_uuid(self, context, cluster_id, nodegroup_uuid):
"""Return a nodegroup for a given cluster uuid and nodegroup uuid.
:param cluster_id: The uuid of the cluster where the nodegroup
belongs to.
:param nodegroup_uuid: The uuid of a nodegroup.
:returns: A nodegroup record.
:raises: NodeGroupNotFound
"""
@abc.abstractmethod
def get_nodegroup_by_name(self, context, cluster_id, nodegroup_name):
"""Return a nodegroup for a given cluster uuid and nodegroup name.
:param cluster_id: The uuid of the cluster where the nodegroup
belongs to.
:param nodegroup_name: The name of a nodegroup.
:returns: A nodegroup record.
:raises: NodeGroupNotFound
"""
@abc.abstractmethod
def list_cluster_nodegroups(self, context, cluster_id, filters=None,
limit=None, marker=None, sort_key=None,
sort_dir=None):
"""Get matching nodegroups in a given cluster.
:param context: The security context
:param cluster_id: The uuid of the cluster where the nodegroup
belongs to.
:param filters: Filters to apply. Defaults to None.
:param limit: Maximum number of nodegroups to return.
:param marker: the last item of the previous page; we return the next
result set.
:param sort_key: Attribute by which results should be sorted.
:param sort_dir: direction in which results should be sorted.
(asc, desc)
:returns: A list of nodegroup records.
"""
@abc.abstractmethod
def get_cluster_nodegroup_count(self, context, cluster_id):
"""Get count of nodegroups in a given cluster.
:param cluster_id: The uuid of the cluster where the nodegroup
belongs to.
:returns: Count of matching clusters.
"""
|
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base classes for cursors.
These classes centralize common code.
"""
from vtdb import dbexceptions
class BasePEP0249Cursor(object):
"""Cursor with common PEP0249 implementations."""
def __init__(self):
self._clear_common_state()
self._conn = None
def callproc(self):
"""For PEP 0249."""
raise dbexceptions.NotSupportedError
def executemany(self, sql, params_list):
"""For PEP 0249."""
_ = sql, params_list
raise dbexceptions.NotSupportedError
def nextset(self):
"""For PEP 0249."""
raise dbexceptions.NotSupportedError
def setinputsizes(self, sizes):
"""For PEP 0249."""
_ = sizes
def setoutputsize(self, size, column=None):
"""For PEP 0249."""
_ = size, column
@property
def rownumber(self):
return self.index
def __iter__(self):
"""For PEP 0249: To make cursors compatible to the iteration protocol."""
return self
def next(self):
"""For PEP 0249."""
val = self.fetchone()
if val is None:
raise StopIteration
return val
def close(self):
"""For PEP 0249."""
raise NotImplementedError
def fetchone(self):
"""For PEP 0249."""
raise NotImplementedError
def fetchmany(self, size=None):
"""For PEP 0249."""
raise NotImplementedError
def fetchall(self):
"""For PEP 0249."""
raise NotImplementedError
def _clear_common_state(self):
self.index = 0
@property
def connection(self):
if not self._conn:
raise dbexceptions.ProgrammingError(
'Cannot use closed cursor %s.' % self.__class__)
return self._conn
class BaseListCursor(BasePEP0249Cursor):
"""Base cursor where results are stored as a list.
Execute call should return a (results, rowcount, lastrowid,
description) tuple. The fetch commands traverse self.results.
"""
arraysize = 1
def __init__(self, single_db=False, twopc=False):
super(BaseListCursor, self).__init__()
self._clear_list_state()
self.effective_caller_id = None
self.single_db = single_db
self.twopc = twopc
def _clear_list_state(self):
self._clear_common_state()
self.description = None
self.lastrowid = None
self.rowcount = None
self.results = None
def set_effective_caller_id(self, effective_caller_id):
"""Set the effective caller id that will be used in upcoming calls."""
self.effective_caller_id = effective_caller_id
def begin(self):
return self.connection.begin(
effective_caller_id=self.effective_caller_id,
single_db=self.single_db)
def commit(self):
return self.connection.commit(self.twopc)
def rollback(self):
return self.connection.rollback()
def _check_fetch(self):
if self.results is None:
raise dbexceptions.ProgrammingError('Fetch called before execute.')
def _handle_transaction_sql(self, sql):
sql_check = sql.strip().lower()
if sql_check == 'begin':
self.begin()
return True
elif sql_check == 'commit':
self.commit()
return True
elif sql_check == 'rollback':
self.rollback()
return True
else:
return False
def close(self):
self._clear_list_state()
self._conn = None
def fetchone(self):
self._check_fetch()
if self.index >= len(self.results):
return None
self.index += 1
return self.results[self.index - 1]
def fetchmany(self, size=None):
self._check_fetch()
if self.index >= len(self.results):
return []
if size is None:
size = self.arraysize
res = self.results[self.index:self.index + size]
self.index += size
return res
def fetchall(self):
self._check_fetch()
return self.fetchmany(len(self.results) - self.index)
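# Illustrative sketch (not part of vtdb): a minimal concrete list cursor.
# Per the BaseListCursor docstring, an execute call stores (results,
# rowcount, lastrowid, description) on the cursor; the connection hook
# used below is hypothetical.
class _ExampleListCursor(BaseListCursor):
  def execute(self, sql, bind_variables=None):
    if self._handle_transaction_sql(sql):
      return
    self._clear_list_state()
    (self.results, self.rowcount, self.lastrowid,
     self.description) = self.connection._execute(sql, bind_variables)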
class BaseStreamCursor(BasePEP0249Cursor):
"""Base cursor where results are returned as a generator.
This supports large queries. An execute call returns a (generator,
description) pair. The fetch functions read items from the generator
until it is exhausted.
"""
arraysize = 1
def __init__(self):
super(BaseStreamCursor, self).__init__()
self._clear_stream_state()
self.effective_caller_id = None
def set_effective_caller_id(self, effective_caller_id):
"""Set the effective caller id that will be used in upcoming calls."""
self.effective_caller_id = effective_caller_id
def _clear_stream_state(self):
self._clear_common_state()
self.description = None
self.generator = None
def fetchone(self):
if self.description is None:
raise dbexceptions.ProgrammingError('Fetch called before execute.')
self.index += 1
try:
return self.generator.next()
except StopIteration:
return None
  # fetchmany can be called until it returns no rows. Returning fewer rows
  # than requested is also an indication we ran out, but the cursor
  # API in PEP 249 is silent about that.
def fetchmany(self, size=None):
if size is None:
size = self.arraysize
result = []
for _ in xrange(size):
row = self.fetchone()
if row is None:
break
result.append(row)
return result
def fetchall(self):
result = []
while True:
row = self.fetchone()
if row is None:
break
result.append(row)
return result
def close(self):
if self.generator:
self.generator.close()
self._clear_stream_state()
self._conn = None
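# Illustrative usage sketch: draining a stream cursor in batches. As the
# comment above fetchmany() notes, an empty batch is the reliable
# end-of-results signal.
def _example_drain(cursor, batch_size=100):
  while True:
    rows = cursor.fetchmany(batch_size)
    if not rows:
      break
    for row in rows:
      yield row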
|
|
from direct.showbase.DirectObject import DirectObject
from DirectGlobals import *
from DirectUtil import *
from DirectGeometry import *
COA_ORIGIN = 0
COA_CENTER = 1
# MRM: To do: handle broken node paths in selected and deselected dicts
class DirectNodePath(NodePath):
# A node path augmented with info, bounding box, and utility methods
def __init__(self, nodePath, bboxColor=None):
# Initialize the superclass
NodePath.__init__(self)
self.assign(nodePath)
# Create a bounding box
self.bbox = DirectBoundingBox(self, bboxColor)
center = self.bbox.getCenter()
# Create matrix to hold the offset between the nodepath
# and its center of action (COA)
self.mCoa2Dnp = Mat4(Mat4.identMat())
if base.direct.coaMode == COA_CENTER:
self.mCoa2Dnp.setRow(3, Vec4(center[0], center[1], center[2], 1))
# Transform from nodePath to widget
self.tDnp2Widget = TransformState.makeIdentity()
def highlight(self, fRecompute = 1):
if fRecompute:
pass
#self.bbox.recompute()
self.bbox.show()
def dehighlight(self):
self.bbox.hide()
def getCenter(self):
return self.bbox.getCenter()
def getRadius(self):
return self.bbox.getRadius()
def getMin(self):
return self.bbox.getMin()
def getMax(self):
return self.bbox.getMax()
class SelectedNodePaths(DirectObject):
def __init__(self):
self.reset()
self.tagList = []
def addTag(self, tag):
if tag not in self.tagList:
self.tagList.append(tag)
def removeTag(self, tag):
self.tagList.remove(tag)
def reset(self):
self.selectedDict = {}
self.selectedList = [] # [gjeon] to maintain selected order
self.deselectedDict = {}
__builtins__["last"] = self.last = None
def select(self, nodePath, fMultiSelect = 0, fSelectTag = 1):
""" Select the specified node path. Multiselect as required """
# Do nothing if nothing selected
if not nodePath:
            print('Nothing selected!!')
return None
# Reset selected objects and highlight if multiSelect is false
if not fMultiSelect:
self.deselectAll()
# Select tagged object if present
if fSelectTag:
for tag in self.tagList:
if nodePath.hasNetTag(tag):
nodePath = nodePath.findNetTag(tag)
break
# Get this pointer
id = nodePath.get_key()
# First see if its already in the selected dictionary
dnp = self.getSelectedDict(id)
# If so, deselect it
if dnp:
self.deselect(nodePath)
return None
else:
# See if it is in the deselected dictionary
dnp = self.getDeselectedDict(id)
if dnp:
# Remove it from the deselected dictionary
del(self.deselectedDict[id])
# Show its bounding box
dnp.highlight()
else:
# Didn't find it, create a new selectedNodePath instance
dnp = DirectNodePath(nodePath)
# Show its bounding box
dnp.highlight(fRecompute = 0)
# Add it to the selected dictionary
self.selectedDict[dnp.get_key()] = dnp
self.selectedList.append(dnp) # [gjeon]
# And update last
__builtins__["last"] = self.last = dnp
# Update cluster servers if this is a cluster client
if base.direct.clusterMode == 'client':
cluster.selectNodePath(dnp)
return dnp
def deselect(self, nodePath):
""" Deselect the specified node path """
# Get this pointer
id = nodePath.get_key()
# See if it is in the selected dictionary
dnp = self.getSelectedDict(id)
if dnp:
# It was selected:
# Hide its bounding box
dnp.dehighlight()
# Remove it from the selected dictionary
del(self.selectedDict[id])
if dnp in self.selectedList: # [gjeon]
self.selectedList.remove(dnp)
# And keep track of it in the deselected dictionary
self.deselectedDict[id] = dnp
# Send a message
messenger.send('DIRECT_deselectedNodePath', [dnp])
# Update cluster servers if this is a cluster client
if base.direct.clusterMode == 'client':
cluster.deselectNodePath(dnp)
return dnp
def getSelectedAsList(self):
"""
Return a list of all selected node paths. No verification of
connectivity is performed on the members of the list
"""
#return self.selectedDict.values()[:]
return self.selectedList[:] # [gjeon] now return the list with selected order
def __getitem__(self, index):
return self.getSelectedAsList()[index]
def getSelectedDict(self, id):
"""
Search selectedDict for node path, try to repair broken node paths.
"""
dnp = self.selectedDict.get(id, None)
if dnp:
return dnp
else:
# Not in selected dictionary
return None
def getDeselectedAsList(self):
return self.deselectedDict.values()[:]
def getDeselectedDict(self, id):
"""
Search deselectedDict for node path, try to repair broken node paths.
"""
dnp = self.deselectedDict.get(id, None)
if dnp:
# Yes
return dnp
else:
# Not in deselected dictionary
return None
def forEachSelectedNodePathDo(self, func):
"""
Perform given func on selected node paths. No node path
connectivity verification performed
"""
selectedNodePaths = self.getSelectedAsList()
for nodePath in selectedNodePaths:
func(nodePath)
def forEachDeselectedNodePathDo(self, func):
"""
Perform given func on deselected node paths. No node path
connectivity verification performed
"""
deselectedNodePaths = self.getDeselectedAsList()
for nodePath in deselectedNodePaths:
func(nodePath)
def getWrtAll(self):
self.forEachSelectedNodePathDo(self.getWrt)
def getWrt(self, nodePath):
nodePath.tDnp2Widget = nodePath.getTransform(base.direct.widget)
def moveWrtWidgetAll(self):
self.forEachSelectedNodePathDo(self.moveWrtWidget)
def moveWrtWidget(self, nodePath):
nodePath.setTransform(base.direct.widget, nodePath.tDnp2Widget)
def deselectAll(self):
self.forEachSelectedNodePathDo(self.deselect)
def highlightAll(self):
self.forEachSelectedNodePathDo(DirectNodePath.highlight)
def dehighlightAll(self):
self.forEachSelectedNodePathDo(DirectNodePath.dehighlight)
def removeSelected(self):
selected = self.last
if selected:
selected.remove()
__builtins__["last"] = self.last = None
def removeAll(self):
# Remove all selected nodePaths from the Scene Graph
self.forEachSelectedNodePathDo(NodePath.remove)
def toggleVisSelected(self):
selected = self.last
# Toggle visibility of selected node paths
if selected:
selected.toggleVis()
def toggleVisAll(self):
# Toggle viz for all selected node paths
self.forEachSelectedNodePathDo(NodePath.toggleVis)
def isolateSelected(self):
selected = self.last
if selected:
selected.isolate()
def getDirectNodePath(self, nodePath):
# Get this pointer
id = nodePath.get_key()
# First check selected dict
dnp = self.getSelectedDict(id)
if dnp:
return dnp
# Otherwise return result of deselected search
return self.getDeselectedDict(id)
def getNumSelected(self):
return len(self.selectedDict.keys())
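def _exampleToggleSelection(nodePath):
    """Illustrative sketch (not part of DIRECT): selecting a node path
    highlights it, and selecting it a second time deselects it, as the
    select() logic above shows. Assumes a DIRECT session (base.direct)."""
    selected = SelectedNodePaths()
    selected.select(nodePath)        # highlighted, stored in selectedDict
    selected.select(nodePath)        # a repeat select() deselects it
    return selected.getNumSelected() # 0 after the toggle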
class DirectBoundingBox:
def __init__(self, nodePath, bboxColor=None):
# Record the node path
self.nodePath = nodePath
# Compute bounds, min, max, etc.
self.computeTightBounds()
# Generate the bounding box
self.lines = self.createBBoxLines(bboxColor)
def recompute(self):
# Compute bounds, min, max, etc.
self.computeTightBounds()
self.updateBBoxLines()
def computeTightBounds(self):
# Compute bounding box using tighter calcTightBounds function
# Need to clear out existing transform on node path
tMat = Mat4(self.nodePath.getMat())
self.nodePath.clearMat()
# Get bounds
self.min = Point3(0)
self.max = Point3(0)
self.nodePath.calcTightBounds(self.min, self.max)
# Calc center and radius
self.center = Point3((self.min + self.max)/2.0)
self.radius = Vec3(self.max - self.min).length()
# Restore transform
self.nodePath.setMat(tMat)
del tMat
def computeBounds(self):
self.bounds = self.getBounds()
if self.bounds.isEmpty() or self.bounds.isInfinite():
self.center = Point3(0)
self.radius = 1.0
else:
self.center = self.bounds.getCenter()
self.radius = self.bounds.getRadius()
self.min = Point3(self.center - Point3(self.radius))
self.max = Point3(self.center + Point3(self.radius))
def createBBoxLines(self, bboxColor=None):
# Create a line segments object for the bbox
lines = LineNodePath(hidden)
lines.node().setName('bboxLines')
if (bboxColor):
lines.setColor(VBase4(*bboxColor))
else:
lines.setColor(VBase4(1., 0., 0., 1.))
lines.setThickness(0.5)
minX = self.min[0]
minY = self.min[1]
minZ = self.min[2]
maxX = self.max[0]
maxY = self.max[1]
maxZ = self.max[2]
# Bottom face
lines.moveTo(minX, minY, minZ)
lines.drawTo(maxX, minY, minZ)
lines.drawTo(maxX, maxY, minZ)
lines.drawTo(minX, maxY, minZ)
lines.drawTo(minX, minY, minZ)
# Front Edge/Top face
lines.drawTo(minX, minY, maxZ)
lines.drawTo(maxX, minY, maxZ)
lines.drawTo(maxX, maxY, maxZ)
lines.drawTo(minX, maxY, maxZ)
lines.drawTo(minX, minY, maxZ)
# Three remaining edges
lines.moveTo(maxX, minY, minZ)
lines.drawTo(maxX, minY, maxZ)
lines.moveTo(maxX, maxY, minZ)
lines.drawTo(maxX, maxY, maxZ)
lines.moveTo(minX, maxY, minZ)
lines.drawTo(minX, maxY, maxZ)
# Create and return bbox lines
lines.create()
# Make sure bbox is never lit or drawn in wireframe
useDirectRenderStyle(lines)
return lines
def setBoxColorScale(self, r, g, b, a):
if (self.lines):
self.lines.reset()
self.lines = None
self.lines = self.createBBoxLines((r, g, b, a))
self.show()
def updateBBoxLines(self):
ls = self.lines.lineSegs
minX = self.min[0]
minY = self.min[1]
minZ = self.min[2]
maxX = self.max[0]
maxY = self.max[1]
maxZ = self.max[2]
# Bottom face
ls.setVertex(0, minX, minY, minZ)
ls.setVertex(1, maxX, minY, minZ)
ls.setVertex(2, maxX, maxY, minZ)
ls.setVertex(3, minX, maxY, minZ)
ls.setVertex(4, minX, minY, minZ)
# Front Edge/Top face
ls.setVertex(5, minX, minY, maxZ)
ls.setVertex(6, maxX, minY, maxZ)
ls.setVertex(7, maxX, maxY, maxZ)
ls.setVertex(8, minX, maxY, maxZ)
ls.setVertex(9, minX, minY, maxZ)
# Three remaining edges
ls.setVertex(10, maxX, minY, minZ)
ls.setVertex(11, maxX, minY, maxZ)
ls.setVertex(12, maxX, maxY, minZ)
ls.setVertex(13, maxX, maxY, maxZ)
ls.setVertex(14, minX, maxY, minZ)
ls.setVertex(15, minX, maxY, maxZ)
def getBounds(self):
# Get a node path's bounds
nodeBounds = BoundingSphere()
nodeBounds.extendBy(self.nodePath.node().getInternalBound())
for child in self.nodePath.getChildren():
nodeBounds.extendBy(child.getBounds())
return nodeBounds.makeCopy()
def show(self):
self.lines.reparentTo(self.nodePath)
def hide(self):
self.lines.reparentTo(hidden)
def getCenter(self):
return self.center
def getRadius(self):
return self.radius
def getMin(self):
return self.min
def getMax(self):
return self.max
def vecAsString(self, vec):
return '%.2f %.2f %.2f' % (vec[0], vec[1], vec[2])
def __repr__(self):
return (repr(self.__class__) +
'\nNodePath:\t%s\n' % self.nodePath.getName() +
'Min:\t\t%s\n' % self.vecAsString(self.min) +
'Max:\t\t%s\n' % self.vecAsString(self.max) +
'Center:\t\t%s\n' % self.vecAsString(self.center) +
'Radius:\t\t%.2f' % self.radius
)
class SelectionQueue(CollisionHandlerQueue):
def __init__(self, parentNP = None):
if parentNP is None:
parentNP = render
# Initialize the superclass
CollisionHandlerQueue.__init__(self)
# Current index and entry in collision queue
self.index = -1
self.entry = None
self.skipFlags = SKIP_NONE
# Create a collision node path attached to the given NP
self.collisionNodePath = NodePath(CollisionNode("collisionNP"))
self.setParentNP(parentNP)
# Don't pay the penalty of drawing this collision ray
self.collisionNodePath.hide()
self.collisionNode = self.collisionNodePath.node()
# Intersect with geometry to begin with
self.collideWithGeom()
# And a traverser to do the actual collision tests
self.ct = CollisionTraverser("DirectSelection")
self.ct.setRespectPrevTransform(False)
# Let the traverser know about the collision node and the queue
self.ct.addCollider(self.collisionNodePath, self)
# List of objects that can't be selected
self.unpickable = UNPICKABLE
# Derived class must add Collider to complete initialization
def setParentNP(self, parentNP):
# Update collisionNodePath's parent
self.collisionNodePath.reparentTo(parentNP)
def addCollider(self, collider):
# Inherited class must call this function to specify collider object
# Record collision object
self.collider = collider
# Add the collider to the collision Node
self.collisionNode.addSolid(self.collider)
def collideWithBitMask(self, bitMask):
# The into collide mask is the bit pattern colliders look at
# when deciding whether or not to test for a collision "into"
# this collision solid. Set to all Off so this collision solid
# will not be considered in any collision tests
self.collisionNode.setIntoCollideMask(BitMask32().allOff())
# The from collide mask is the bit pattern *this* collision solid
# compares against the into collide mask of candidate collision solids
# Turn this mask all off since we're not testing for collisions against
# collision solids
self.collisionNode.setFromCollideMask(bitMask)
def collideWithGeom(self):
# The into collide mask is the bit pattern colliders look at
# when deciding whether or not to test for a collision "into"
# this collision solid. Set to all Off so this collision solid
# will not be considered in any collision tests
self.collisionNode.setIntoCollideMask(BitMask32().allOff())
# The from collide mask is the bit pattern *this* collision solid
# compares against the into collide mask of candidate collision solids
# Turn this mask all off since we're not testing for collisions against
# collision solids
self.collisionNode.setFromCollideMask(GeomNode.getDefaultCollideMask())
def collideWithWidget(self):
# This collision node should not be tested against by any other
# collision solids
self.collisionNode.setIntoCollideMask(BitMask32().allOff())
# This collision node will test for collisions with any collision
# solids with a bit mask set to 0x80000000
mask = BitMask32()
mask.setWord(0x80000000)
self.collisionNode.setFromCollideMask(mask)
def addUnpickable(self, item):
if item not in self.unpickable:
self.unpickable.append(item)
def removeUnpickable(self, item):
if item in self.unpickable:
self.unpickable.remove(item)
def setCurrentIndex(self, index):
if (index < 0) or (index >= self.getNumEntries()):
self.index = -1
else:
self.index = index
def setCurrentEntry(self, entry):
self.entry = entry
def getCurrentEntry(self):
return self.entry
def isEntryBackfacing(self, entry):
# If dot product of collision point surface normal and
# ray from camera to collision point is positive, we are
# looking at the backface of the polygon
if not entry.hasSurfaceNormal():
# Well, no way to tell. Assume we're not backfacing.
return 0
if direct:
cam = base.direct.cam
else:
cam = base.cam
fromNodePath = entry.getFromNodePath()
v = Vec3(entry.getSurfacePoint(fromNodePath))
n = entry.getSurfaceNormal(fromNodePath)
# Convert to camera space for backfacing test
if self.collisionNodePath.getParent() != cam:
# Problem: assumes base.cam is the camera in question
p2cam = self.collisionNodePath.getParent().getMat(cam)
v = Vec3(p2cam.xformPoint(v))
n = p2cam.xformVec(n)
        # Normalize and check angle between the two vectors
v.normalize()
return v.dot(n) >= 0
def findNextCollisionEntry(self, skipFlags = SKIP_NONE):
return self.findCollisionEntry(skipFlags, self.index + 1)
def findCollisionEntry(self, skipFlags = SKIP_NONE, startIndex = 0):
# Init self.index and self.entry
self.setCurrentIndex(-1)
self.setCurrentEntry(None)
# Pick out the closest object that isn't a widget
for i in range(startIndex, self.getNumEntries()):
entry = self.getEntry(i)
nodePath = entry.getIntoNodePath()
if (skipFlags & SKIP_HIDDEN) and nodePath.isHidden():
# Skip if hidden node
pass
elif (skipFlags & SKIP_BACKFACE) and self.isEntryBackfacing(entry):
# Skip, if backfacing poly
pass
elif ((skipFlags & SKIP_CAMERA) and
(camera in nodePath.getAncestors())):
# Skip if parented to a camera.
pass
# Can pick unpickable, use the first visible node
elif ((skipFlags & SKIP_UNPICKABLE) and
(nodePath.getName() in self.unpickable)):
# Skip if in unpickable list
pass
elif base.direct and\
((skipFlags & SKIP_WIDGET) and
(nodePath.getTag('WidgetName') != base.direct.widget.getName())):
                # Skip if this widget part does not belong to the current widget
pass
elif base.direct and\
((skipFlags & SKIP_WIDGET) and base.direct.fControl and
(nodePath.getName()[2:] == 'ring')):
                # Skip when scaling non-uniformly in an ortho view
pass
else:
self.setCurrentIndex(i)
self.setCurrentEntry(entry)
break
return self.getCurrentEntry()
class SelectionRay(SelectionQueue):
def __init__(self, parentNP = None):
if parentNP is None:
parentNP = render
# Initialize the superclass
SelectionQueue.__init__(self, parentNP)
self.addCollider(CollisionRay())
def pick(self, targetNodePath, xy = None):
# Determine ray direction based upon the mouse coordinates
if xy:
mx = xy[0]
my = xy[1]
elif direct:
mx = base.direct.dr.mouseX
my = base.direct.dr.mouseY
else:
if not base.mouseWatcherNode.hasMouse():
# No mouse in window.
self.clearEntries()
return
mx = base.mouseWatcherNode.getMouseX()
my = base.mouseWatcherNode.getMouseY()
if direct:
self.collider.setFromLens(base.direct.camNode, mx, my)
else:
self.collider.setFromLens(base.camNode, mx, my)
self.ct.traverse(targetNodePath)
self.sortEntries()
def pickBitMask(self, bitMask = BitMask32.allOff(),
targetNodePath = None,
skipFlags = SKIP_ALL):
        if targetNodePath is None:
            targetNodePath = render
self.collideWithBitMask(bitMask)
self.pick(targetNodePath)
# Determine collision entry
return self.findCollisionEntry(skipFlags)
def pickGeom(self, targetNodePath = None, skipFlags = SKIP_ALL,
xy = None):
if targetNodePath is None:
targetNodePath = render
self.collideWithGeom()
self.pick(targetNodePath, xy = xy)
# Determine collision entry
return self.findCollisionEntry(skipFlags)
def pickWidget(self, targetNodePath = None, skipFlags = SKIP_NONE):
if targetNodePath is None:
targetNodePath = render
self.collideWithWidget()
self.pick(targetNodePath)
# Determine collision entry
return self.findCollisionEntry(skipFlags)
def pick3D(self, targetNodePath, origin, dir):
# Determine ray direction based upon the mouse coordinates
self.collider.setOrigin(origin)
self.collider.setDirection(dir)
self.ct.traverse(targetNodePath)
self.sortEntries()
def pickGeom3D(self, targetNodePath = None,
origin = Point3(0), dir = Vec3(0, 0, -1),
skipFlags = SKIP_HIDDEN | SKIP_CAMERA):
if targetNodePath is None:
targetNodePath = render
self.collideWithGeom()
self.pick3D(targetNodePath, origin, dir)
# Determine collision entry
return self.findCollisionEntry(skipFlags)
def pickBitMask3D(self, bitMask = BitMask32.allOff(),
targetNodePath = None,
origin = Point3(0), dir = Vec3(0, 0, -1),
skipFlags = SKIP_ALL):
if targetNodePath is None:
targetNodePath = render
self.collideWithBitMask(bitMask)
self.pick3D(targetNodePath, origin, dir)
# Determine collision entry
return self.findCollisionEntry(skipFlags)
class SelectionSegment(SelectionQueue):
# Like a selection ray but with two endpoints instead of an endpoint
# and a direction
def __init__(self, parentNP = None, numSegments = 1):
if parentNP is None:
parentNP = render
# Initialize the superclass
SelectionQueue.__init__(self, parentNP)
self.colliders = []
self.numColliders = 0
for i in range(numSegments):
self.addCollider(CollisionSegment())
def addCollider(self, collider):
# Record new collision object
self.colliders.append(collider)
# Add the collider to the collision Node
self.collisionNode.addSolid(collider)
self.numColliders += 1
def pickGeom(self, targetNodePath = None, endPointList = [],
skipFlags = SKIP_HIDDEN | SKIP_CAMERA):
if targetNodePath is None:
targetNodePath = render
self.collideWithGeom()
for i in range(min(len(endPointList), self.numColliders)):
pointA, pointB = endPointList[i]
collider = self.colliders[i]
collider.setPointA(pointA)
collider.setPointB(pointB)
self.ct.traverse(targetNodePath)
# Determine collision entry
return self.findCollisionEntry(skipFlags)
def pickBitMask(self, bitMask = BitMask32.allOff(),
targetNodePath = None, endPointList = [],
skipFlags = SKIP_HIDDEN | SKIP_CAMERA):
if targetNodePath is None:
targetNodePath = render
self.collideWithBitMask(bitMask)
for i in range(min(len(endPointList), self.numColliders)):
pointA, pointB = endPointList[i]
collider = self.colliders[i]
collider.setPointA(pointA)
collider.setPointB(pointB)
self.ct.traverse(targetNodePath)
# Determine collision entry
return self.findCollisionEntry(skipFlags)
class SelectionSphere(SelectionQueue):
# Wrapper around collision sphere
def __init__(self, parentNP = None, numSpheres = 1):
if parentNP is None:
parentNP = render
# Initialize the superclass
SelectionQueue.__init__(self, parentNP)
self.colliders = []
self.numColliders = 0
for i in range(numSpheres):
self.addCollider(CollisionSphere(Point3(0), 1))
def addCollider(self, collider):
# Record new collision object
self.colliders.append(collider)
# Add the collider to the collision Node
self.collisionNode.addSolid(collider)
self.numColliders += 1
def setCenter(self, i, center):
c = self.colliders[i]
c.setCenter(center)
def setRadius(self, i, radius):
c = self.colliders[i]
c.setRadius(radius)
def setCenterRadius(self, i, center, radius):
c = self.colliders[i]
c.setCenter(center)
c.setRadius(radius)
def isEntryBackfacing(self, entry):
        # If the dot product of the surface normal at the collision point
        # and the vector from the sphere center to the collision point is
        # positive, the center is on the back side of the polygon
fromNodePath = entry.getFromNodePath()
v = Vec3(entry.getSurfacePoint(fromNodePath) -
entry.getFrom().getCenter())
n = entry.getSurfaceNormal(fromNodePath)
# If points almost on top of each other, reject face
# (treat as backfacing)
if v.length() < 0.05:
return 1
        # Normalize and check the angle between the two vectors
v.normalize()
return v.dot(n) >= 0
def pick(self, targetNodePath, skipFlags):
self.ct.traverse(targetNodePath)
self.sortEntries()
return self.findCollisionEntry(skipFlags)
def pickGeom(self, targetNodePath = None,
skipFlags = SKIP_HIDDEN | SKIP_CAMERA):
if targetNodePath is None:
targetNodePath = render
self.collideWithGeom()
return self.pick(targetNodePath, skipFlags)
def pickBitMask(self, bitMask = BitMask32.allOff(),
targetNodePath = None,
skipFlags = SKIP_HIDDEN | SKIP_CAMERA):
if targetNodePath is None:
targetNodePath = render
self.collideWithBitMask(bitMask)
return self.pick(targetNodePath, skipFlags)
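# A minimal usage sketch for the sphere picker above (hypothetical scene;
# assumes a running ShowBase so that render exists, and that Point3 is
# available as it is elsewhere in this module):
def demoSpherePick():
    queue = SelectionSphere(numSpheres = 2)
    # Position the two collision spheres in front of the camera
    queue.setCenterRadius(0, Point3(0, 10, 0), 1.0)
    queue.setCenterRadius(1, Point3(5, 10, 0), 2.0)
    # pickGeom returns the nearest acceptable CollisionEntry, or None
    entry = queue.pickGeom(targetNodePath = render)
    if entry:
        print(entry.getIntoNodePath())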
|
|
"""Module to determine the pywikibot version (tag, revision and date)."""
#
# (C) Pywikibot team, 2007-2021
#
# Distributed under the terms of the MIT license.
#
import datetime
import json
import os
import pathlib
import socket
import subprocess
import sys
import sysconfig
import time
import xml.dom.minidom
from contextlib import closing, suppress
from importlib import import_module
from io import BytesIO
from typing import Optional
from warnings import warn
import pywikibot
from pywikibot import config
from pywikibot.backports import List, cache
from pywikibot.comms.http import fetch
from pywikibot.exceptions import VersionParseError
_logger = 'version'
def _get_program_dir():
_program_dir = os.path.normpath(
os.path.split(os.path.dirname(__file__))[0])
return _program_dir
def get_toolforge_hostname() -> Optional[str]:
"""Get hostname of the current Toolforge host.
.. versionadded:: 3.0
:return: The hostname of the currently running host,
if it is in Wikimedia Toolforge; otherwise return None.
"""
if socket.getfqdn().endswith('.tools.eqiad.wmflabs'):
return socket.gethostname()
return None
def getversion(online: bool = True) -> str:
"""Return a pywikibot version string.
:param online: Include information obtained online
"""
branches = {
'master': 'branches/master',
'stable': 'branches/stable',
}
data = getversiondict()
data['cmp_ver'] = 'n/a'
local_hsh = data.get('hsh', '')
hsh = {}
if online:
if not local_hsh:
data['cmp_ver'] = 'UNKNOWN'
else:
for branch, path in branches.items():
with suppress(Exception):
hsh[getversion_onlinerepo(path)] = branch
if hsh:
data['cmp_ver'] = hsh.get(local_hsh, 'OUTDATED')
data['hsh'] = local_hsh[:7] # make short hash from full hash
return '{tag} ({hsh}, {rev}, {date}, {cmp_ver})'.format_map(data)
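# Illustrative result (all values made up): with a git checkout and
# online=False the string looks like
#   '[https] wikimedia-pywikibot-core (abc1234, g6789, 2021/01/01, 12:00:00, n/a)'
# while the online comparison replaces 'n/a' with the matching branch name,
# 'OUTDATED', or 'UNKNOWN' when no local hash is available.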
@cache
def getversiondict():
"""Get version info for the package.
:return:
- tag (name for the repository),
- rev (current revision identifier),
- date (date of current revision),
- hash (git hash for the current revision)
:rtype: ``dict`` of four ``str``
"""
_program_dir = _get_program_dir()
exceptions = {}
for vcs_func in (getversion_git,
getversion_svn,
getversion_nightly,
getversion_package):
try:
(tag, rev, date, hsh) = vcs_func(_program_dir)
except Exception as e:
exceptions[vcs_func] = e
else:
break
else:
# nothing worked; version unknown (but suppress exceptions)
# the value is most likely '$Id' + '$', it means that
# pywikibot was imported without using version control at all.
tag, rev, date, hsh = (
'', '-1 (unknown)', '0 (unknown)', '(unknown)')
warn('Unable to detect version; exceptions raised:\n{!r}'
.format(exceptions), UserWarning)
exceptions = None
# Git and SVN can silently fail, as it may be a nightly.
if exceptions:
pywikibot.debug('version algorithm exceptions:\n{!r}'
.format(exceptions), _logger)
if isinstance(date, str):
datestring = date
elif isinstance(date, time.struct_time):
datestring = time.strftime('%Y/%m/%d, %H:%M:%S', date)
else:
warn('Unable to detect package date', UserWarning)
datestring = '-2 (unknown)'
return {'tag': tag, 'rev': rev, 'date': datestring, 'hsh': hsh}
def svn_rev_info(path): # pragma: no cover
"""Fetch information about the current revision of a Subversion checkout.
:param path: directory of the Subversion checkout
:return:
- tag (name for the repository),
- rev (current Subversion revision identifier),
- date (date of current revision),
:rtype: ``tuple`` of two ``str`` and a ``time.struct_time``
"""
if not os.path.isdir(os.path.join(path, '.svn')):
path = os.path.join(path, '..')
_program_dir = path
filename = os.path.join(_program_dir, '.svn/entries')
if os.path.isfile(filename):
with open(filename) as entries:
version = entries.readline().strip()
if version != '12':
for _ in range(3):
entries.readline()
tag = entries.readline().strip()
t = tag.split('://', 1)
t[1] = t[1].replace('svn.wikimedia.org/svnroot/pywikipedia/',
'')
tag = '[{}] {}'.format(*t)
for _ in range(4):
entries.readline()
date = time.strptime(entries.readline()[:19],
'%Y-%m-%dT%H:%M:%S')
rev = entries.readline()[:-1]
return tag, rev, date
    # We haven't found the information in the entries file.
    # Use the sqlite table for the new entries format.
from sqlite3 import dbapi2 as sqlite
with closing(
sqlite.connect(os.path.join(_program_dir, '.svn/wc.db'))) as con:
cur = con.cursor()
cur.execute("""select
local_relpath, repos_path, revision, changed_date, checksum from nodes
order by revision desc, changed_date desc""")
name, tag, rev, date, checksum = cur.fetchone()
cur.execute('select root from repository')
tag, = cur.fetchone()
tag = os.path.split(tag)[1]
date = time.gmtime(date / 1000000)
return tag, rev, date
def github_svn_rev2hash(tag: str, rev): # pragma: no cover
"""Convert a Subversion revision to a Git hash using Github.
:param tag: name of the Subversion repo on Github
:param rev: Subversion revision identifier
:return: the git hash
"""
uri = 'https://github.com/wikimedia/{}/!svn/vcc/default'.format(tag)
request = fetch(uri, method='PROPFIND',
data="<?xml version='1.0' encoding='utf-8'?>"
'<propfind xmlns=\"DAV:\"><allprop/></propfind>',
headers={'label': str(rev),
'user-agent': 'SVN/1.7.5 {pwb}'})
dom = xml.dom.minidom.parse(BytesIO(request.content))
hsh = dom.getElementsByTagName('C:git-commit')[0].firstChild.nodeValue
date = dom.getElementsByTagName('S:date')[0].firstChild.nodeValue
date = time.strptime(date[:19], '%Y-%m-%dT%H:%M:%S')
return hsh, date
def getversion_svn(path=None): # pragma: no cover
"""Get version info for a Subversion checkout.
:param path: directory of the Subversion checkout
:return:
- tag (name for the repository),
- rev (current Subversion revision identifier),
- date (date of current revision),
- hash (git hash for the Subversion revision)
:rtype: ``tuple`` of three ``str`` and a ``time.struct_time``
"""
_program_dir = path or _get_program_dir()
tag, rev, date = svn_rev_info(_program_dir)
hsh, date2 = github_svn_rev2hash(tag, rev)
if date.tm_isdst >= 0 and date2.tm_isdst >= 0:
assert date == date2, 'Date of version is not consistent'
    # date.tm_isdst == -1 means the DST state is unknown;
    # compare the date fields except the daylight saving time flag
else:
for i in range(len(date) - 1):
assert date[i] == date2[i], 'Date of version is not consistent'
rev = 's{}'.format(rev)
if (not date or not tag or not rev) and not path:
raise VersionParseError
return (tag, rev, date, hsh)
def getversion_git(path=None):
"""Get version info for a Git clone.
:param path: directory of the Git checkout
:return:
- tag (name for the repository),
- rev (current revision identifier),
- date (date of current revision),
- hash (git hash for the current revision)
:rtype: ``tuple`` of three ``str`` and a ``time.struct_time``
"""
_program_dir = path or _get_program_dir()
cmd = 'git'
try:
subprocess.Popen([cmd], stdout=subprocess.PIPE).communicate()
except OSError:
# some Windows git versions provide git.cmd instead of git.exe
cmd = 'git.cmd'
with open(os.path.join(_program_dir, '.git/config')) as f:
tag = f.read()
# Try 'origin' and then 'gerrit' as remote name; bail if can't find either.
remote_pos = tag.find('[remote "origin"]')
if remote_pos == -1:
remote_pos = tag.find('[remote "gerrit"]')
if remote_pos == -1:
tag = '?'
else:
s = tag.find('url = ', remote_pos)
e = tag.find('\n', s)
tag = tag[(s + 6):e]
t = tag.strip().split('/')
tag = '[{}] {}'.format(t[0][:-1], '-'.join(t[3:]))
dp = subprocess.Popen([cmd, '--no-pager',
'log', '-1',
'--pretty=format:"%ad|%an|%h|%H|%d"',
'--abbrev-commit',
'--date=iso'],
cwd=_program_dir,
stdout=subprocess.PIPE)
info, stderr = dp.communicate()
info = info.decode(config.console_encoding).split('|')
date = info[0][:-6]
date = time.strptime(date.strip('"'), '%Y-%m-%d %H:%M:%S')
dp = subprocess.Popen([cmd, 'rev-list', 'HEAD'],
cwd=_program_dir,
stdout=subprocess.PIPE)
rev, stderr = dp.communicate()
rev = 'g{}'.format(len(rev.splitlines()))
hsh = info[3] # also stored in '.git/refs/heads/master'
if (not date or not tag or not rev) and not path:
raise VersionParseError
return (tag, rev, date, hsh)
def getversion_nightly(path=None): # pragma: no cover
"""Get version info for a nightly release.
:param path: directory of the uncompressed nightly.
:return:
- tag (name for the repository),
- rev (current revision identifier),
- date (date of current revision),
- hash (git hash for the current revision)
:rtype: ``tuple`` of three ``str`` and a ``time.struct_time``
"""
if not path:
path = _get_program_dir()
with open(os.path.join(path, 'version')) as data:
(tag, rev, date, hsh) = data.readlines()
date = time.strptime(date[:19], '%Y-%m-%dT%H:%M:%S')
if not date or not tag or not rev:
raise VersionParseError
return (tag, rev, date, hsh)
def getversion_package(path=None):
"""Get version info for an installed package.
:param path: Unused argument
:return:
- tag: 'pywikibot/__init__.py'
- rev: '-1 (unknown)'
- date (date the package was installed locally),
- hash (git hash for the current revision of 'pywikibot/__init__.py')
:rtype: ``tuple`` of four ``str``
"""
hsh = ''
date = get_module_mtime(pywikibot).timetuple()
tag = 'pywikibot/__init__.py'
rev = '-1 (unknown)'
return (tag, rev, date, hsh)
def getversion_onlinerepo(path: str = 'branches/master'):
"""Retrieve current framework git hash from Gerrit."""
from pywikibot.comms import http
# Gerrit API responses include )]}' at the beginning,
# make sure to strip it out
buf = http.fetch(
'https://gerrit.wikimedia.org/r/projects/pywikibot%2Fcore/' + path,
headers={'user-agent': '{pwb}'}).text[4:]
try:
hsh = json.loads(buf)['revision']
return hsh
except Exception as e:
raise VersionParseError('{!r} while parsing {!r}'.format(e, buf))
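# Illustrative raw Gerrit payload (hash made up); the XSSI guard ")]}'" on
# the first line is what the [4:] slice above strips before json.loads():
#
#   )]}'
#   {"revision": "abc123..."}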
def get_module_filename(module) -> Optional[str]:
"""
Retrieve filename from an imported pywikibot module.
    It uses the module's __file__ attribute. If the file extension is py
    plus one extra character (e.g. pyc), the trailing character is discarded
    when the corresponding py file exists.
:param module: The module instance.
:type module: module
:return: The filename if it's a pywikibot module otherwise None.
"""
if hasattr(module, '__file__'):
filename = module.__file__
if not filename or not os.path.exists(filename):
return None
program_dir = _get_program_dir()
        if filename.startswith(program_dir):
return filename
return None
def get_module_mtime(module):
"""
Retrieve the modification time from an imported module.
:param module: The module instance.
:type module: module
:return: The modification time if it's a pywikibot module otherwise None.
:rtype: datetime or None
"""
filename = get_module_filename(module)
if filename:
return datetime.datetime.fromtimestamp(os.stat(filename).st_mtime)
return None
def package_versions(
modules: Optional[List[str]] = None,
builtins: Optional[bool] = False,
standard_lib: Optional[bool] = None
):
"""Retrieve package version information.
When builtins or standard_lib are None, they will be included only
if a version was found in the package.
:param modules: Modules to inspect
:param builtins: Include builtins
:param standard_lib: Include standard library packages
"""
if not modules:
modules = sys.modules.keys()
std_lib_dir = pathlib.Path(sysconfig.get_paths()['stdlib'])
root_packages = {key.split('.')[0] for key in modules}
builtin_packages = {name.split('.')[0] for name in root_packages
if name in sys.builtin_module_names
or '_' + name in sys.builtin_module_names}
# Improve performance by removing builtins from the list if possible.
if builtins is False:
root_packages = root_packages - builtin_packages
std_lib_packages = []
paths = {}
data = {}
for name in root_packages:
try:
package = import_module(name)
except ImportError as e:
data[name] = {'name': name, 'err': e}
continue
info = {'package': package, 'name': name}
if name in builtin_packages:
info['type'] = 'builtins'
if '__file__' in package.__dict__:
            # Determine whether this file is part of the standard library.
# possible Namespace package
if not hasattr(package, '__file__') or package.__file__ is None:
_file = None
_path = pathlib.Path(package.__path__[0])
else:
_file = pathlib.Path(package.__file__)
_path = _file.parent
if _path == std_lib_dir:
std_lib_packages.append(name)
if standard_lib is False:
continue
info['type'] = 'standard library'
# Strip '__init__.py' from the filename.
if (not hasattr(package, '__file__')
or package.__file__ is None
or _file.name == '__init__.py'):
path = _path
else:
path = _file
info['path'] = path
assert path not in paths, \
'Path {} of the package {} is in defined paths as {}' \
.format(path, name, paths[path])
paths[path] = name
if '__version__' in package.__dict__:
info['ver'] = package.__version__
elif name.startswith('unicodedata'):
info['ver'] = package.unidata_version
# If builtins or standard_lib is None,
# only include package if a version was found.
if builtins is None and name in builtin_packages \
or standard_lib is None and name in std_lib_packages:
if 'ver' in info:
data[name] = info
else:
# Remove the entry from paths, so it isn't processed below
del paths[info['path']]
else:
data[name] = info
# Remove any pywikibot sub-modules which were loaded as a package.
# e.g. 'wikipedia_family.py' is loaded as 'wikipedia'
_program_dir = _get_program_dir()
dir_parts = pathlib.Path(_program_dir).parts
length = len(dir_parts)
for path, name in paths.items():
lib_parts = path.parts
if dir_parts != lib_parts[:length]:
continue
if lib_parts[length] != '.tox':
del data[name]
return data
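# A minimal usage sketch (hypothetical helper; the module list is
# illustrative and the 'ver' key is only present when a version was found):
def _demo_package_versions():
    import pprint
    info = package_versions(['pywikibot', 'requests'], builtins=False)
    pprint.pprint({name: entry.get('ver') for name, entry in info.items()})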
|
|
# Suggest running as: WANDB_BASE_URL=http://api.wandb.test python artifact_object_reference_test.py
import shutil
import os
import binascii
import base64
import time
from math import sin, cos, pi
import numpy as np
import sys
from bokeh.plotting import figure
PY3 = sys.version_info.major == 3 and sys.version_info.minor >= 6
if PY3:
from wandb.sdk.interface import artifacts
else:
from wandb.sdk_py27.interface import artifacts
WANDB_PROJECT_ENV = os.environ.get("WANDB_PROJECT")
if WANDB_PROJECT_ENV is None:
WANDB_PROJECT = "test__" + str(round(time.time()) % 1000000)
else:
WANDB_PROJECT = WANDB_PROJECT_ENV
os.environ["WANDB_PROJECT"] = WANDB_PROJECT
WANDB_SILENT_ENV = os.environ.get("WANDB_SILENT")
if WANDB_SILENT_ENV is None:
WANDB_SILENT = "true"
else:
WANDB_SILENT = WANDB_SILENT_ENV
os.environ["WANDB_SILENT"] = WANDB_SILENT
import wandb
columns = ["id", "class_id", "string", "bool", "int", "float", "Image", "Clouds", "HTML", "Video", "Bokeh", "Audio", "np_data"]
def _make_wandb_image(suffix=""):
class_labels = {1: "tree", 2: "car", 3: "road"}
test_folder = os.path.dirname(os.path.realpath(__file__))
im_path = os.path.join(test_folder, "..", "assets", "test{}.png".format(suffix))
return wandb.Image(
im_path,
classes=wandb.Classes([
{"id": 1, "name": "tree"},
{"id": 2, "name": "car"},
{"id": 3, "name": "road"},
]),
boxes={
"predictions": {
"box_data": [
{
"position": {
"minX": 0.1,
"maxX": 0.2,
"minY": 0.3,
"maxY": 0.4,
},
"class_id": 1,
"box_caption": "minMax(pixel)",
"scores": {"acc": 0.1, "loss": 1.2},
},
{
"position": {
"minX": 0.1,
"maxX": 0.2,
"minY": 0.3,
"maxY": 0.4,
},
"class_id": 2,
"box_caption": "minMax(pixel)",
"scores": {"acc": 0.1, "loss": 1.2},
},
],
"class_labels": class_labels,
},
"ground_truth": {
"box_data": [
{
"position": {
"minX": 0.1,
"maxX": 0.2,
"minY": 0.3,
"maxY": 0.4,
},
"class_id": 1,
"box_caption": "minMax(pixel)",
"scores": {"acc": 0.1, "loss": 1.2},
},
{
"position": {
"minX": 0.1,
"maxX": 0.2,
"minY": 0.3,
"maxY": 0.4,
},
"class_id": 2,
"box_caption": "minMax(pixel)",
"scores": {"acc": 0.1, "loss": 1.2},
},
],
"class_labels": class_labels,
},
},
masks={
"predictions": {
"mask_data": np.random.randint(0, 4, size=(30, 30)),
"class_labels": class_labels,
},
"ground_truth": {"path": im_path, "class_labels": class_labels},
},
)
def _make_point_cloud():
    # Generate a symmetric pattern
POINT_COUNT = 20000
# Choose a random sample
theta_chi = pi * np.random.rand(POINT_COUNT, 2)
def gen_point(theta, chi, i):
p = sin(theta) * 4.5 * sin(i + 1 / 2 * (i * i + 2)) + \
cos(chi) * 7 * sin((2 * i - 4) / 2 * (i + 2))
x = p * sin(chi) * cos(theta)
y = p * sin(chi) * sin(theta)
z = p * cos(chi)
r = sin(theta) * 120 + 120
g = sin(x) * 120 + 120
b = cos(y) * 120 + 120
return [x, y, z, r, g, b]
def wave_pattern(i):
return np.array([gen_point(theta, chi, i) for [theta, chi] in theta_chi])
return wandb.Object3D(wave_pattern(0))
# static assets for comparisons
pc1 = _make_point_cloud()
pc2 = _make_point_cloud()
pc3 = _make_point_cloud()
pc4 = _make_point_cloud()
def _make_bokeh():
x = [1, 2, 3, 4, 5]
y = [6, 7, 2, 4, 5]
p = figure(title="simple line example", x_axis_label='x', y_axis_label='y')
p.line(x, y, legend_label="Temp.", line_width=2)
return wandb.data_types.Bokeh(p)
b1 = _make_bokeh()
b2 = _make_bokeh()
b3 = _make_bokeh()
b4 = _make_bokeh()
def _make_html():
return wandb.Html("<p>Embedded</p><iframe src='https://wandb.ai'></iframe>")
def _make_video():
return wandb.Video(np.random.randint(0, high=255, size=(4, 1, 10, 10), dtype=np.uint8)) # 1 second video of 10x10 pixels
vid1 = _make_video()
vid2 = _make_video()
vid3 = _make_video()
vid4 = _make_video()
def _make_wandb_audio(frequency, caption):
SAMPLE_RATE = 44100
DURATION_SECONDS = 1
data = np.sin(
2 * np.pi * np.arange(SAMPLE_RATE * DURATION_SECONDS) * frequency / SAMPLE_RATE
)
return wandb.Audio(data, SAMPLE_RATE, caption)
aud1 = _make_wandb_audio(440, "four forty")
aud_ref_https = wandb.Audio(
"https://wandb-artifacts-refs-public-test.s3-us-west-2.amazonaws.com/StarWars3.wav",
caption="star wars https"
)
aud_ref_s3 = wandb.Audio(
"s3://wandb-artifacts-refs-public-test/StarWars3.wav",
caption="star wars s3"
)
aud_ref_gs = wandb.Audio(
"gs://wandb-artifact-refs-public-test/StarWars3.wav",
caption="star wars gs"
)
np_data = np.random.randint(255, size=(4, 16, 16, 3))
def _make_wandb_table():
classes = wandb.Classes([
{"id": 1, "name": "tree"},
{"id": 2, "name": "car"},
{"id": 3, "name": "road"},
])
table = wandb.Table(
columns=[c for c in columns[:-1]],
data=[
[1, 1, "string1", True, 1, 1.1, _make_wandb_image(), pc1, _make_html(), vid1, b1, aud1],
[2, 2, "string2", True, 1, 1.2, _make_wandb_image(), pc2, _make_html(), vid2, b2, aud_ref_https],
[3, 1, "string3", False, -0, -1.3, _make_wandb_image("2"), pc3, _make_html(), vid3, b3, aud_ref_s3],
[4, 3, "string4", False, -0, -1.4, _make_wandb_image("2"), pc4, _make_html(), vid4, b4, aud_ref_gs],
],
)
table.cast("class_id", classes.get_type())
table.add_column(columns[-1], np_data)
return table
def _make_wandb_joinedtable():
return wandb.JoinedTable(_make_wandb_table(), _make_wandb_table(), "id")
def _b64_to_hex_id(id_string):
return binascii.hexlify(base64.standard_b64decode(str(id_string))).decode("utf-8")
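# Worked example (illustrative id): base64 "QXJ0aWZhY3Q6MTIz" decodes to
# b"Artifact:123", so
#   _b64_to_hex_id("QXJ0aWZhY3Q6MTIz") == "41727469666163743a313233"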
# Artifact1.add_reference(artifact_URL) => recursive reference
def test_artifact_add_reference_via_url():
""" This test creates three artifacts. The middle artifact references the first artifact's file,
and the last artifact references the middle artifact's reference. The end result of downloading
the last artifact in a fresh, forth run, should be that all 3 artifacts are downloaded and that
the file in the last artifact is actually a symlink to the first artifact's file.
"""
upstream_artifact_name = "upstream_artifact"
middle_artifact_name = "middle_artifact"
downstream_artifact_name = "downstream_artifact"
upstream_local_path = "upstream/local/path/"
upstream_artifact_path = "upstream/artifact/path/"
middle_artifact_path = "middle/artifact/path/"
downstream_artifact_path = "downstream/artifact/path/"
upstream_local_file_path = upstream_local_path + "file.txt"
upstream_artifact_file_path = upstream_artifact_path + "file.txt"
middle_artifact_file_path = middle_artifact_path + "file.txt"
downstream_artifact_file_path = downstream_artifact_path + "file.txt"
file_text = "Luke, I am your Father!!!!!"
# Create a super important file
if not os.path.exists(upstream_local_path):
os.makedirs(upstream_local_path)
with open(upstream_local_file_path, "w") as file:
file.write(file_text)
    # Create an artifact that stores the file
with wandb.init() as run:
artifact = wandb.Artifact(upstream_artifact_name, "database")
artifact.add_file(upstream_local_file_path, upstream_artifact_file_path)
run.log_artifact(artifact)
    # Create a middle artifact that references the file (note that no download is needed)
with wandb.init() as run:
artifact = wandb.Artifact(middle_artifact_name, "database")
upstream_artifact = run.use_artifact(upstream_artifact_name + ":latest")
artifact.add_reference(
"wandb-artifact://{}/{}".format(_b64_to_hex_id(upstream_artifact.id),str(upstream_artifact_file_path)),
middle_artifact_file_path,
)
run.log_artifact(artifact)
# Create a downstream artifact that is referencing the middle's reference
with wandb.init() as run:
artifact = wandb.Artifact(downstream_artifact_name, "database")
middle_artifact = run.use_artifact(middle_artifact_name + ":latest")
artifact.add_reference(
"wandb-artifact://{}/{}".format(_b64_to_hex_id(middle_artifact.id),str(middle_artifact_file_path)),
downstream_artifact_file_path,
)
run.log_artifact(artifact)
# Remove the directories for good measure
if os.path.isdir("upstream"):
shutil.rmtree("upstream")
if os.path.isdir("artifacts"):
shutil.rmtree("artifacts")
# Finally, use the artifact (download it) and enforce that the file is where we want it!
with wandb.init() as run:
downstream_artifact = run.use_artifact(downstream_artifact_name + ":latest")
downstream_path = downstream_artifact.download()
with open(
os.path.join(downstream_path, downstream_artifact_file_path), "r"
) as file:
assert file.read() == file_text
# # Artifact1.add_reference(artifact2.get_path(file_name))
def test_add_reference_via_artifact_entry():
"""This test is the same as test_artifact_add_reference_via_url, but rather
than passing the direct URL, we pass an Artifact entry, which will automatically
resolve to the correct URL
"""
upstream_artifact_name = "upstream_artifact"
middle_artifact_name = "middle_artifact"
downstream_artifact_name = "downstream_artifact"
upstream_local_path = "upstream/local/path/"
upstream_artifact_path = "upstream/artifact/path/"
middle_artifact_path = "middle/artifact/path/"
downstream_artifact_path = "downstream/artifact/path/"
upstream_local_file_path = upstream_local_path + "file.txt"
upstream_artifact_file_path = upstream_artifact_path + "file.txt"
middle_artifact_file_path = middle_artifact_path + "file.txt"
downstream_artifact_file_path = downstream_artifact_path + "file.txt"
file_text = "Luke, I am your Father!!!!!"
# Create a super important file
if not os.path.exists(upstream_local_path):
os.makedirs(upstream_local_path)
with open(upstream_local_file_path, "w") as file:
file.write(file_text)
    # Create an artifact that stores the file
with wandb.init() as run:
artifact = wandb.Artifact(upstream_artifact_name, "database")
artifact.add_file(upstream_local_file_path, upstream_artifact_file_path)
run.log_artifact(artifact)
    # Create a middle artifact that references the file (note that no download is needed)
with wandb.init() as run:
artifact = wandb.Artifact(middle_artifact_name, "database")
upstream_artifact = run.use_artifact(upstream_artifact_name + ":latest")
artifact.add_reference(
upstream_artifact.get_path(upstream_artifact_file_path),
middle_artifact_file_path,
)
run.log_artifact(artifact)
# Create a downstream artifact that is referencing the middle's reference
with wandb.init() as run:
artifact = wandb.Artifact(downstream_artifact_name, "database")
middle_artifact = run.use_artifact(middle_artifact_name + ":latest")
artifact.add_reference(
middle_artifact.get_path(middle_artifact_file_path),
downstream_artifact_file_path,
)
run.log_artifact(artifact)
# Remove the directories for good measure
if os.path.isdir("upstream"):
shutil.rmtree("upstream")
if os.path.isdir("artifacts"):
shutil.rmtree("artifacts")
# Finally, use the artifact (download it) and enforce that the file is where we want it!
with wandb.init() as run:
downstream_artifact = run.use_artifact(downstream_artifact_name + ":latest")
downstream_path = downstream_artifact.download()
downstream_path = downstream_artifact.download() # should not fail on second download.
# assert os.path.islink(
# os.path.join(downstream_path, downstream_artifact_file_path)
# )
with open(
os.path.join(downstream_path, downstream_artifact_file_path), "r"
) as file:
assert file.read() == file_text
# # Artifact1.get(MEDIA_NAME) => media obj
def test_get_artifact_obj_by_name():
"""Tests tests the ability to instantiate a wandb Media object when passed
the name of such object. This is the logical inverse of Artifact.add(name).
TODO: test more robustly for every Media type, nested objects (eg. Table -> Image),
and references
"""
with wandb.init() as run:
artifact = wandb.Artifact("A2", "database")
image = _make_wandb_image()
table = _make_wandb_table()
artifact.add(image, "I1")
artifact.add(table, "T1")
run.log_artifact(artifact)
with wandb.init() as run:
artifact = run.use_artifact("A2:latest")
actual_image = artifact.get("I1")
assert actual_image == image
actual_table = artifact.get("T1")
assert actual_table.columns == columns
assert actual_table.data[0][columns.index("Image")] == image
assert actual_table.data[1][columns.index("Image")] == _make_wandb_image("2")
actual_table._eq_debug(_make_wandb_table(), True)
assert actual_table == _make_wandb_table()
# # Artifact1.add(artifact2.get(MEDIA_NAME))
def test_adding_artifact_by_object():
"""This test validates that we can add wandb Media objects
to an artifact by passing the object itself.
"""
    # Create an artifact that stores the file
with wandb.init() as run:
artifact = wandb.Artifact("upstream_media", "database")
artifact.add(_make_wandb_image(), "I1")
run.log_artifact(artifact)
    # Create a middle artifact that references the file (note that no download is needed)
with wandb.init() as run:
artifact = wandb.Artifact("downstream_media", "database")
upstream_artifact = run.use_artifact("upstream_media:latest")
artifact.add(upstream_artifact.get("I1"), "T2")
run.log_artifact(artifact)
if os.path.isdir("artifacts"):
shutil.rmtree("artifacts")
with wandb.init() as run:
downstream_artifact = run.use_artifact("downstream_media:latest")
downstream_path = downstream_artifact.download()
# assert os.path.islink(os.path.join(downstream_path, "T2.image-file.json"))
assert downstream_artifact.get("T2") == _make_wandb_image()
def _cleanup():
artifacts.get_artifacts_cache()._artifacts_by_id = {}
if os.path.isdir("wandb"):
shutil.rmtree("wandb")
if os.path.isdir("artifacts"):
shutil.rmtree("artifacts")
if os.path.isdir("upstream"):
shutil.rmtree("upstream")
def test_image_reference_artifact():
with wandb.init() as run:
artifact = wandb.Artifact("image_data", "data")
image = _make_wandb_image()
artifact.add(image, "image")
run.log_artifact(artifact)
with wandb.init() as run:
artifact_1 = run.use_artifact("image_data:latest")
artifact = wandb.Artifact("reference_data", "data")
artifact.add(artifact_1.get("image"), "image_2")
run.log_artifact(artifact)
_cleanup()
with wandb.init() as run:
artifact_2 = run.use_artifact("reference_data:latest")
artifact_2.download()
# assert os.path.islink(os.path.join(artifact_2._default_root(), "image_2.image-file.json"))
def test_nested_reference_artifact():
with wandb.init() as run:
artifact = wandb.Artifact("image_data", "data")
image = _make_wandb_image()
artifact.add(image, "image")
run.log_artifact(artifact)
with wandb.init() as run:
artifact_1 = run.use_artifact("image_data:latest")
artifact = wandb.Artifact("reference_data", "data")
table = wandb.Table(["image"], [[artifact_1.get("image")]])
artifact.add(table, "table_2")
run.log_artifact(artifact)
with wandb.init() as run:
artifact_3 = run.use_artifact("reference_data:latest")
table_2 = artifact_3.get("table_2")
# assert os.path.islink(os.path.join(artifact_3._default_root(), "media", "images", "test.png"))
table._eq_debug(table_2, True)
assert table == table_2
artifact_3.download()
def test_table_slice_reference_artifact():
with wandb.init() as run:
artifact = wandb.Artifact("table_data", "data")
table = _make_wandb_table()
artifact.add(table, "table")
run.log_artifact(artifact)
with wandb.init() as run:
artifact_1 = run.use_artifact("table_data:latest")
t1 = artifact_1.get("table")
artifact = wandb.Artifact("intermediate_data", "data")
i1 = wandb.Table(t1.columns, t1.data[:1])
i2 = wandb.Table(t1.columns, t1.data[1:])
artifact.add(i1, "table1")
artifact.add(i2, "table2")
run.log_artifact(artifact)
with wandb.init() as run:
artifact_2 = run.use_artifact("intermediate_data:latest")
i1 = artifact_2.get("table1")
i2 = artifact_2.get("table2")
artifact = wandb.Artifact("reference_data", "data")
table1 = wandb.Table(t1.columns, i1.data)
table2 = wandb.Table(t1.columns, i2.data)
artifact.add(table1, "table1")
artifact.add(table2, "table2")
run.log_artifact(artifact)
_cleanup()
with wandb.init() as run:
artifact_3 = run.use_artifact("reference_data:latest")
table1 = artifact_3.get("table1")
table2 = artifact_3.get("table2")
assert not os.path.isdir(os.path.join(artifact_2._default_root()))
# assert os.path.islink(os.path.join(artifact_3._default_root(), "media", "images", "test.png"))
# assert os.path.islink(os.path.join(artifact_3._default_root(), "media", "images", "test2.png"))
def assert_eq_data(d1, d2):
assert len(d1) == len(d2)
for ndx in range(len(d1)):
assert len(d1[ndx]) == len(d2[ndx])
for i in range(len(d1[ndx])):
eq = d1[ndx][i] == d2[ndx][i]
if isinstance(eq, list) or isinstance(eq, np.ndarray):
assert np.all(eq)
else:
assert eq
assert_eq_data(t1.data[:1], table1.data)
assert_eq_data(t1.data[1:], table2.data)
# General helper function which will perform the following:
# Add the object to an artifact
# Validate that "getting" this asset returns an object that is equal to the first
# Add a reference to this asset in an intermediate artifact
# Validate that "getting" this reference asset returns an object that is equal to the first
# Validate that the symbolic links are proper
# Add a reference to the intermediate reference in yet a third artifact
# Validate that "getting" this new reference asset returns an object that is equal to the first
# Validate that the intermediate object is not downloaded - there are no "leftover" assets (eg. classes.json)
# Validate that the symbolic links are proper
def assert_media_obj_referential_equality(obj):
with wandb.init() as run:
orig_artifact = wandb.Artifact("orig_artifact", "database")
orig_artifact.add(obj, "obj")
run.log_artifact(orig_artifact)
_cleanup()
with wandb.init() as run:
orig_artifact_ref = run.use_artifact("orig_artifact:latest")
orig_dir = orig_artifact_ref._default_root()
obj1 = orig_artifact_ref.get("obj")
if hasattr(obj, "_eq_debug"):
obj._eq_debug(obj1, True)
assert obj1 == obj
target_path = os.path.join(orig_dir, "obj." + type(obj)._log_type + ".json")
assert os.path.isfile(target_path)
with wandb.init() as run:
orig_artifact_ref = run.use_artifact("orig_artifact:latest")
mid_artifact = wandb.Artifact("mid_artifact", "database")
mid_obj = orig_artifact_ref.get("obj")
mid_artifact.add(mid_obj, "obj2")
run.log_artifact(mid_artifact)
_cleanup()
with wandb.init() as run:
mid_artifact_ref = run.use_artifact("mid_artifact:latest")
mid_dir = mid_artifact_ref._default_root()
obj2 = mid_artifact_ref.get("obj2")
if hasattr(obj, "_eq_debug"):
obj._eq_debug(obj2, True)
assert obj2 == obj
# name = "obj2." + type(obj)._log_type + ".json"
# start_path = os.path.join(mid_dir, name)
# mid_artifact_ref.get_path(name).download()
# assert os.path.islink(start_path)
# assert os.path.abspath(os.readlink(start_path)) == os.path.abspath(target_path)
with wandb.init() as run:
mid_artifact_ref = run.use_artifact("mid_artifact:latest")
down_artifact = wandb.Artifact("down_artifact", "database")
down_obj = mid_artifact_ref.get("obj2")
down_artifact.add(down_obj, "obj3")
run.log_artifact(down_artifact)
_cleanup()
with wandb.init() as run:
down_artifact_ref = run.use_artifact("down_artifact:latest")
down_dir = down_artifact_ref._default_root()
obj3 = down_artifact_ref.get("obj3")
if hasattr(obj, "_eq_debug"):
obj._eq_debug(obj3, True)
assert obj3 == obj
assert not os.path.isdir(os.path.join(mid_dir))
# name = "obj3." + type(obj)._log_type + ".json"
# start_path = os.path.join(down_dir, name)
# down_artifact_ref.get_path(name).download()
# assert os.path.islink(start_path)
# assert os.path.abspath(os.readlink(start_path)) == os.path.abspath(target_path)
def test_table_refs():
assert_media_obj_referential_equality(_make_wandb_table())
def test_image_refs():
assert_media_obj_referential_equality(_make_wandb_image())
def test_point_cloud_refs():
assert_media_obj_referential_equality(_make_point_cloud())
def test_bokeh_refs():
assert_media_obj_referential_equality(_make_bokeh())
def test_html_refs():
assert_media_obj_referential_equality(_make_html())
def test_video_refs():
assert_media_obj_referential_equality(_make_video())
def test_joined_table_refs():
assert_media_obj_referential_equality(_make_wandb_joinedtable())
def test_audio_refs():
# assert_media_obj_referential_equality(_make_wandb_audio(440, "four forty"))
assert_media_obj_referential_equality(aud_ref_https)
assert_media_obj_referential_equality(aud_ref_s3)
assert_media_obj_referential_equality(aud_ref_gs)
def test_joined_table_referential():
src_image_1 = _make_wandb_image()
src_image_2 = _make_wandb_image()
src_image_3 = _make_wandb_image()
src_image_4 = _make_wandb_image()
src_table_1 = wandb.Table(["id", "image"], [[1, src_image_1], [2, src_image_2]])
src_table_2 = wandb.Table(["id", "image"], [[1, src_image_3], [2, src_image_4]])
src_jt_1 = wandb.JoinedTable(src_table_1, src_table_2, "id")
with wandb.init() as run:
orig_artifact = wandb.Artifact("art1", "database")
orig_artifact.add(src_jt_1, "src_jt_1")
run.log_artifact(orig_artifact)
with wandb.init() as run:
art1 = run.use_artifact("art1:latest")
src_jt_1 = art1.get("src_jt_1")
src_jt_2 = wandb.JoinedTable(src_jt_1._table1, src_jt_1._table2, "id")
art2 = wandb.Artifact("art2", "database")
art2.add(src_jt_2, "src_jt_2")
run.log_artifact(art2)
_cleanup()
with wandb.init() as run:
art2 = run.use_artifact("art2:latest")
src_jt_2 = art2.get("src_jt_2")
src_jt_1._eq_debug(src_jt_2, True)
assert src_jt_1 == src_jt_2
def test_joined_table_add_by_path():
src_image_1 = _make_wandb_image()
src_image_2 = _make_wandb_image()
src_image_3 = _make_wandb_image()
src_image_4 = _make_wandb_image()
src_table_1 = wandb.Table(["id", "image"], [[1, src_image_1], [2, src_image_2]])
src_table_2 = wandb.Table(["id", "image"], [[1, src_image_3], [2, src_image_4]])
with wandb.init() as run:
tables = wandb.Artifact("tables", "database")
tables.add(src_table_1, "src_table_1")
tables.add(src_table_2, "src_table_2")
# Should be able to add by name directly
jt = wandb.JoinedTable("src_table_1.table.json", "src_table_2.table.json", "id")
tables.add(jt, "jt")
# Make sure it errors when you are not referencing the correct table names
jt_bad = wandb.JoinedTable("bad_table_name.table.json", "bad_table_name.table.json", "id")
got_err = False
try:
tables.add(jt_bad, "jt_bad")
except ValueError as err:
got_err = True
assert got_err
run.log_artifact(tables)
_cleanup()
with wandb.init() as run:
tables_2 = wandb.Artifact("tables_2", "database")
upstream = run.use_artifact("tables:latest")
# Able to add by reference
jt = wandb.JoinedTable(upstream.get_path("src_table_1"), upstream.get_path("src_table_2"), "id")
tables_2.add(jt, "jt")
run.log_artifact(tables_2)
_cleanup()
with wandb.init() as run:
tables_2 = run.use_artifact("tables_2:latest")
jt_2 = tables_2.get("jt")
assert wandb.JoinedTable(upstream.get("src_table_1"), upstream.get("src_table_2"), "id") == jt_2
def test_image_reference_with_preferred_path():
orig_im_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "assets", "test.png")
orig_im_path_2 = os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "assets", "test2.png")
desired_artifact_path = "images/sample.png"
with wandb.init() as run:
artifact = wandb.Artifact("artifact_1", type="test_artifact")
# manually add the image to a desired path
artifact.add_file(orig_im_path, desired_artifact_path)
# create an image that uses this image (it should be smart enough not to add the image twice)
image = wandb.Image(orig_im_path)
        image_2 = wandb.Image(orig_im_path_2)  # this one's path was not pre-added to the artifact
# add the image to the table
table = wandb.Table(["image"], data=[[image],[image_2]])
# add the table to the artifact
artifact.add(table, "table")
run.log_artifact(artifact)
_cleanup()
with wandb.init() as run:
artifact_1 = run.use_artifact("artifact_1:latest")
original_table = artifact_1.get("table")
artifact = wandb.Artifact("artifact_2", type="test_artifact")
# add the image by reference
image = wandb.Image(original_table.data[0][0])
image_2 = wandb.Image(original_table.data[1][0])
# add the image to the table
table = wandb.Table(["image"], data=[[image],[image_2]])
# add the table to the artifact
artifact.add(table, "table")
run.log_artifact(artifact)
_cleanup()
with wandb.init() as run:
artifact_2 = run.use_artifact("artifact_2:latest")
artifact_2.download()
# This test just checks that all this logic does not fail
def test_simple_partition_table():
table_name = "dataset"
table_parts_dir = "dataset_parts"
artifact_name = "simple_dataset"
artifact_type = "dataset"
columns = ["A", "B", "C"]
data = []
# Add Data
run = wandb.init()
artifact = wandb.Artifact(artifact_name, type=artifact_type)
for i in range(5):
row = [i,i*i,2**i]
data.append(row)
table = wandb.Table(columns=columns, data=[row])
artifact.add(table, "{}/{}".format(table_parts_dir, i))
partition_table = wandb.data_types.PartitionedTable(parts_path=table_parts_dir)
artifact.add(partition_table, table_name)
run.log_artifact(artifact)
run.finish()
# test
run = wandb.init()
partition_table = run.use_artifact("{}:latest".format(artifact_name)).get(table_name)
for ndx, row in partition_table.iterrows():
assert row == data[ndx]
run.finish()
def test_distributed_artifact_simple():
table_name = "dataset"
artifact_name = "simple_dist_dataset_{}".format(round(time.time()))
group_name = "test_group_{}".format(np.random.rand())
artifact_type = "distributed_dataset"
count = 2
images = []
image_paths = []
# Add Data
for i in range(count):
run = wandb.init(group=group_name)
artifact = wandb.Artifact(artifact_name, type=artifact_type)
image = wandb.Image(np.random.randint(0, 255, (10, 10)))
path = "image_{}".format(i)
images.append(image)
image_paths.append(path)
artifact.add(image, path)
run.upsert_artifact(artifact)
run.finish()
# TODO: Should we try to use_artifact in some way before it is finished?
# Finish
run = wandb.init(group=group_name)
artifact = wandb.Artifact(artifact_name, type=artifact_type)
# artifact.add_file("./test.py")
run.finish_artifact(artifact)
run.finish()
# test
run = wandb.init()
artifact = run.use_artifact("{}:latest".format(artifact_name))
assert len(artifact.manifest.entries.keys()) == count * 2
# for image, path in zip(images, image_paths):
# assert image == artifact.get(path)
if __name__ == "__main__":
_cleanup()
test_fns = [
test_artifact_add_reference_via_url,
test_add_reference_via_artifact_entry,
test_adding_artifact_by_object,
test_get_artifact_obj_by_name,
test_image_reference_artifact,
test_nested_reference_artifact,
test_table_slice_reference_artifact,
test_image_refs,
test_point_cloud_refs,
test_bokeh_refs,
test_html_refs,
test_video_refs,
test_table_refs,
test_joined_table_refs,
test_audio_refs,
test_joined_table_referential,
test_joined_table_add_by_path,
test_image_reference_with_preferred_path,
# test_distributed_artifact_simple,
test_simple_partition_table,
]
for ndx, test_fn in enumerate(test_fns):
try:
test_fn()
_cleanup()
print("{}/{} Complete".format(ndx+1, len(test_fns)))
except Exception as exception:
print("error on function {}".format(test_fn.__name__))
raise exception
if WANDB_PROJECT_ENV is not None:
os.environ["WANDB_PROJECT"] = WANDB_PROJECT_ENV
if WANDB_SILENT_ENV is not None:
os.environ["WANDB_SILENT"] = WANDB_SILENT_ENV
|
|
import datetime
import hashlib
import heapq
import math
import os
import random
import re
import sys
import threading
import zlib
try:
from collections import Counter
except ImportError:
Counter = None
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
try:
from playhouse._sqlite_ext import TableFunction
except ImportError:
TableFunction = None
SQLITE_DATETIME_FORMATS = (
'%Y-%m-%d %H:%M:%S',
'%Y-%m-%d %H:%M:%S.%f',
'%Y-%m-%d',
'%H:%M:%S',
'%H:%M:%S.%f',
'%H:%M')
from peewee import format_date_time
def format_date_time_sqlite(date_value):
return format_date_time(date_value, SQLITE_DATETIME_FORMATS)
try:
from playhouse import _sqlite_udf as cython_udf
except ImportError:
cython_udf = None
# Group udf by function.
CONTROL_FLOW = 'control_flow'
DATE = 'date'
FILE = 'file'
HELPER = 'helpers'
MATH = 'math'
STRING = 'string'
AGGREGATE_COLLECTION = {}
TABLE_FUNCTION_COLLECTION = {}
UDF_COLLECTION = {}
class synchronized_dict(dict):
def __init__(self, *args, **kwargs):
super(synchronized_dict, self).__init__(*args, **kwargs)
self._lock = threading.Lock()
def __getitem__(self, key):
with self._lock:
return super(synchronized_dict, self).__getitem__(key)
def __setitem__(self, key, value):
with self._lock:
return super(synchronized_dict, self).__setitem__(key, value)
def __delitem__(self, key):
with self._lock:
return super(synchronized_dict, self).__delitem__(key)
STATE = synchronized_dict()
SETTINGS = synchronized_dict()
# Class and function decorators.
def aggregate(*groups):
def decorator(klass):
for group in groups:
AGGREGATE_COLLECTION.setdefault(group, [])
AGGREGATE_COLLECTION[group].append(klass)
return klass
return decorator
def table_function(*groups):
def decorator(klass):
for group in groups:
TABLE_FUNCTION_COLLECTION.setdefault(group, [])
TABLE_FUNCTION_COLLECTION[group].append(klass)
return klass
return decorator
def udf(*groups):
def decorator(fn):
for group in groups:
UDF_COLLECTION.setdefault(group, [])
UDF_COLLECTION[group].append(fn)
return fn
return decorator
# Register aggregates / functions with connection.
def register_aggregate_groups(db, *groups):
seen = set()
for group in groups:
klasses = AGGREGATE_COLLECTION.get(group, ())
for klass in klasses:
name = getattr(klass, 'name', klass.__name__)
if name not in seen:
seen.add(name)
db.register_aggregate(klass, name)
def register_table_function_groups(db, *groups):
seen = set()
for group in groups:
klasses = TABLE_FUNCTION_COLLECTION.get(group, ())
for klass in klasses:
if klass.name not in seen:
seen.add(klass.name)
db.register_table_function(klass)
def register_udf_groups(db, *groups):
seen = set()
for group in groups:
functions = UDF_COLLECTION.get(group, ())
for function in functions:
name = function.__name__
if name not in seen:
seen.add(name)
db.register_function(function, name)
def register_groups(db, *groups):
register_aggregate_groups(db, *groups)
register_table_function_groups(db, *groups)
register_udf_groups(db, *groups)
def register_all(db):
register_aggregate_groups(db, *AGGREGATE_COLLECTION)
register_table_function_groups(db, *TABLE_FUNCTION_COLLECTION)
register_udf_groups(db, *UDF_COLLECTION)
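# A minimal usage sketch (hypothetical helper; assumes an extension-capable
# database such as playhouse.sqlite_ext.SqliteExtDatabase, which provides
# register_aggregate/register_function/register_table_function):
def _demo_registration():
    from playhouse.sqlite_ext import SqliteExtDatabase
    db = SqliteExtDatabase(':memory:')
    register_groups(db, DATE, MATH)  # just two groups...
    register_all(db)                 # ...or everything defined in this module
    return db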
# Begin actual user-defined functions and aggregates.
# Scalar functions.
@udf(CONTROL_FLOW)
def if_then_else(cond, truthy, falsey=None):
if cond:
return truthy
return falsey
@udf(DATE)
def strip_tz(date_str):
date_str = date_str.replace('T', ' ')
tz_idx1 = date_str.find('+')
if tz_idx1 != -1:
return date_str[:tz_idx1]
    # Use rfind() so the timezone hyphen is matched rather than the
    # date-part hyphens; an offset can only start past index 13 of a
    # full 'YYYY-MM-DD HH:MM' timestamp.
    tz_idx2 = date_str.rfind('-')
    if tz_idx2 > 13:
        return date_str[:tz_idx2]
return date_str
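# Worked example: strip_tz('2020-01-01T12:00:00+05:00') -> '2020-01-01 12:00:00',
# while a bare date like '2020-01-01' passes through unchanged.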
@udf(DATE)
def human_delta(nseconds, glue=', '):
parts = (
(86400 * 365, 'year'),
(86400 * 30, 'month'),
(86400 * 7, 'week'),
(86400, 'day'),
(3600, 'hour'),
(60, 'minute'),
(1, 'second'),
)
accum = []
for offset, name in parts:
val, nseconds = divmod(nseconds, offset)
if val:
suffix = val != 1 and 's' or ''
accum.append('%s %s%s' % (val, name, suffix))
if not accum:
return '0 seconds'
return glue.join(accum)
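# Worked example: human_delta(90061) -> '1 day, 1 hour, 1 minute, 1 second'
# (86400 + 3600 + 60 + 1 seconds), and human_delta(0) -> '0 seconds'.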
@udf(FILE)
def file_ext(filename):
try:
res = os.path.splitext(filename)
except ValueError:
return None
return res[1]
@udf(FILE)
def file_read(filename):
    try:
        with open(filename) as fh:
            return fh.read()
    except (IOError, OSError):
        # Unreadable or missing file: return NULL to the caller.
        return None
if sys.version_info[0] == 2:
@udf(HELPER)
def gzip(data, compression=9):
return buffer(zlib.compress(data, compression))
@udf(HELPER)
def gunzip(data):
return zlib.decompress(data)
else:
@udf(HELPER)
def gzip(data, compression=9):
if isinstance(data, str):
data = bytes(data.encode('raw_unicode_escape'))
return zlib.compress(data, compression)
@udf(HELPER)
def gunzip(data):
return zlib.decompress(data)
@udf(HELPER)
def hostname(url):
parse_result = urlparse(url)
if parse_result:
return parse_result.netloc
@udf(HELPER)
def toggle(key):
key = key.lower()
STATE[key] = ret = not STATE.get(key)
return ret
@udf(HELPER)
def setting(key, value=None):
if value is None:
return SETTINGS.get(key)
else:
SETTINGS[key] = value
return value
@udf(HELPER)
def clear_settings():
SETTINGS.clear()
@udf(HELPER)
def clear_toggles():
STATE.clear()
@udf(MATH)
def randomrange(start, end=None, step=None):
if end is None:
start, end = 0, start
elif step is None:
step = 1
return random.randrange(start, end, step)
@udf(MATH)
def gauss_distribution(mean, sigma):
try:
return random.gauss(mean, sigma)
except ValueError:
return None
@udf(MATH)
def sqrt(n):
try:
return math.sqrt(n)
except ValueError:
return None
@udf(MATH)
def tonumber(s):
    try:
        return int(s)
    except (TypeError, ValueError):
        try:
            return float(s)
        except (TypeError, ValueError):
            return None
@udf(STRING)
def substr_count(haystack, needle):
if not haystack or not needle:
return 0
return haystack.count(needle)
@udf(STRING)
def strip_chars(haystack, chars):
return haystack.strip(chars)
def _hash(constructor, *args):
hash_obj = constructor()
for arg in args:
hash_obj.update(arg)
return hash_obj.hexdigest()
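# Worked example (illustrative): _hash(hashlib.md5, b'peewee') returns
# hashlib.md5(b'peewee').hexdigest(); each extra positional argument is fed
# to hash_obj.update() in order, so multi-part digests are supported.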
# Aggregates.
class _heap_agg(object):
def __init__(self):
self.heap = []
self.ct = 0
def process(self, value):
return value
def step(self, value):
self.ct += 1
heapq.heappush(self.heap, self.process(value))
class _datetime_heap_agg(_heap_agg):
def process(self, value):
return format_date_time_sqlite(value)
if sys.version_info[:2] == (2, 6):
def total_seconds(td):
return (td.seconds +
(td.days * 86400) +
(td.microseconds / (10.**6)))
else:
total_seconds = lambda td: td.total_seconds()
@aggregate(DATE)
class mintdiff(_datetime_heap_agg):
def finalize(self):
dtp = min_diff = None
while self.heap:
if min_diff is None:
if dtp is None:
dtp = heapq.heappop(self.heap)
continue
dt = heapq.heappop(self.heap)
diff = dt - dtp
if min_diff is None or min_diff > diff:
min_diff = diff
dtp = dt
if min_diff is not None:
return total_seconds(min_diff)
@aggregate(DATE)
class avgtdiff(_datetime_heap_agg):
def finalize(self):
if self.ct < 1:
return
elif self.ct == 1:
return 0
total = ct = 0
dtp = None
while self.heap:
if total == 0:
if dtp is None:
dtp = heapq.heappop(self.heap)
continue
dt = heapq.heappop(self.heap)
diff = dt - dtp
ct += 1
total += total_seconds(diff)
dtp = dt
return float(total) / ct
@aggregate(DATE)
class duration(object):
def __init__(self):
self._min = self._max = None
def step(self, value):
dt = format_date_time_sqlite(value)
if self._min is None or dt < self._min:
self._min = dt
if self._max is None or dt > self._max:
self._max = dt
def finalize(self):
if self._min and self._max:
td = (self._max - self._min)
return total_seconds(td)
return None
@aggregate(MATH)
class mode(object):
if Counter:
def __init__(self):
self.items = Counter()
def step(self, *args):
self.items.update(args)
def finalize(self):
if self.items:
return self.items.most_common(1)[0][0]
else:
def __init__(self):
self.items = []
def step(self, item):
self.items.append(item)
def finalize(self):
if self.items:
return max(set(self.items), key=self.items.count)
@aggregate(MATH)
class minrange(_heap_agg):
def finalize(self):
if self.ct == 0:
return
elif self.ct == 1:
return 0
prev = min_diff = None
while self.heap:
if min_diff is None:
if prev is None:
prev = heapq.heappop(self.heap)
continue
curr = heapq.heappop(self.heap)
diff = curr - prev
if min_diff is None or min_diff > diff:
min_diff = diff
prev = curr
return min_diff
@aggregate(MATH)
class avgrange(_heap_agg):
def finalize(self):
if self.ct == 0:
return
elif self.ct == 1:
return 0
total = ct = 0
prev = None
while self.heap:
if total == 0:
if prev is None:
prev = heapq.heappop(self.heap)
continue
curr = heapq.heappop(self.heap)
diff = curr - prev
ct += 1
total += diff
prev = curr
return float(total) / ct
@aggregate(MATH)
class _range(object):
name = 'range'
def __init__(self):
self._min = self._max = None
def step(self, value):
if self._min is None or value < self._min:
self._min = value
if self._max is None or value > self._max:
self._max = value
def finalize(self):
if self._min is not None and self._max is not None:
return self._max - self._min
return None
if cython_udf is not None:
damerau_levenshtein_dist = udf(STRING)(cython_udf.damerau_levenshtein_dist)
levenshtein_dist = udf(STRING)(cython_udf.levenshtein_dist)
str_dist = udf(STRING)(cython_udf.str_dist)
median = aggregate(MATH)(cython_udf.median)
if TableFunction is not None:
@table_function(STRING)
class RegexSearch(TableFunction):
params = ['regex', 'search_string']
columns = ['match']
name = 'regex_search'
def initialize(self, regex=None, search_string=None):
self._iter = re.finditer(regex, search_string)
def iterate(self, idx):
return (next(self._iter).group(0),)
@table_function(DATE)
class DateSeries(TableFunction):
params = ['start', 'stop', 'step_seconds']
columns = ['date']
name = 'date_series'
def initialize(self, start, stop, step_seconds=86400):
self.start = format_date_time_sqlite(start)
self.stop = format_date_time_sqlite(stop)
step_seconds = int(step_seconds)
self.step_seconds = datetime.timedelta(seconds=step_seconds)
if (self.start.hour == 0 and
self.start.minute == 0 and
self.start.second == 0 and
step_seconds >= 86400):
self.format = '%Y-%m-%d'
elif (self.start.year == 1900 and
self.start.month == 1 and
self.start.day == 1 and
self.stop.year == 1900 and
self.stop.month == 1 and
self.stop.day == 1 and
step_seconds < 86400):
self.format = '%H:%M:%S'
else:
self.format = '%Y-%m-%d %H:%M:%S'
def iterate(self, idx):
if self.start > self.stop:
raise StopIteration
current = self.start
self.start += self.step_seconds
return (current.strftime(self.format),)
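# Usage sketch from SQL once the table functions are registered (illustrative
# queries and output):
#
#   SELECT match FROM regex_search('\d+', 'a1 b22 c333');
#   -- 1, 22, 333
#
#   SELECT date FROM date_series('2019-01-01', '2019-01-04');
#   -- 2019-01-01 through 2019-01-04, one row per day (bounds inclusive)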
|
|
import ctypes
import itertools
import logging
import multiprocessing
import os
import pickle
import textwrap
import unittest
import warnings
from importlib import import_module
from io import StringIO
from django.core.management import call_command
from django.db import connections
from django.test import SimpleTestCase, TestCase
from django.test.utils import (
setup_databases as _setup_databases, setup_test_environment,
teardown_databases as _teardown_databases, teardown_test_environment,
)
from django.utils.datastructures import OrderedSet
from django.utils.deprecation import RemovedInDjango21Warning
try:
import tblib.pickling_support
except ImportError:
tblib = None
class DebugSQLTextTestResult(unittest.TextTestResult):
def __init__(self, stream, descriptions, verbosity):
self.logger = logging.getLogger('django.db.backends')
self.logger.setLevel(logging.DEBUG)
super().__init__(stream, descriptions, verbosity)
def startTest(self, test):
self.debug_sql_stream = StringIO()
self.handler = logging.StreamHandler(self.debug_sql_stream)
self.logger.addHandler(self.handler)
super().startTest(test)
def stopTest(self, test):
super().stopTest(test)
self.logger.removeHandler(self.handler)
if self.showAll:
self.debug_sql_stream.seek(0)
self.stream.write(self.debug_sql_stream.read())
self.stream.writeln(self.separator2)
def addError(self, test, err):
super().addError(test, err)
self.debug_sql_stream.seek(0)
self.errors[-1] = self.errors[-1] + (self.debug_sql_stream.read(),)
def addFailure(self, test, err):
super().addFailure(test, err)
self.debug_sql_stream.seek(0)
self.failures[-1] = self.failures[-1] + (self.debug_sql_stream.read(),)
def printErrorList(self, flavour, errors):
for test, err, sql_debug in errors:
self.stream.writeln(self.separator1)
self.stream.writeln("%s: %s" % (flavour, self.getDescription(test)))
self.stream.writeln(self.separator2)
self.stream.writeln("%s" % err)
self.stream.writeln(self.separator2)
self.stream.writeln("%s" % sql_debug)
class RemoteTestResult:
"""
Record information about which tests have succeeded and which have failed.
The sole purpose of this class is to record events in the child processes
so they can be replayed in the master process. As a consequence it doesn't
inherit unittest.TestResult and doesn't attempt to implement all its API.
The implementation matches the unpythonic coding style of unittest2.
"""
def __init__(self):
if tblib is not None:
tblib.pickling_support.install()
self.events = []
self.failfast = False
self.shouldStop = False
self.testsRun = 0
@property
def test_index(self):
return self.testsRun - 1
def _confirm_picklable(self, obj):
"""
Confirm that obj can be pickled and unpickled as multiprocessing will
need to pickle the exception in the child process and unpickle it in
the parent process. Let the exception rise, if not.
"""
pickle.loads(pickle.dumps(obj))
def _print_unpicklable_subtest(self, test, subtest, pickle_exc):
print("""
Subtest failed:
test: {}
subtest: {}
Unfortunately, the subtest that failed cannot be pickled, so the parallel
test runner cannot handle it cleanly. Here is the pickling error:
> {}
You should re-run this test with --parallel=1 to reproduce the failure
with a cleaner failure message.
""".format(test, subtest, pickle_exc))
def check_picklable(self, test, err):
# Ensure that sys.exc_info() tuples are picklable. This displays a
# clear multiprocessing.pool.RemoteTraceback generated in the child
# process instead of a multiprocessing.pool.MaybeEncodingError, making
# the root cause easier to figure out for users who aren't familiar
# with the multiprocessing module. Since we're in a forked process,
# our best chance to communicate with them is to print to stdout.
try:
self._confirm_picklable(err)
except Exception as exc:
original_exc_txt = repr(err[1])
original_exc_txt = textwrap.fill(original_exc_txt, 75, initial_indent=' ', subsequent_indent=' ')
pickle_exc_txt = repr(exc)
pickle_exc_txt = textwrap.fill(pickle_exc_txt, 75, initial_indent=' ', subsequent_indent=' ')
if tblib is None:
print("""
{} failed:
{}
Unfortunately, tracebacks cannot be pickled, making it impossible for the
parallel test runner to handle this exception cleanly.
In order to see the traceback, you should install tblib:
pip install tblib
""".format(test, original_exc_txt))
else:
print("""
{} failed:
{}
Unfortunately, the exception it raised cannot be pickled, making it impossible
for the parallel test runner to handle it cleanly.
Here's the error encountered while trying to pickle the exception:
{}
You should re-run this test with the --parallel=1 option to reproduce the
failure and get a correct traceback.
""".format(test, original_exc_txt, pickle_exc_txt))
raise
def check_subtest_picklable(self, test, subtest):
try:
self._confirm_picklable(subtest)
except Exception as exc:
self._print_unpicklable_subtest(test, subtest, exc)
raise
def stop_if_failfast(self):
if self.failfast:
self.stop()
def stop(self):
self.shouldStop = True
def startTestRun(self):
self.events.append(('startTestRun',))
def stopTestRun(self):
self.events.append(('stopTestRun',))
def startTest(self, test):
self.testsRun += 1
self.events.append(('startTest', self.test_index))
def stopTest(self, test):
self.events.append(('stopTest', self.test_index))
def addError(self, test, err):
self.check_picklable(test, err)
self.events.append(('addError', self.test_index, err))
self.stop_if_failfast()
def addFailure(self, test, err):
self.check_picklable(test, err)
self.events.append(('addFailure', self.test_index, err))
self.stop_if_failfast()
def addSubTest(self, test, subtest, err):
# Follow Python 3.5's implementation of unittest.TestResult.addSubTest()
# by not doing anything when a subtest is successful.
if err is not None:
# Call check_picklable() before check_subtest_picklable() since
# check_picklable() performs the tblib check.
self.check_picklable(test, err)
self.check_subtest_picklable(test, subtest)
self.events.append(('addSubTest', self.test_index, subtest, err))
self.stop_if_failfast()
def addSuccess(self, test):
self.events.append(('addSuccess', self.test_index))
def addSkip(self, test, reason):
self.events.append(('addSkip', self.test_index, reason))
def addExpectedFailure(self, test, err):
# If tblib isn't installed, pickling the traceback will always fail.
# However we don't want tblib to be required for running the tests
# when they pass or fail as expected. Drop the traceback when an
# expected failure occurs.
if tblib is None:
err = err[0], err[1], None
self.check_picklable(test, err)
self.events.append(('addExpectedFailure', self.test_index, err))
def addUnexpectedSuccess(self, test):
self.events.append(('addUnexpectedSuccess', self.test_index))
self.stop_if_failfast()
class RemoteTestRunner:
"""
Run tests and record everything but don't display anything.
The implementation matches the unpythonic coding style of unittest2.
"""
resultclass = RemoteTestResult
def __init__(self, failfast=False, resultclass=None):
self.failfast = failfast
if resultclass is not None:
self.resultclass = resultclass
def run(self, test):
result = self.resultclass()
unittest.registerResult(result)
result.failfast = self.failfast
test(result)
return result
def default_test_processes():
"""
Default number of test processes when using the --parallel option.
"""
# The current implementation of the parallel test runner requires
# multiprocessing to start subprocesses with fork().
if multiprocessing.get_start_method() != 'fork':
return 1
try:
return int(os.environ['DJANGO_TEST_PROCESSES'])
except KeyError:
return multiprocessing.cpu_count()
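# Illustrative note (not part of the original module): the worker count can be
# pinned through the environment; default_test_processes() then reports that
# value and add_arguments() below uses it for a bare --parallel flag, e.g.:
#   DJANGO_TEST_PROCESSES=4 ./manage.py test --parallel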
_worker_id = 0
def _init_worker(counter):
"""
Switch to databases dedicated to this worker.
This helper lives at module-level because of the multiprocessing module's
requirements.
"""
global _worker_id
with counter.get_lock():
counter.value += 1
_worker_id = counter.value
for alias in connections:
connection = connections[alias]
settings_dict = connection.creation.get_test_db_clone_settings(_worker_id)
# connection.settings_dict must be updated in place for changes to be
# reflected in django.db.connections. If the following line assigned
# connection.settings_dict = settings_dict, new threads would connect
# to the default database instead of the appropriate clone.
connection.settings_dict.update(settings_dict)
connection.close()
def _run_subsuite(args):
"""
Run a suite of tests with a RemoteTestRunner and return a RemoteTestResult.
This helper lives at module-level and its arguments are wrapped in a tuple
because of the multiprocessing module's requirements.
"""
runner_class, subsuite_index, subsuite, failfast = args
runner = runner_class(failfast=failfast)
result = runner.run(subsuite)
return subsuite_index, result.events
class ParallelTestSuite(unittest.TestSuite):
"""
Run a series of tests in parallel in several processes.
While the unittest module's documentation implies that orchestrating the
execution of tests is the responsibility of the test runner, in practice,
it appears that TestRunner classes are more concerned with formatting and
displaying test results.
Since there are fewer use cases for customizing TestSuite than TestRunner,
implementing parallelization at the level of the TestSuite improves
interoperability with existing custom test runners. A single instance of a
test runner can still collect results from all tests without being aware
that they have been run in parallel.
"""
# In case someone wants to modify these in a subclass.
init_worker = _init_worker
run_subsuite = _run_subsuite
runner_class = RemoteTestRunner
def __init__(self, suite, processes, failfast=False):
self.subsuites = partition_suite_by_case(suite)
self.processes = processes
self.failfast = failfast
super().__init__()
def run(self, result):
"""
Distribute test cases across workers.
Return an identifier of each test case with its result in order to use
imap_unordered to show results as soon as they're available.
To minimize pickling errors when getting results from workers:
- pass back numeric indexes in self.subsuites instead of tests
- make tracebacks picklable with tblib, if available
Even with tblib, errors may still occur for dynamically created
        exception classes such as Model.DoesNotExist, which cannot be unpickled.
"""
counter = multiprocessing.Value(ctypes.c_int, 0)
pool = multiprocessing.Pool(
processes=self.processes,
initializer=self.init_worker.__func__,
initargs=[counter])
args = [
(self.runner_class, index, subsuite, self.failfast)
for index, subsuite in enumerate(self.subsuites)
]
test_results = pool.imap_unordered(self.run_subsuite.__func__, args)
while True:
if result.shouldStop:
pool.terminate()
break
try:
subsuite_index, events = test_results.next(timeout=0.1)
except multiprocessing.TimeoutError:
continue
except StopIteration:
pool.close()
break
tests = list(self.subsuites[subsuite_index])
for event in events:
event_name = event[0]
handler = getattr(result, event_name, None)
if handler is None:
continue
test = tests[event[1]]
args = event[2:]
handler(test, *args)
pool.join()
return result
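# Illustrative sketch (not part of the original module): ParallelTestSuite can
# be driven directly, assuming Django settings and test databases are already
# set up and that a 'myapp.tests' package (hypothetical) exists. Since tests
# are partitioned per TestCase, more processes than TestCase classes buy nothing.
def _example_parallel_suite():
    import unittest
    suite = unittest.defaultTestLoader.discover('myapp.tests')
    parallel = ParallelTestSuite(suite, processes=4)
    result = unittest.TestResult()
    parallel.run(result)
    return result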
class DiscoverRunner:
"""
A Django test runner that uses unittest2 test discovery.
"""
test_suite = unittest.TestSuite
parallel_test_suite = ParallelTestSuite
test_runner = unittest.TextTestRunner
test_loader = unittest.defaultTestLoader
reorder_by = (TestCase, SimpleTestCase)
def __init__(self, pattern=None, top_level=None, verbosity=1,
interactive=True, failfast=False, keepdb=False,
reverse=False, debug_mode=False, debug_sql=False, parallel=0,
tags=None, exclude_tags=None, **kwargs):
self.pattern = pattern
self.top_level = top_level
self.verbosity = verbosity
self.interactive = interactive
self.failfast = failfast
self.keepdb = keepdb
self.reverse = reverse
self.debug_mode = debug_mode
self.debug_sql = debug_sql
self.parallel = parallel
self.tags = set(tags or [])
self.exclude_tags = set(exclude_tags or [])
@classmethod
def add_arguments(cls, parser):
parser.add_argument(
'-t', '--top-level-directory', action='store', dest='top_level', default=None,
help='Top level of project for unittest discovery.',
)
parser.add_argument(
'-p', '--pattern', action='store', dest='pattern', default="test*.py",
help='The test matching pattern. Defaults to test*.py.',
)
parser.add_argument(
'-k', '--keepdb', action='store_true', dest='keepdb', default=False,
help='Preserves the test DB between runs.'
)
parser.add_argument(
'-r', '--reverse', action='store_true', dest='reverse', default=False,
help='Reverses test cases order.',
)
parser.add_argument(
'--debug-mode', action='store_true', dest='debug_mode', default=False,
help='Sets settings.DEBUG to True.',
)
parser.add_argument(
'-d', '--debug-sql', action='store_true', dest='debug_sql', default=False,
help='Prints logged SQL queries on failure.',
)
parser.add_argument(
'--parallel', dest='parallel', nargs='?', default=1, type=int,
const=default_test_processes(), metavar='N',
help='Run tests using up to N parallel processes.',
)
parser.add_argument(
'--tag', action='append', dest='tags',
help='Run only tests with the specified tag. Can be used multiple times.',
)
parser.add_argument(
'--exclude-tag', action='append', dest='exclude_tags',
help='Do not run tests with the specified tag. Can be used multiple times.',
)
def setup_test_environment(self, **kwargs):
setup_test_environment(debug=self.debug_mode)
unittest.installHandler()
def build_suite(self, test_labels=None, extra_tests=None, **kwargs):
suite = self.test_suite()
test_labels = test_labels or ['.']
extra_tests = extra_tests or []
discover_kwargs = {}
if self.pattern is not None:
discover_kwargs['pattern'] = self.pattern
if self.top_level is not None:
discover_kwargs['top_level_dir'] = self.top_level
for label in test_labels:
kwargs = discover_kwargs.copy()
tests = None
label_as_path = os.path.abspath(label)
# if a module, or "module.ClassName[.method_name]", just run those
if not os.path.exists(label_as_path):
tests = self.test_loader.loadTestsFromName(label)
elif os.path.isdir(label_as_path) and not self.top_level:
# Try to be a bit smarter than unittest about finding the
# default top-level for a given directory path, to avoid
# breaking relative imports. (Unittest's default is to set
# top-level equal to the path, which means relative imports
# will result in "Attempted relative import in non-package.").
# We'd be happy to skip this and require dotted module paths
# (which don't cause this problem) instead of file paths (which
# do), but in the case of a directory in the cwd, which would
# be equally valid if considered as a top-level module or as a
# directory path, unittest unfortunately prefers the latter.
top_level = label_as_path
while True:
init_py = os.path.join(top_level, '__init__.py')
if os.path.exists(init_py):
try_next = os.path.dirname(top_level)
if try_next == top_level:
# __init__.py all the way down? give up.
break
top_level = try_next
continue
break
kwargs['top_level_dir'] = top_level
if not (tests and tests.countTestCases()) and is_discoverable(label):
# Try discovery if path is a package or directory
tests = self.test_loader.discover(start_dir=label, **kwargs)
# Make unittest forget the top-level dir it calculated from this
# run, to support running tests from two different top-levels.
self.test_loader._top_level_dir = None
suite.addTests(tests)
for test in extra_tests:
suite.addTest(test)
if self.tags or self.exclude_tags:
suite = filter_tests_by_tags(suite, self.tags, self.exclude_tags)
suite = reorder_suite(suite, self.reorder_by, self.reverse)
if self.parallel > 1:
parallel_suite = self.parallel_test_suite(suite, self.parallel, self.failfast)
# Since tests are distributed across processes on a per-TestCase
# basis, there's no need for more processes than TestCases.
parallel_units = len(parallel_suite.subsuites)
if self.parallel > parallel_units:
self.parallel = parallel_units
# If there's only one TestCase, parallelization isn't needed.
if self.parallel > 1:
suite = parallel_suite
return suite
def setup_databases(self, **kwargs):
return _setup_databases(
self.verbosity, self.interactive, self.keepdb, self.debug_sql,
self.parallel, **kwargs
)
def get_resultclass(self):
return DebugSQLTextTestResult if self.debug_sql else None
def get_test_runner_kwargs(self):
return {
'failfast': self.failfast,
'resultclass': self.get_resultclass(),
'verbosity': self.verbosity,
}
def run_checks(self):
# Checks are run after database creation since some checks require
# database access.
call_command('check', verbosity=self.verbosity)
def run_suite(self, suite, **kwargs):
kwargs = self.get_test_runner_kwargs()
runner = self.test_runner(**kwargs)
return runner.run(suite)
def teardown_databases(self, old_config, **kwargs):
"""
Destroys all the non-mirror databases.
"""
_teardown_databases(
old_config,
verbosity=self.verbosity,
parallel=self.parallel,
keepdb=self.keepdb,
)
def teardown_test_environment(self, **kwargs):
unittest.removeHandler()
teardown_test_environment()
def suite_result(self, suite, result, **kwargs):
return len(result.failures) + len(result.errors)
def run_tests(self, test_labels, extra_tests=None, **kwargs):
"""
Run the unit tests for all the test labels in the provided list.
Test labels should be dotted Python paths to test modules, test
classes, or test methods.
A list of 'extra' tests may also be provided; these tests
will be added to the test suite.
Returns the number of tests that failed.
"""
self.setup_test_environment()
suite = self.build_suite(test_labels, extra_tests)
old_config = self.setup_databases()
self.run_checks()
result = self.run_suite(suite)
self.teardown_databases(old_config)
self.teardown_test_environment()
return self.suite_result(suite, result)
def is_discoverable(label):
"""
    Check if a test label points to a Python package or file directory.
Relative labels like "." and ".." are seen as directories.
"""
try:
mod = import_module(label)
except (ImportError, TypeError):
pass
else:
return hasattr(mod, '__path__')
return os.path.isdir(os.path.abspath(label))
def reorder_suite(suite, classes, reverse=False):
"""
Reorders a test suite by test type.
    `classes` is a sequence of types.
All tests of type classes[0] are placed first, then tests of type
classes[1], etc. Tests with no match in classes are placed last.
If `reverse` is True, tests within classes are sorted in opposite order,
but test classes are not reversed.
"""
class_count = len(classes)
suite_class = type(suite)
bins = [OrderedSet() for i in range(class_count + 1)]
partition_suite_by_type(suite, classes, bins, reverse=reverse)
reordered_suite = suite_class()
for i in range(class_count + 1):
reordered_suite.addTests(bins[i])
return reordered_suite
def partition_suite_by_type(suite, classes, bins, reverse=False):
"""
Partitions a test suite by test type. Also prevents duplicated tests.
classes is a sequence of types
bins is a sequence of TestSuites, one more than classes
reverse changes the ordering of tests within bins
Tests of type classes[i] are added to bins[i],
    tests with no match found in classes are placed in bins[-1]
"""
suite_class = type(suite)
if reverse:
suite = reversed(tuple(suite))
for test in suite:
if isinstance(test, suite_class):
partition_suite_by_type(test, classes, bins, reverse=reverse)
else:
for i in range(len(classes)):
if isinstance(test, classes[i]):
bins[i].add(test)
break
else:
bins[-1].add(test)
def partition_suite_by_case(suite):
"""
Partitions a test suite by test case, preserving the order of tests.
"""
groups = []
suite_class = type(suite)
for test_type, test_group in itertools.groupby(suite, type):
if issubclass(test_type, unittest.TestCase):
groups.append(suite_class(test_group))
else:
for item in test_group:
groups.extend(partition_suite_by_case(item))
return groups
def setup_databases(*args, **kwargs):
warnings.warn(
'`django.test.runner.setup_databases()` has moved to '
'`django.test.utils.setup_databases()`.',
RemovedInDjango21Warning,
stacklevel=2,
)
return _setup_databases(*args, **kwargs)
def filter_tests_by_tags(suite, tags, exclude_tags):
suite_class = type(suite)
filtered_suite = suite_class()
for test in suite:
if isinstance(test, suite_class):
filtered_suite.addTests(filter_tests_by_tags(test, tags, exclude_tags))
else:
test_tags = set(getattr(test, 'tags', set()))
test_fn_name = getattr(test, '_testMethodName', str(test))
test_fn = getattr(test, test_fn_name, test)
test_fn_tags = set(getattr(test_fn, 'tags', set()))
all_tags = test_tags.union(test_fn_tags)
matched_tags = all_tags.intersection(tags)
if (matched_tags or not tags) and not all_tags.intersection(exclude_tags):
filtered_suite.addTest(test)
return filtered_suite
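# Illustrative sketch (not part of the original module): tags are collected
# from both the test method and its class (django.test.tag sets them) and
# unioned before matching, so either level can opt a test in or out.
def _example_filter_by_tags():
    import unittest
    from django.test import SimpleTestCase, tag
    class TaggedTests(SimpleTestCase):
        @tag('fast')
        def test_quick(self):
            pass
        @tag('slow')
        def test_heavy(self):
            pass
    suite = unittest.defaultTestLoader.loadTestsFromTestCase(TaggedTests)
    # Keep 'fast' tests, drop anything also tagged 'slow'.
    return filter_tests_by_tags(suite, {'fast'}, {'slow'})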
|
|
# *****************************************************************************
# Copyright (c) 2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
from sdc.str_arr_ext import (StringArray, StringArrayType, string_array_type,
pre_alloc_string_array, StringArrayPayloadType,
is_str_arr_typ)
from sdc.str_ext import string_type, gen_get_unicode_chars
from sdc.utilities.utils import to_array
import sdc
import operator
import numba
from numba import types, typing, generated_jit
from numba.extending import box, unbox, NativeValue
from numba.extending import models, register_model
from numba.extending import lower_builtin, overload_method, overload, intrinsic
from numba.targets.imputils import (impl_ret_new_ref, impl_ret_borrowed,
iternext_impl, impl_ret_untracked, RefType)
from numba import cgutils
from numba.typing.templates import signature, AbstractTemplate, infer, infer_global
from llvmlite import ir as lir
import llvmlite.binding as ll
from . import hset_ext
ll.add_symbol('init_set_string', hset_ext.init_set_string)
ll.add_symbol('insert_set_string', hset_ext.insert_set_string)
ll.add_symbol('len_set_string', hset_ext.len_set_string)
ll.add_symbol('set_in_string', hset_ext.set_in_string)
ll.add_symbol('set_iterator_string', hset_ext.set_iterator_string)
ll.add_symbol('set_itervalid_string', hset_ext.set_itervalid_string)
ll.add_symbol('set_nextval_string', hset_ext.set_nextval_string)
ll.add_symbol('num_total_chars_set_string', hset_ext.num_total_chars_set_string)
ll.add_symbol('populate_str_arr_from_set', hset_ext.populate_str_arr_from_set)
# similar to types.Container.Set
class SetType(types.Container):
def __init__(self, dtype):
self.dtype = dtype
super(SetType, self).__init__(
name='SetType({})'.format(dtype))
@property
def key(self):
return self.dtype
@property
def iterator_type(self):
return SetIterType(self)
def is_precise(self):
return self.dtype.is_precise()
set_string_type = SetType(string_type)
class SetIterType(types.BaseContainerIterator):
container_class = SetType
register_model(SetType)(models.OpaqueModel)
_init_set_string = types.ExternalFunction("init_set_string",
set_string_type())
def init_set_string():
return set()
@overload(init_set_string)
def init_set_overload():
return lambda: _init_set_string()
add_set_string = types.ExternalFunction("insert_set_string",
types.void(set_string_type, types.voidptr))
len_set_string = types.ExternalFunction("len_set_string",
types.intp(set_string_type))
num_total_chars_set_string = types.ExternalFunction("num_total_chars_set_string",
types.intp(set_string_type))
# TODO: box set(string)
@generated_jit(nopython=True, cache=True)
def build_set(A):
if is_str_arr_typ(A):
return _build_str_set_impl
else:
return lambda A: set(A)
def _build_str_set_impl(A):
str_arr = sdc.hiframes.api.dummy_unbox_series(A)
str_set = init_set_string()
n = len(str_arr)
for i in range(n):
_str = str_arr[i]
str_set.add(_str)
return str_set
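# Illustrative sketch (not part of the original module): build_set() is meant
# to be called from jitted code; a StringArray argument selects the native
# string-set implementation, while any other iterable falls back to set(A).
def _example_count_unique(str_arr):
    @numba.njit
    def count_unique(A):
        return len(build_set(A))
    return count_unique(str_arr)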
# TODO: remove since probably unused
@overload(set)
def init_set_string_array(A):
if is_str_arr_typ(A):
return _build_str_set_impl
@overload_method(SetType, 'add')
def set_add_overload(set_obj, item):
# TODO: expand to other set types
assert set_obj == set_string_type and item == string_type
def add_impl(set_obj, item):
return add_set_string(set_obj, item._data)
return add_impl
@overload(len)
def len_set_str_overload(A):
if A == set_string_type:
def len_impl(A):
return len_set_string(A)
return len_impl
# FIXME: overload fails in lowering sometimes!
@lower_builtin(len, set_string_type)
def lower_len_set_impl(context, builder, sig, args):
def len_impl(str_set):
return len_set_string(str_set)
res = context.compile_internal(builder, len_impl, sig, args)
return impl_ret_untracked(context, builder, sig.return_type, res)
@infer
class InSet(AbstractTemplate):
key = "in"
def generic(self, args, kws):
_, cont_typ = args
if cont_typ == set_string_type:
return signature(types.boolean, cont_typ.dtype, cont_typ)
@infer_global(operator.contains)
class InSetOp(AbstractTemplate):
def generic(self, args, kws):
cont_typ, _ = args
if cont_typ == set_string_type:
return signature(types.boolean, cont_typ, cont_typ.dtype)
@lower_builtin("in", string_type, set_string_type)
def lower_dict_in(context, builder, sig, args):
fnty = lir.FunctionType(lir.IntType(1), [lir.IntType(8).as_pointer(),
lir.IntType(8).as_pointer()])
fn = builder.module.get_or_insert_function(fnty, name="set_in_string")
return builder.call(fn, args)
@lower_builtin(operator.contains, set_string_type, string_type)
def lower_dict_in_op(context, builder, sig, args):
set_str, unicode_str = args
char_str = gen_get_unicode_chars(context, builder, unicode_str)
fnty = lir.FunctionType(lir.IntType(1), [lir.IntType(8).as_pointer(),
lir.IntType(8).as_pointer()])
fn = builder.module.get_or_insert_function(fnty, name="set_in_string")
return builder.call(fn, [char_str, set_str])
@overload(to_array)
def to_array_overload(A):
if A == set_string_type:
def set_string_to_array(A):
# TODO: support unicode
num_total_chars = num_total_chars_set_string(A)
num_strs = len(A)
str_arr = pre_alloc_string_array(num_strs, num_total_chars)
populate_str_arr_from_set(A, str_arr)
return str_arr
return set_string_to_array
@intrinsic
def populate_str_arr_from_set(typingctx, in_set_typ, in_str_arr_typ=None):
assert in_set_typ == set_string_type
assert is_str_arr_typ(in_str_arr_typ)
def codegen(context, builder, sig, args):
in_set, in_str_arr = args
string_array = context.make_helper(builder, string_array_type, in_str_arr)
fnty = lir.FunctionType(lir.VoidType(),
[lir.IntType(8).as_pointer(),
lir.IntType(32).as_pointer(),
lir.IntType(8).as_pointer(),
])
fn_getitem = builder.module.get_or_insert_function(fnty,
name="populate_str_arr_from_set")
builder.call(fn_getitem, [in_set, string_array.offsets,
string_array.data])
return context.get_dummy_value()
return types.void(set_string_type, string_array_type), codegen
# TODO: delete iterator
@register_model(SetIterType)
class StrSetIteratorModel(models.StructModel):
def __init__(self, dmm, fe_type):
members = [('itp', types.Opaque('SetIterPtr')),
('set', set_string_type)]
super(StrSetIteratorModel, self).__init__(dmm, fe_type, members)
@lower_builtin('getiter', SetType)
def getiter_set(context, builder, sig, args):
fnty = lir.FunctionType(lir.IntType(8).as_pointer(),
[lir.IntType(8).as_pointer()])
fn = builder.module.get_or_insert_function(fnty, name="set_iterator_string")
itp = builder.call(fn, args)
iterobj = context.make_helper(builder, sig.return_type)
iterobj.itp = itp
iterobj.set = args[0]
return iterobj._getvalue()
@lower_builtin('iternext', SetIterType)
@iternext_impl(RefType.NEW)
def iternext_setiter(context, builder, sig, args, result):
iterty, = sig.args
it, = args
iterobj = context.make_helper(builder, iterty, value=it)
fnty = lir.FunctionType(lir.IntType(1),
[lir.IntType(8).as_pointer(), lir.IntType(8).as_pointer()])
fn = builder.module.get_or_insert_function(fnty, name="set_itervalid_string")
is_valid = builder.call(fn, [iterobj.itp, iterobj.set])
result.set_valid(is_valid)
fnty = lir.FunctionType(lir.IntType(8).as_pointer(),
[lir.IntType(8).as_pointer()])
fn = builder.module.get_or_insert_function(fnty, name="set_nextval_string")
kind = numba.unicode.PY_UNICODE_1BYTE_KIND
def std_str_to_unicode(std_str):
length = sdc.str_ext.get_std_str_len(std_str)
ret = numba.unicode._empty_string(kind, length)
sdc.str_arr_ext._memcpy(
ret._data, sdc.str_ext.get_c_str(std_str), length, 1)
sdc.str_ext.del_str(std_str)
return ret
with builder.if_then(is_valid):
val = builder.call(fn, [iterobj.itp])
val = context.compile_internal(
builder,
std_str_to_unicode,
string_type(sdc.str_ext.std_str_type),
[val])
result.yield_(val)
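# Illustrative sketch (not part of the original module): with the iterator
# model and the 'getiter'/'iternext' lowerings above, the native string set
# can be consumed with a plain for loop in nopython mode.
def _example_iterate_set(str_arr):
    @numba.njit
    def count_members(A):
        s = build_set(A)
        n = 0
        for _ in s:
            n += 1
        return n
    return count_members(str_arr)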
|
|
import os
import sys
from bcbio.rnaseq import (featureCounts, cufflinks, oncofuse, count, dexseq,
express, variation, stringtie, sailfish)
from bcbio.ngsalign import bowtie2, alignprep
from bcbio.variation import vardict
import bcbio.pipeline.datadict as dd
from bcbio.utils import filter_missing, flatten
from bcbio.log import logger
def rnaseq_variant_calling(samples, run_parallel):
"""
run RNA-seq variant calling using GATK
"""
samples = run_parallel("run_rnaseq_variant_calling", samples)
samples = run_parallel("run_rnaseq_joint_genotyping", [samples])
return samples
def run_rnaseq_variant_calling(data):
variantcaller = dd.get_variantcaller(data)
if isinstance(variantcaller, list) and len(variantcaller) > 1:
logger.error("Only one variantcaller can be run for RNA-seq at "
"this time. Post an issue here "
"(https://github.com/chapmanb/bcbio-nextgen/issues) "
"if this is something you need to do.")
sys.exit(1)
if variantcaller and "gatk" in variantcaller:
data = variation.rnaseq_gatk_variant_calling(data)
if vardict.get_vardict_command(data):
data = variation.rnaseq_vardict_variant_calling(data)
return [[data]]
def run_rnaseq_joint_genotyping(*samples):
data = samples[0][0]
variantcaller = dd.get_variantcaller(data)
if not variantcaller:
return samples
if "gatk" not in variantcaller:
return samples
ref_file = dd.get_ref_file(data)
out_file = os.path.join(dd.get_work_dir(data, "."), "variation", "combined.vcf")
if variantcaller and "gatk" in variantcaller:
vrn_files = [dd.get_vrn_file(d) for d in dd.sample_data_iterator(samples)]
out_file = variation.gatk_joint_calling(data, vrn_files, ref_file, out_file)
updated_samples = []
for data in dd.sample_data_iterator(samples):
data = dd.set_square_vcf(data, out_file)
updated_samples.append([data])
return updated_samples
return samples
def quantitate_expression_parallel(samples, run_parallel):
"""
    quantitate expression; all programs run here should be multithreaded to
take advantage of the threaded run_parallel environment
"""
data = samples[0][0]
samples = run_parallel("generate_transcript_counts", samples)
samples = run_parallel("run_sailfish", samples)
samples = sailfish.combine_sailfish(samples)
if "cufflinks" in dd.get_expression_caller(data):
samples = run_parallel("run_cufflinks", samples)
if "stringtie" in dd.get_expression_caller(data):
samples = run_parallel("run_stringtie_expression", samples)
return samples
def quantitate_expression_noparallel(samples, run_parallel):
"""
run transcript quantitation for algorithms that don't run in parallel
"""
data = samples[0][0]
if "express" in dd.get_expression_caller(data):
samples = run_parallel("run_express", samples)
samples = run_parallel("run_dexseq", samples)
return samples
def generate_transcript_counts(data):
"""Generate counts per transcript and per exon from an alignment"""
data["count_file"] = featureCounts.count(data)
if dd.get_fusion_mode(data, False):
oncofuse_file = oncofuse.run(data)
if oncofuse_file:
data = dd.set_oncofuse_file(data, oncofuse_file)
if dd.get_transcriptome_align(data) and not dd.get_transcriptome_bam(data):
file1, file2 = None, None
if dd.get_disambiguate(data):
bam_path = data["work_bam"]
fastq_paths = alignprep._bgzip_from_bam(bam_path, data["dirs"], data["config"], is_retry=False, output_infix='-transcriptome')
if len(fastq_paths) == 2:
file1, file2 = fastq_paths
else:
file1, file2 = fastq_paths[0], None
else:
file1, file2 = dd.get_input_sequence_files(data)
ref_file = dd.get_ref_file(data)
logger.info("Transcriptome alignment was flagged to run, but the "
"transcriptome BAM file was not found. Aligning to the "
"transcriptome with bowtie2.")
data = bowtie2.align_transcriptome(file1, file2, ref_file, data)
return [[data]]
def run_stringtie_expression(data):
"""Calculate transcript and gene level FPKM with Stringtie"""
data = stringtie.run_stringtie_expression(data)
return [[data]]
def run_dexseq(data):
"""Quantitate exon-level counts with DEXSeq"""
if dd.get_dexseq_gff(data, None):
data = dexseq.bcbio_run(data)
return [[data]]
def run_express(data):
"""Quantitative isoform expression by eXpress"""
data = express.run(data)
return [[data]]
def combine_express(samples, combined):
"""Combine tpm, effective counts and fpkm from express results"""
to_combine = [dd.get_express_counts(x) for x in
dd.sample_data_iterator(samples) if dd.get_express_counts(x)]
gtf_file = dd.get_gtf_file(samples[0][0])
isoform_to_gene_file = os.path.join(os.path.dirname(combined), "isoform_to_gene.txt")
isoform_to_gene_file = express.isoform_to_gene_name(gtf_file, isoform_to_gene_file)
if len(to_combine) > 0:
eff_counts_combined_file = os.path.splitext(combined)[0] + ".isoform.express_counts"
eff_counts_combined = count.combine_count_files(to_combine, eff_counts_combined_file, ext=".counts")
to_combine = [dd.get_express_tpm(x) for x in
dd.sample_data_iterator(samples) if dd.get_express_tpm(x)]
tpm_counts_combined_file = os.path.splitext(combined)[0] + ".isoform.express_tpm"
tpm_counts_combined = count.combine_count_files(to_combine, tpm_counts_combined_file)
to_combine = [dd.get_express_fpkm(x) for x in dd.sample_data_iterator(samples)
if dd.get_express_fpkm(x)]
fpkm_counts_combined_file = os.path.splitext(combined)[0] + ".isoform.express_fpkm"
fpkm_counts_combined = count.combine_count_files(to_combine, fpkm_counts_combined_file, ext=".fpkm")
return {'counts': eff_counts_combined, 'tpm': tpm_counts_combined,
'fpkm': fpkm_counts_combined, 'isoform_to_gene': isoform_to_gene_file}
return {}
def run_cufflinks(data):
"""Quantitate transcript expression with Cufflinks"""
if "cufflinks" in dd.get_tools_off(data):
return [[data]]
work_bam = dd.get_work_bam(data)
ref_file = dd.get_sam_ref(data)
out_dir, fpkm_file, fpkm_isoform_file = cufflinks.run(work_bam, ref_file, data)
data = dd.set_cufflinks_dir(data, out_dir)
data = dd.set_fpkm(data, fpkm_file)
data = dd.set_fpkm_isoform(data, fpkm_isoform_file)
return [[data]]
def cufflinks_assemble(data):
bam_file = dd.get_work_bam(data)
ref_file = dd.get_sam_ref(data)
out_dir = os.path.join(dd.get_work_dir(data), "assembly")
num_cores = dd.get_num_cores(data)
assembled_gtf = cufflinks.assemble(bam_file, ref_file, num_cores, out_dir, data)
dd.get_assembled_gtf(data).append(assembled_gtf)
return [[data]]
def cufflinks_merge(*samples):
to_merge = filter_missing(flatten([dd.get_assembled_gtf(data) for data in
dd.sample_data_iterator(samples)]))
data = samples[0][0]
ref_file = dd.get_sam_ref(data)
gtf_file = dd.get_gtf_file(data)
num_cores = dd.get_num_cores(data)
merged_gtf = cufflinks.merge(to_merge, ref_file, gtf_file, num_cores,
samples[0][0])
updated_samples = []
for data in dd.sample_data_iterator(samples):
data = dd.set_merged_gtf(data, merged_gtf)
updated_samples.append([data])
return updated_samples
def stringtie_merge(*samples):
to_merge = filter_missing(flatten([dd.get_assembled_gtf(data) for data in
dd.sample_data_iterator(samples)]))
data = samples[0][0]
ref_file = dd.get_sam_ref(data)
gtf_file = dd.get_gtf_file(data)
num_cores = dd.get_num_cores(data)
merged_gtf = stringtie.merge(to_merge, ref_file, gtf_file, num_cores, data)
updated_samples = []
for data in dd.sample_data_iterator(samples):
data = dd.set_merged_gtf(data, merged_gtf)
updated_samples.append([data])
return updated_samples
def assemble_transcripts(run_parallel, samples):
"""
assembly strategy rationale implemented as suggested in
http://www.nature.com/nprot/journal/v7/n3/full/nprot.2012.016.html
    run Cufflinks without a reference GTF for each individual sample
merge the assemblies with Cuffmerge using a reference GTF
"""
assembler = dd.get_in_samples(samples, dd.get_transcript_assembler)
data = samples[0][0]
if assembler:
if "cufflinks" in assembler:
samples = run_parallel("cufflinks_assemble", samples)
if "stringtie" in assembler:
samples = run_parallel("run_stringtie_expression", samples)
if "stringtie" in assembler and stringtie.supports_merge(data):
samples = run_parallel("stringtie_merge", [samples])
else:
samples = run_parallel("cufflinks_merge", [samples])
return samples
def combine_files(samples):
"""
after quantitation, combine the counts/FPKM/TPM/etc into a single table with
all samples
"""
gtf_file = dd.get_gtf_file(samples[0][0], None)
dexseq_gff = dd.get_dexseq_gff(samples[0][0])
# combine featureCount files
count_files = filter_missing([dd.get_count_file(x[0]) for x in samples])
combined = count.combine_count_files(count_files, ext=".counts")
annotated = count.annotate_combined_count_file(combined, gtf_file)
# combine eXpress files
express_counts_combined = combine_express(samples, combined)
# combine Cufflinks files
fpkm_combined_file = os.path.splitext(combined)[0] + ".fpkm"
fpkm_files = filter_missing([dd.get_fpkm(x[0]) for x in samples])
if fpkm_files:
fpkm_combined = count.combine_count_files(fpkm_files, fpkm_combined_file)
else:
fpkm_combined = None
fpkm_isoform_combined_file = os.path.splitext(combined)[0] + ".isoform.fpkm"
isoform_files = filter_missing([dd.get_fpkm_isoform(x[0]) for x in samples])
if isoform_files:
fpkm_isoform_combined = count.combine_count_files(isoform_files,
fpkm_isoform_combined_file,
".isoform.fpkm")
else:
fpkm_isoform_combined = None
# combine DEXseq files
dexseq_combined_file = os.path.splitext(combined)[0] + ".dexseq"
to_combine_dexseq = filter_missing([dd.get_dexseq_counts(data[0]) for data in samples])
if to_combine_dexseq:
dexseq_combined = count.combine_count_files(to_combine_dexseq,
dexseq_combined_file, ".dexseq")
dexseq.create_dexseq_annotation(dexseq_gff, dexseq_combined)
else:
dexseq_combined = None
updated_samples = []
for data in dd.sample_data_iterator(samples):
data = dd.set_combined_counts(data, combined)
if annotated:
data = dd.set_annotated_combined_counts(data, annotated)
if fpkm_combined:
data = dd.set_combined_fpkm(data, fpkm_combined)
if fpkm_isoform_combined:
data = dd.set_combined_fpkm_isoform(data, fpkm_isoform_combined)
if express_counts_combined:
data = dd.set_express_counts(data, express_counts_combined['counts'])
data = dd.set_express_tpm(data, express_counts_combined['tpm'])
data = dd.set_express_fpkm(data, express_counts_combined['fpkm'])
data = dd.set_isoform_to_gene(data, express_counts_combined['isoform_to_gene'])
if dexseq_combined:
data = dd.set_dexseq_counts(data, dexseq_combined_file)
updated_samples.append([data])
return updated_samples
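# Illustrative sketch (not part of the original module): per-sample steps in
# this module take a single data dict and return [[data]], so a serial
# stand-in for the run_parallel callable can unwrap, apply, and re-extend.
# Merge steps such as stringtie_merge receive the whole samples list instead,
# so this helper only covers the per-sample case; resolving step names through
# globals() is an assumption made for illustration.
def _example_serial_run_parallel(step_name, samples):
    step = globals()[step_name]
    updated = []
    for item in samples:
        updated.extend(step(item[0]))
    return updated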
|
|
#!/usr/bin/env python
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2019 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DataONE Command Line Interface."""
import logging
import optparse
import sys
import traceback
import d1_cli.impl.command_parser
import d1_cli.impl.exceptions
import d1_cli.impl.session
import d1_cli.impl.util
import d1_cli.version
import d1_common.const
import d1_common.types.exceptions
import d1_common.util
import d1_common.utils.ulog
# def module_path():
# """ This will get us the program's directory,
# even if we are frozen using py2exe"""
#
# if we_are_frozen():
# return os.path.dirname(unicode(sys.executable, sys.getfilesystemencoding( )))
#
# return os.path.dirname(unicode(__file__, sys.getfilesystemencoding( )))
#
# sys.path.append(os.path.join(module_path(), u'./impl'))
def main():
print("DataONE Command Line Interface ({})".format(d1_cli.version.__version__))
parser = optparse.OptionParser(
usage="usage: %prog [command] ...", option_list=option_list
)
options, commands = parser.parse_args()
command_parser = d1_cli.impl.command_parser.CLI()
handle_options(command_parser, options)
d1_common.utils.ulog.setup(options.debug)
# If the user passed commands on the command line, run them.
for command in commands:
try:
command_parser.onecmd(command)
except (
d1_cli.impl.exceptions.InvalidArguments,
d1_cli.impl.exceptions.CLIError,
) as e:
d1_cli.impl.util.print_error(e)
except d1_common.types.exceptions.DataONEException as e:
d1_cli.impl.util.print_error("DataONE Node returned error:")
d1_cli.impl.util.print_error(e)
# except RuntimeError:
# if options.debug:
# raise
# handle_unexpected_exception()
# Enter the main processing loop.
while True:
try:
command_parser.cmdloop()
except KeyboardInterrupt:
command_parser.do_exit("")
except SystemExit:
break
except (
d1_cli.impl.exceptions.InvalidArguments,
d1_cli.impl.exceptions.CLIError,
) as e:
d1_cli.impl.util.print_error(e)
# raise
except d1_common.types.exceptions.DataONEException as e:
# Suppress trace information in DataONEExceptions if not in debug mode.
if not options.debug:
e.traceInformation = None
d1_cli.impl.util.print_error("DataONE Node returned error:")
d1_cli.impl.util.print_error(e)
# except RuntimeError:
# if options.debug:
# raise
# handle_unexpected_exception()
def log_setup(debug):
if debug:
logging.getLogger("").setLevel(logging.DEBUG)
else:
logging.getLogger("").setLevel(logging.ERROR)
formatter = logging.Formatter("%(levelname)-8s %(message)s")
console_logger = logging.StreamHandler(sys.stdout)
console_logger.setFormatter(formatter)
logging.getLogger("").addHandler(console_logger)
# Command-line options.
option_list = [
optparse.make_option(
"--" + d1_cli.impl.session.CHECKSUM_NAME,
action="store",
dest="algorithm",
help="Checksum algorithm used for a Science Data Object.",
),
optparse.make_option(
"--" + d1_cli.impl.session.ANONYMOUS_NAME,
action="store_true",
dest="anonymous",
help="Ignore any installed certificates and connect anonymously",
),
optparse.make_option(
"--no-" + d1_cli.impl.session.ANONYMOUS_NAME,
action="store_false",
dest="anonymous",
help="Use the installed certificates and do not connect anonymously",
),
optparse.make_option(
"--" + d1_cli.impl.session.AUTH_MN_NAME,
action="store",
dest="authoritative_mn",
metavar="MN-URI",
help="Authoritative Member Node for generating System Metadata.",
),
optparse.make_option(
"--" + d1_cli.impl.session.CERT_FILENAME_NAME,
action="store",
dest="cert_file",
metavar="FILE",
help="Path to client certificate",
),
optparse.make_option(
"--" + d1_cli.impl.session.COUNT_NAME,
action="store",
dest="count",
type="int",
help="Maximum number of items to display",
),
optparse.make_option(
"--" + d1_cli.impl.session.CN_URL_NAME,
action="store",
dest="cn_url",
metavar="URI",
help="URI to use for the Coordinating Node",
),
optparse.make_option(
"--" + d1_cli.impl.session.FROM_DATE_NAME,
action="store",
dest="from_date",
metavar="DATE",
help="Start time used by operations that accept a date range",
),
optparse.make_option(
"--" + d1_cli.impl.session.KEY_FILENAME_NAME,
action="store",
dest="key_file",
metavar="FILE",
help="File of client private key (not required if key is in cert-file",
),
optparse.make_option(
"--" + d1_cli.impl.session.MN_URL_NAME,
action="store",
dest="mn_url",
metavar="URI",
help="Member Node URL",
),
optparse.make_option(
"--" + d1_cli.impl.session.FORMAT_NAME,
action="store",
dest="object_format",
metavar="OBJECT-FORMAT",
help="ID for the Object Format to use when generating System Metadata",
),
optparse.make_option(
"--formatId",
action="store",
dest="object_format",
metavar="OBJECT-FORMAT",
help="ID for the Object Format to use when generating System Metadata",
),
optparse.make_option(
"--" + d1_cli.impl.session.QUERY_STRING_NAME,
action="store",
dest="query_string",
metavar="QUERY",
help="Query string (SOLR or Lucene query syntax) for searches",
),
optparse.make_option(
"--" + d1_cli.impl.session.OWNER_NAME,
action="store",
dest="rights_holder",
metavar="SUBJECT",
help="Subject of the rights holder to use when generating System Metadata",
),
optparse.make_option(
"--" + d1_cli.impl.session.SEARCH_FORMAT_NAME,
action="store",
dest="search_object_format",
metavar="OBJECT-FORMAT",
help="Include only objects of this format when searching",
),
optparse.make_option(
"--" + d1_cli.impl.session.START_NAME,
action="store",
dest="start",
type="int",
help="First item to display for operations that display a list_objects of items",
),
optparse.make_option(
"--" + d1_cli.impl.session.TO_DATE_NAME,
action="store",
dest="to_date",
metavar="DATE",
help="End time used by operations that accept a date range",
),
optparse.make_option(
"-v",
"--" + d1_cli.impl.session.VERBOSE_NAME,
action="store_true",
dest="verbose",
help="Display more information",
),
optparse.make_option(
"--no-" + d1_cli.impl.session.VERBOSE_NAME,
action="store_false",
dest="verbose",
help="Display less information",
),
optparse.make_option(
"--" + d1_cli.impl.session.EDITOR_NAME,
action="store_true",
dest="editor",
help="Editor to use for editing operation queue",
),
optparse.make_option(
"--no-" + d1_cli.impl.session.EDITOR_NAME,
action="store_false",
dest="editor",
help="Use editor specified in EDITOR environment variable",
),
optparse.make_option(
"--allow-replication",
action="store_true",
dest="action_allowReplication",
help="Allow objects to be replicated.",
),
optparse.make_option(
"--disallow-replication",
action="store_false",
dest="action_allowReplication",
help="Do not allow objects to be replicated.",
),
optparse.make_option(
"--replicas",
action="store",
dest="action_numReplicas",
metavar="#replicas",
help="Set the preferred number of replicas.",
),
optparse.make_option(
"--add_blocked",
action="store",
dest="action_blockNode",
metavar="MN",
help="Add blocked Member Node to access policy.",
),
optparse.make_option(
"--add_preferred",
action="store",
dest="action_preferNode",
metavar="MN",
help="Add Member Node to list_objects of preferred replication targets.",
),
# optparse.make_option('--configure', action='store_true', dest='action_configure',
# help='Perform initial configuration'),
optparse.make_option(
"--cn",
action="store",
dest="cn_host",
metavar="HOST",
help="Name of the host to use for the Coordinating Node",
),
optparse.make_option(
"--mn",
action="store",
dest="mn_host",
metavar="HOST",
help="Name of the host to use for the Member Node",
),
optparse.make_option(
"-q",
"--quiet",
action="store_false",
dest="verbose",
help="Display less information",
),
optparse.make_option(
"--debug", action="store_true", help="Print full stack trace and exit on errors"
),
]
def handle_options(cli, options):
try:
if options.algorithm:
cli.d1.session_set_parameter(
d1_cli.impl.session.CHECKSUM_NAME, options.algorithm
)
if options.anonymous:
cli.d1.session_set_parameter(
d1_cli.impl.session.ANONYMOUS_NAME, options.anonymous
)
if options.authoritative_mn:
cli.d1.session_set_parameter(
d1_cli.impl.session.AUTH_MN_NAME, options.authoritative_mn
)
if options.cert_file:
cli.d1.session_set_parameter(
d1_cli.impl.session.CERT_FILENAME_NAME, options.cert_file
)
if options.count:
cli.d1.session_set_parameter(d1_cli.impl.session.COUNT_NAME, options.count)
if options.cn_url:
cli.d1.session_set_parameter(
d1_cli.impl.session.CN_URL_NAME, options.cn_url
)
if options.cn_host:
url = "".join(
(
d1_common.const.DEFAULT_CN_PROTOCOL,
"://",
options.cn_host,
d1_common.const.DEFAULT_CN_PATH,
)
)
cli.d1.session_set_parameter(d1_cli.impl.session.CN_URL_NAME, url)
if options.from_date:
cli.d1.session_set_parameter(
d1_cli.impl.session.FROM_DATE_NAME, options.from_date
)
if options.key_file:
cli.d1.session_set_parameter(
d1_cli.impl.session.KEY_FILENAME_NAME, options.key_file
)
if options.mn_url:
cli.d1.session_set_parameter(
d1_cli.impl.session.MN_URL_NAME, options.mn_url
)
if options.mn_host:
url = "".join(
(
d1_common.const.DEFAULT_MN_PROTOCOL,
"://",
options.mn_host,
d1_common.const.DEFAULT_MN_PATH,
)
)
cli.d1.session_set_parameter(d1_cli.impl.session.MN_URL_NAME, url)
if options.object_format:
cli.d1.session_set_parameter(
d1_cli.impl.session.FORMAT_NAME, options.object_format
)
if options.query_string:
cli.d1.session_set_parameter(
d1_cli.impl.session.QUERY_STRING_NAME, options.query_string
)
if options.rights_holder:
cli.d1.session_set_parameter(
d1_cli.impl.session.OWNER_NAME, options.rights_holder
)
if options.search_object_format:
try:
cli.d1.session_set_parameter(
d1_cli.impl.session.SEARCH_FORMAT_NAME, options.search_object_format
)
except ValueError as e:
d1_cli.impl.util.print_error(e.args[0])
if options.start:
cli.d1.session_set_parameter(d1_cli.impl.session.START_NAME, options.start)
if options.to_date:
cli.d1.session_set_parameter(
d1_cli.impl.session.TO_DATE_NAME, options.to_date
)
if options.verbose:
cli.d1.session_set_parameter(
d1_cli.impl.session.VERBOSE_NAME, options.verbose
)
if options.editor:
cli.d1.session_set_parameter(
d1_cli.impl.session.EDITOR_NAME, options.editor
)
# Replication.
if options.action_allowReplication is not None:
if options.action_allowReplication:
cli.d1.replication_policy_set_replication_allowed(True)
else:
cli.d1.replication_policy_set_replication_allowed(False)
if options.action_numReplicas:
cli.d1.replication_policy_set_number_of_replicas(options.action_numReplicas)
if options.action_blockNode:
cli.d1.get_replication_policy().add_blocked(options.action_blockNode)
if options.action_preferNode:
cli.d1.get_replication_policy().add_preferred(options.action_preferNode)
except d1_cli.impl.exceptions.InvalidArguments as e:
d1_cli.impl.util.print_error(e)
# except RuntimeError:
# d1_cli.impl.util._handle_unexpected_exception()
def handle_unexpected_exception(max_traceback_levels=100):
"""Suppress stack traces for common errors and provide hints for how to resolve
them."""
exc_type, exc_msgs = sys.exc_info()[:2]
if exc_type.__name__ == "SSLError":
d1_cli.impl.util.print_error(
"""HTTPS / TLS / SSL / X.509v3 Certificate Error:
An HTTPS connection could not be established. Verify that a DataONE node
responds at the URL provided in the cn-url or mn-url session variable. If the
URL is valid and if you intended to connect without authentication, make sure
that the session variable, "anonymous", is set to True. If you intended to
connect with authentication, make sure that the parameter, "cert-file", points
to a valid certificate from CILogon. If the certificate has the private
key in a separate file, also set "key-file" to the private key file.
Otherwise, set "key-file" to None. Note that CILogon certificates must be
renewed after 18 hours.
"""
)
elif exc_type.__name__ == "timeout":
d1_cli.impl.util.print_error(
"""Timeout error:
A connection to a DataONE node timed out. Verify that a DataONE node responds
at the URL provided in the cn-url or mn-url session variable.
"""
)
else:
_print_unexpected_exception(max_traceback_levels)
def _print_unexpected_exception(max_traceback_levels=100):
exc_class, exc_msgs, exc_traceback = sys.exc_info()
d1_cli.impl.util.print_error("Error:")
d1_cli.impl.util.print_error(" Name: {}".format(exc_class.__name__))
d1_cli.impl.util.print_error(" Value: {}".format(exc_msgs))
try:
exc_args = exc_msgs.__dict__["args"]
except KeyError:
exc_args = "<no args>"
d1_cli.impl.util.print_error(" Args: {}".format(exc_args))
d1_cli.impl.util.print_error(" Traceback:")
for tb in traceback.format_tb(exc_traceback, max_traceback_levels):
d1_cli.impl.util.print_error(" {}".format(tb))
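# Illustrative sketch (not part of the original module): main() feeds any
# positional arguments through CLI.onecmd() before entering cmdloop(), so the
# same API supports one-shot scripting. The command strings below are
# hypothetical placeholders, not verified CLI commands.
def _example_one_shot(commands=('set cn-url https://cn.example.org/cn', 'exit')):
    cli = d1_cli.impl.command_parser.CLI()
    for command in commands:
        cli.onecmd(command)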
if __name__ == "__main__":
sys.exit(main())
|
|
import six
import signal
import logging
import warnings
from twisted.internet import reactor, defer
from zope.interface.verify import verifyClass, DoesNotImplement
from scrapy.core.engine import ExecutionEngine
from scrapy.resolver import CachingThreadedResolver
from scrapy.interfaces import ISpiderLoader
from scrapy.extension import ExtensionManager
from scrapy.settings import Settings
from scrapy.signalmanager import SignalManager
from scrapy.exceptions import ScrapyDeprecationWarning
from scrapy.utils.ossignal import install_shutdown_handlers, signal_names
from scrapy.utils.misc import load_object
from scrapy.utils.log import LogCounterHandler, configure_logging, log_scrapy_info
from scrapy import signals
logger = logging.getLogger(__name__)
class Crawler(object):
def __init__(self, spidercls, settings=None):
if isinstance(settings, dict) or settings is None:
settings = Settings(settings)
self.spidercls = spidercls
self.settings = settings.copy()
self.spidercls.update_settings(self.settings)
self.signals = SignalManager(self)
self.stats = load_object(self.settings['STATS_CLASS'])(self)
handler = LogCounterHandler(self, level=settings.get('LOG_LEVEL'))
logging.root.addHandler(handler)
# lambda is assigned to Crawler attribute because this way it is not
# garbage collected after leaving __init__ scope
self.__remove_handler = lambda: logging.root.removeHandler(handler)
self.signals.connect(self.__remove_handler, signals.engine_stopped)
lf_cls = load_object(self.settings['LOG_FORMATTER'])
self.logformatter = lf_cls.from_crawler(self)
self.extensions = ExtensionManager.from_crawler(self)
self.settings.freeze()
self.crawling = False
self.spider = None
self.engine = None
@property
def spiders(self):
if not hasattr(self, '_spiders'):
warnings.warn("Crawler.spiders is deprecated, use "
"CrawlerRunner.spider_loader or instantiate "
"scrapy.spiderloader.SpiderLoader with your "
"settings.",
category=ScrapyDeprecationWarning, stacklevel=2)
self._spiders = _get_spider_loader(self.settings.frozencopy())
return self._spiders
@defer.inlineCallbacks
def crawl(self, *args, **kwargs):
assert not self.crawling, "Crawling already taking place"
self.crawling = True
try:
self.spider = self._create_spider(*args, **kwargs)
self.engine = self._create_engine()
start_requests = iter(self.spider.start_requests())
yield self.engine.open_spider(self.spider, start_requests)
yield defer.maybeDeferred(self.engine.start)
except Exception:
self.crawling = False
raise
def _create_spider(self, *args, **kwargs):
return self.spidercls.from_crawler(self, *args, **kwargs)
def _create_engine(self):
return ExecutionEngine(self, lambda _: self.stop())
@defer.inlineCallbacks
def stop(self):
if self.crawling:
self.crawling = False
yield defer.maybeDeferred(self.engine.stop)
class CrawlerRunner(object):
"""
This is a convenient helper class that keeps track of, manages and runs
    crawlers inside an already set up Twisted `reactor`_.
The CrawlerRunner object must be instantiated with a
:class:`~scrapy.settings.Settings` object.
    This class shouldn't be needed (since Scrapy is responsible for using it
accordingly) unless writing scripts that manually handle the crawling
process. See :ref:`run-from-script` for an example.
"""
crawlers = property(
lambda self: self._crawlers,
doc="Set of :class:`crawlers <scrapy.crawler.Crawler>` started by "
":meth:`crawl` and managed by this class."
)
def __init__(self, settings=None):
if isinstance(settings, dict) or settings is None:
settings = Settings(settings)
self.settings = settings
self.spider_loader = _get_spider_loader(settings)
self._crawlers = set()
self._active = set()
@property
def spiders(self):
warnings.warn("CrawlerRunner.spiders attribute is renamed to "
"CrawlerRunner.spider_loader.",
category=ScrapyDeprecationWarning, stacklevel=2)
return self.spider_loader
def crawl(self, crawler_or_spidercls, *args, **kwargs):
"""
Run a crawler with the provided arguments.
It will call the given Crawler's :meth:`~Crawler.crawl` method, while
keeping track of it so it can be stopped later.
If `crawler_or_spidercls` isn't a :class:`~scrapy.crawler.Crawler`
instance, this method will try to create one using this parameter as
the spider class given to it.
Returns a deferred that is fired when the crawling is finished.
:param crawler_or_spidercls: already created crawler, or a spider class
or spider's name inside the project to create it
:type crawler_or_spidercls: :class:`~scrapy.crawler.Crawler` instance,
:class:`~scrapy.spiders.Spider` subclass or string
:param list args: arguments to initialize the spider
:param dict kwargs: keyword arguments to initialize the spider
"""
crawler = crawler_or_spidercls
if not isinstance(crawler_or_spidercls, Crawler):
crawler = self._create_crawler(crawler_or_spidercls)
return self._crawl(crawler, *args, **kwargs)
def _crawl(self, crawler, *args, **kwargs):
self.crawlers.add(crawler)
d = crawler.crawl(*args, **kwargs)
self._active.add(d)
def _done(result):
self.crawlers.discard(crawler)
self._active.discard(d)
return result
return d.addBoth(_done)
def _create_crawler(self, spidercls):
if isinstance(spidercls, six.string_types):
spidercls = self.spider_loader.load(spidercls)
return Crawler(spidercls, self.settings)
def stop(self):
"""
        Stops all the crawling jobs taking place simultaneously.
Returns a deferred that is fired when they all have ended.
"""
return defer.DeferredList([c.stop() for c in list(self.crawlers)])
@defer.inlineCallbacks
def join(self):
"""
join()
Returns a deferred that is fired when all managed :attr:`crawlers` have
completed their executions.
"""
while self._active:
yield defer.DeferredList(self._active)
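# Illustrative sketch (not part of the original module): the run-from-script
# pattern with CrawlerRunner, assuming the caller manages logging and the
# Twisted reactor, and that MySpider is a hypothetical spider class.
def _example_crawler_runner(MySpider):
    configure_logging()
    runner = CrawlerRunner(Settings())
    d = runner.crawl(MySpider)
    d.addBoth(lambda _: reactor.stop())
    reactor.run()  # blocks until the crawl finishes and stops the reactor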
class CrawlerProcess(CrawlerRunner):
"""
A class to run multiple scrapy crawlers in a process simultaneously.
This class extends :class:`~scrapy.crawler.CrawlerRunner` by adding support
for starting a Twisted `reactor`_ and handling shutdown signals, like the
keyboard interrupt command Ctrl-C. It also configures top-level logging.
This utility should be a better fit than
:class:`~scrapy.crawler.CrawlerRunner` if you aren't running another
Twisted `reactor`_ within your application.
The CrawlerProcess object must be instantiated with a
:class:`~scrapy.settings.Settings` object.
    This class shouldn't be needed (since Scrapy is responsible for using it
accordingly) unless writing scripts that manually handle the crawling
process. See :ref:`run-from-script` for an example.
"""
def __init__(self, settings=None):
super(CrawlerProcess, self).__init__(settings)
install_shutdown_handlers(self._signal_shutdown)
configure_logging(self.settings)
log_scrapy_info(self.settings)
def _signal_shutdown(self, signum, _):
install_shutdown_handlers(self._signal_kill)
signame = signal_names[signum]
logger.info("Received %(signame)s, shutting down gracefully. Send again to force ",
{'signame': signame})
reactor.callFromThread(self._graceful_stop_reactor)
def _signal_kill(self, signum, _):
install_shutdown_handlers(signal.SIG_IGN)
signame = signal_names[signum]
logger.info('Received %(signame)s twice, forcing unclean shutdown',
{'signame': signame})
reactor.callFromThread(self._stop_reactor)
def start(self, stop_after_crawl=True):
"""
This method starts a Twisted `reactor`_, adjusts its pool size to
:setting:`REACTOR_THREADPOOL_MAXSIZE`, and installs a DNS cache based
on :setting:`DNSCACHE_ENABLED` and :setting:`DNSCACHE_SIZE`.
If `stop_after_crawl` is True, the reactor will be stopped after all
crawlers have finished, using :meth:`join`.
        :param boolean stop_after_crawl: whether to stop the reactor after all
            crawlers have finished
"""
if stop_after_crawl:
d = self.join()
# Don't start the reactor if the deferreds are already fired
if d.called:
return
d.addBoth(self._stop_reactor)
reactor.installResolver(self._get_dns_resolver())
tp = reactor.getThreadPool()
tp.adjustPoolsize(maxthreads=self.settings.getint('REACTOR_THREADPOOL_MAXSIZE'))
reactor.addSystemEventTrigger('before', 'shutdown', self.stop)
reactor.run(installSignalHandlers=False) # blocking call
def _get_dns_resolver(self):
if self.settings.getbool('DNSCACHE_ENABLED'):
cache_size = self.settings.getint('DNSCACHE_SIZE')
else:
cache_size = 0
return CachingThreadedResolver(
reactor=reactor,
cache_size=cache_size,
timeout=self.settings.getfloat('DNS_TIMEOUT')
)
def _graceful_stop_reactor(self):
d = self.stop()
d.addBoth(self._stop_reactor)
return d
def _stop_reactor(self, _=None):
try:
reactor.stop()
except RuntimeError: # raised if already stopped or in shutdown stage
pass
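# Illustrative sketch (not part of the original module): CrawlerProcess owns
# the reactor, so a script only needs crawl() plus start(). MySpider is a
# hypothetical spider class and the settings dict is only an example.
def _example_crawler_process(MySpider):
    process = CrawlerProcess({'LOG_LEVEL': 'INFO'})
    process.crawl(MySpider)
    process.start()  # starts the reactor; returns once crawling is finished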
def _get_spider_loader(settings):
""" Get SpiderLoader instance from settings """
if settings.get('SPIDER_MANAGER_CLASS'):
warnings.warn(
'SPIDER_MANAGER_CLASS option is deprecated. '
'Please use SPIDER_LOADER_CLASS.',
category=ScrapyDeprecationWarning, stacklevel=2
)
cls_path = settings.get('SPIDER_MANAGER_CLASS',
settings.get('SPIDER_LOADER_CLASS'))
loader_cls = load_object(cls_path)
try:
verifyClass(ISpiderLoader, loader_cls)
except DoesNotImplement:
warnings.warn(
'SPIDER_LOADER_CLASS (previously named SPIDER_MANAGER_CLASS) does '
'not fully implement scrapy.interfaces.ISpiderLoader interface. '
'Please add all missing methods to avoid unexpected runtime errors.',
category=ScrapyDeprecationWarning, stacklevel=2
)
return loader_cls.from_settings(settings.frozencopy())
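# Hedged usage sketch (added, not part of the original module): the
# CrawlerProcess docstring above points to the "run from a script" pattern.
# The tiny spider below is a made-up minimal example; only CrawlerProcess
# plus the standard scrapy.Spider and Settings APIs are assumed.
if __name__ == '__main__':
    import scrapy
    from scrapy.settings import Settings
    class _DemoSpider(scrapy.Spider):
        name = 'demo'
        start_urls = ['http://example.com']
        def parse(self, response):
            # yield a single item containing the page title
            yield {'title': response.css('title::text').extract_first()}
    process = CrawlerProcess(Settings({'LOG_LEVEL': 'INFO'}))
    process.crawl(_DemoSpider)
    process.start()  # blocking; stops the reactor once the crawl finishes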
|
|
#!/usr/bin/python
# Standard Library
import inspect
import json
import logging
import os
import sys
import time
import traceback
import uuid
from collections import defaultdict
from Queue import Queue
from cStringIO import StringIO
from threading import Thread, Lock
# Third Party
import boto.s3
import mixingboard
import pip
# Local
from chassis.database import db_session
from chassis.models import JobHistory
from chassis.util import processFormattedTableDescription
from receiver import EventBroadcastReceiver
# import pyspark junk (REQUIRES SPARK TO BE ON SYSPATH)
from pyspark import SparkFiles
from pyshark import SharkConf
from sparkler import SparklerContext
# set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# defaults
WORKER_MEMORY = "3300m"
SCHEDULER_POOLS = [
"verylow",
"low",
"default",
"high",
"veryhigh"
]
class JobRunner:
"""
Manages and runs pyspark jobs
"""
    MAX_FILE_SIZE = 1024*1024*16 # don't write out results files larger than 16 MB
def __init__(self, account, master, uploadFolder, iamUsername,
accessKeyId, accessKeySecret, conf={}):
"""
Initializes a job runner
        Params:
            account: a user account for this job runner
            master: the spark master URL to connect to
            uploadFolder: a local folder used for uploaded job files and the pip cache
            iamUsername: an IAM username used to namespace temp and result paths
            accessKeyId: an AWS access key id for S3 access
            accessKeySecret: an AWS secret access key for S3 access
            conf: allow you to set configuration variables on a spark context
"""
self.account = account
self.region = mixingboard.REGION
self.master = master
self.conf = conf
self.uploadFolder = uploadFolder
self.iamUsername = iamUsername
self.accessKeyId = accessKeyId
self.accessKeySecret = accessKeySecret
self.handles = defaultdict(dict)
self.handlesLock = Lock()
self.createContext()
# TODO when the runner is isolated, this needs to pass back the history over stdout
def _makeHistory(self, accountId, userId, event, jobId=None, jobHandle=None, jobType="spark", data={}):
"""
Save an entry to the job history record
Params:
            accountId: the id of the account the job ran under
            userId: the id of the user who ran the job
            event: an event type to save
            jobId: a job id
            jobHandle: a job handle
            jobType: the type of job (e.g. "spark" or "sql")
            data: a json serializable object
Returns:
a history event
"""
jobHistory = JobHistory(account_id=accountId, user_id=userId, event=event, jobType=jobType,
jobId=jobId, jobHandle=jobHandle, data=data)
db_session.add(jobHistory)
db_session.commit()
return jobHistory
def _makeHandle(self):
"""
        Generates a unique query handle. Collisions are theoretically
        possible but so unlikely that they are ignored here.
Returns:
a unique handle
"""
return "%s_%s" % (
uuid.uuid4().bytes.encode("base64")[:21].translate(None, "/+"),
int(time.time()*1000)
)
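    # Illustrative note (added): a handle looks roughly like
    # "q1w2e3r4t5y6u7i8o9p0a_1452000000000" -- up to 21 url-safe base64
    # characters from a random UUID, an underscore, then a ms timestamp.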
def runSQLSync(self, sql):
"""
Run a sql query and wait for the result
Params:
sql: a sql query
Returns:
the result of running a job
"""
result = [row.split('\t') for row in self.context.sql(sql)]
return result
def _parseSQL(self, sql):
# if we have multiple queries, break them apart
queries = []
buff = ""
skip = False
parenDepth = 0
quoteType = None
for c in sql:
buff += c
# deal with escaping
if c == "\\":
skip = True
continue
if not skip:
if quoteType is None:
if c == "(":
parenDepth += 1
elif c == ")" and parenDepth > 0:
parenDepth -= 1
elif c == "'" or c == '"':
quoteType = c
elif c == ";" and parenDepth == 0:
buff = buff.strip()
queries.append(buff[:-1])
buff = ""
continue
elif quoteType == c:
quoteType = None
skip = False
buff = buff.strip()
if len(buff) > 0:
queries.append(buff)
return queries
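    # Illustrative note (added): _parseSQL splits a batch of SQL statements on
    # top-level semicolons while honoring quotes, escapes and parentheses, e.g.
    #   self._parseSQL("SELECT ';' FROM t; SHOW TABLES")
    #   -> ["SELECT ';' FROM t", "SHOW TABLES"]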
def runSQLAsync(self, sql, options={}):
"""
Run a job
Params:
sql: a sql query
Returns:
            a handle for retrieving future information about a job
"""
handle = self._makeHandle()
def sqlRunInner(sql, handle, options):
self.handlesLock.acquire()
self.handles[handle]['running'] = True
self.handles[handle]['options'] = options
self.handlesLock.release()
result = None
queryResult = None
columns = None
outputDir = '/tmp/%s/shark/%s' % (
self.iamUsername,
handle,
)
self._makeHistory(
self.account,
options.get('user'),
'job_start',
jobHandle=handle,
data={ "jobName": options.get("jobName") },
jobType="sql"
)
try:
self.context.setJobGroup(handle, "SQL Query by Jerb")
self.context.setLocalProperty("spark.job.name", options.get("jobName","Untitled"))
self.context.setLocalProperty("spark.job.type", options.get("jobType","sql"))
poolName = options.get("priority","default")
pool = poolName if poolName in SCHEDULER_POOLS else "default"
                self.context.setLocalProperty("spark.scheduler.pool", pool)
queries = self._parseSQL(sql)
self.context.setLocalProperty('spark.job.numqueries', str(len(queries)))
for i, sql in enumerate(queries):
                    # if we're at the last query, save its output
if i == len(queries)-1:
try:
# TODO use hiveContext stuff and saveAsTextFile
self.context.sql("""
CREATE TABLE `__%s`
ROW FORMAT DELIMITED
FIELDS TERMINATED BY '\t'
STORED AS TEXTFILE
LOCATION '%s'
AS %s
""" % (
handle,
outputDir,
sql
))
columns, cached = processFormattedTableDescription([row.split('\t') for row in self.context.sql('DESC FORMATTED `__%s`' % handle)])
self.context.sql("ALTER TABLE `__%s` SET TBLPROPERTIES ('EXTERNAL'='TRUE')" % handle)
self.context.sql("DROP TABLE `__%s`" % handle)
                        except Exception:
                            # if the CREATE TABLE staging approach fails,
                            # fall back to fetching the rows directly
queryResult = [row.split('\t') for row in self.context.sql(sql)]
else:
self.context.sql(sql)
result = {
"success": True,
"data": {
"outputDir": outputDir,
"message": "Query complete, use fetchN to retrieve results",
"handle": handle
},
"options": options
}
if queryResult:
result["data"]["result"] = queryResult
if columns:
result["data"]["columns"] = columns
except Exception as e:
result = {
"success": False,
"data" : {
"error": str(e),
"trace": str(traceback.format_exc()).replace('\t',' ').split('\n')
},
"options": options
}
self._exportResults(handle, result)
self._makeHistory(
self.account,
options.get('user'),
'job_complete',
jobHandle=handle,
data={ "jobName": options.get("jobName") },
jobType="sql"
)
self.handlesLock.acquire()
self.handles[handle]['running'] = False
self.handlesLock.release()
t = Thread(target=sqlRunInner, args=(sql, handle, options))
t.start()
return handle
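    # Illustrative note (added): callers poll the returned handle, e.g.
    #   handle = runner.runSQLAsync("SHOW TABLES", options={"jobName": "demo"})
    #   runner.getHandleProgress(handle)  # -> {"progress": ..., "running": ...}
    # and the final result JSON lands in S3 under tmp/<iamUsername>/spark/<handle>
    # (see _exportResults below).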
def runJobSync(self, job, options={}):
"""
Run a job
Params:
job: a dictionary containing job information
options: a dictionary of options to pass to the job
Returns:
the result of running a job
"""
requirements = self._fetchRequirements(job["jobPath"])
for jobFile in job["jobFiles"]:
requirements.extend(self._fetchRequirements(jobFile))
# make our requirements unique
requirements = list(set(requirements))
self._installRequirements(requirements, job["name"])
sys.path.append(job["jobFolder"])
main = __import__(job["jobModule"])
reload(main)
for jobFile in job["jobFiles"]:
# truncate the ".py" extension from the job file
jobFileModuleName = jobFile[:jobFile.rfind(".")]
jobFileModule = __import__(jobFileModuleName)
reload(jobFileModule)
self._addRequirements(job["jobPath"], job["jobFiles"], os.path.join(job["jobFolder"], "deps"))
result = {}
if len(inspect.getargspec(main.run)[0]) == 1:
result = main.run(self.context)
else:
result = main.run(self.context, options=options)
return result
def _exportResults(self, handle, result):
# TODO make region configurable
s3Conn = boto.s3.connect_to_region(self.region, aws_access_key_id=self.accessKeyId, aws_secret_access_key=self.accessKeySecret)
bucket = s3Conn.get_bucket('quarry-data-%s' % self.region, validate=False)
resultsKey = "tmp/%s/spark/%s" % (
self.iamUsername,
handle
)
key = bucket.new_key(resultsKey)
key.set_contents_from_string(json.dumps(result))
def runJobAsync(self, job, options={}):
"""
Run a job
Params:
job: a dictionary containing job information
options: a dictionary of options to pass to the job
Returns:
            a handle for retrieving future information about a job
"""
handle = self._makeHandle()
def jobRunInner(job, options, handle):
self.handlesLock.acquire()
self.handles[handle]['running'] = True
self.handles[handle]['options'] = options
self.handlesLock.release()
self._makeHistory(
self.account,
options.get('user'),
'job_start',
jobHandle=handle,
data={ "jobName": options.get("jobName") },
jobType=options.get("jobType","spark")
)
result = None
try:
requirements = self._fetchRequirements(job["jobPath"])
for jobFile in job["jobFiles"]:
requirements.extend(self._fetchRequirements(jobFile))
# make our requirements unique
requirements = list(set(requirements))
self._installRequirements(requirements, job["name"])
sys.path.append(job["jobFolder"])
main = __import__(job["jobModule"])
reload(main)
self._addRequirements(job["jobPath"], job["jobFiles"], os.path.join(job["jobFolder"], "deps"))
                self.context.setJobGroup(handle, "Spark Job by Jerb")
result = {}
self.context.setLocalProperty("spark.job.name", options.get("jobName","Untitled"))
self.context.setLocalProperty("spark.job.type", options.get("jobType","python"))
if len(inspect.getargspec(main.run)[0]) == 1:
result = main.run(self.context)
else:
result = main.run(self.context, options=options)
result = {
"success": True,
"data": result,
"options": options
}
except Exception as e:
result = {
"success": False,
"data": {
"error": str(e),
"trace": str(traceback.format_exc()).replace('\t',' ').split('\n')
},
"options": {}
}
self._exportResults(handle, result)
self._makeHistory(
self.account,
options.get('user'),
'job_complete',
jobHandle=handle,
data={ "jobName": options.get("jobName") },
jobType=options.get("jobType","spark")
)
self.handlesLock.acquire()
self.handles[handle]['running'] = False
self.handlesLock.release()
t = Thread(target=jobRunInner, args=(job, options, handle))
t.start()
return handle
def getProgress(self, jobType=None):
"""
        Get the current overall progress for this spark context
        Returns:
            a dictionary with a whole bunch of progress info
"""
return self.receiver.getProgress(jobType=jobType)
def getStatus(self):
"""
Get the current overall status for this spark context
Returns:
a dictionary with a whole bunch of status info
"""
return self.receiver.getStatus()
def getHandleStatus(self, handle):
"""
Get the current status of a spark job
Params:
handle: a job handle
Returns:
the current status for the job
"""
# TODO lock this method
status = {
"status": self.receiver.getJobStatus(handle),
"running": self.handles[handle]['running']
}
return status
def cancelJobWithHandle(self, handle):
"""
Cancel a spark job
Params:
handle: a job handle
"""
# TODO lock this method
return self.context.cancelJobGroup(handle)
def getHandleProgress(self, handle):
"""
Get the current progress of a spark job
Params:
handle: a job handle
Returns:
the current progress for the job
"""
# TODO lock this method
progress = {
"progress": self.receiver.getJobProgress(handle),
"running": self.handles[handle]['running']
}
return progress
def getResults(self, handle):
"""
Get the results of the spark job running under
the given handle
Params:
handle: a job handle
Returns:
results of the spark job
"""
        # TODO lock this method
if 'result' not in self.handles[handle]:
return False, None
else:
results = self.handles[handle]['result']
options = self.handles[handle]['options']
del self.handles[handle]
return True, {
"results": results,
"options": options
}
def stop(self):
"""
        Stop and join the job runner child process
"""
self.jobSend.send(None)
self.proc.join()
def kill(self):
"""
        Forcibly stop the spark context and join the job runner child process
"""
self.context.stop()
self.proc.join()
def _fetchRequirements(self, path):
"""
        Parse the special requirements list at the start of the job file
Params:
path: the path to the code for the job
Returns:
an array of parsed pip package names
"""
        code = ""
        requirements = []
        with open(path) as codeFile:
            for line in codeFile:
                code += line
                try:
                    # keep accumulating lines until the JSON literal at the
                    # top of the file parses; later lines simply fail to parse
                    requirements = json.loads(code)
                except ValueError:
                    pass
return requirements
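    # Illustrative note (added): this assumes a job file that begins with a
    # bare JSON array of pip requirements, e.g. (hypothetical file contents):
    #   ["requests==2.3.0", "boto"]
    #   def run(sc, options=None):
    #       ...
    # The leading list is a valid Python expression statement, so the job
    # file remains importable.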
def _installRequirements(self, reqs, jobName):
"""
Use pip to install requirements locally and download them
so they can be shipped to remote workers
Params:
reqs: an array of pip package names
jobName: the name of the job, this determines where files will be kept
"""
if len(reqs) > 0:
args = ['install','--download-cache=%s/_pip_cache/' % self.uploadFolder]
install_args = list(args)
install_args.extend(reqs)
res = pip.main(initial_args = install_args)
download_args = list(args)
download_args.extend(["--download","%s/%s/%s/deps" % (self.uploadFolder, self.account, jobName)])
download_args.extend(reqs)
res = pip.main(initial_args = download_args)
def _addRequirements(self, jobMainFile, jobFiles, folder):
"""
Modify the environment of the spark context to add
the current file as well as pip dependencies to it
Params:
            jobMainFile: the path to the main file for the job
            jobFiles: paths to any additional files for the job
            folder: the path to a folder containing zipped
                pip dependencies
"""
for reqFile in os.listdir(folder):
self.context.addPyFile(os.path.join(folder, reqFile))
self.context.addPyFile(jobMainFile)
for filename in jobFiles:
self.context.addPyFile(filename)
# FIXME test under conditions where job server is not on same node as worker
self.context.environment["PYTHONPATH"] += ":".join(["./%s" % folder for folder in os.walk(SparkFiles.getRootDirectory()).next()[1]])
def createContext(self):
conf = SharkConf()
if self.conf:
for key, value in self.conf.items():
conf.set(key, value)
conf.setMaster(self.master)
conf.set("spark.executor.memory", WORKER_MEMORY)
conf.set("spark.files.overwrite", "true")
self.receiver = EventBroadcastReceiver()
self.receiver.run()
conf.set("spark.eventBroadcast.enabled", "true")
conf.set("spark.eventBroadcast.remotePort", self.receiver.port)
conf.set("spark.scheduler.mode", "FAIR")
# TODO add fair scheduling weighted pool support
appName = uuid.uuid4().bytes.encode("base64")[:21].translate(None, "/+")
conf.setAppName(appName)
# set up scheduler pools
fairSchedulerFile = os.path.join(os.path.dirname(os.path.realpath(__file__)), "conf", "fairscheduler.xml")
conf.set("spark.scheduler.allocation.file", fairSchedulerFile)
self.context = SparklerContext(conf=conf, accessKeyId=self.accessKeyId, accessKeySecret=self.accessKeySecret,
iamUsername=self.iamUsername, bucket='quarry-data-%s' % self.region)
# add pyspark home to the python path
self.context.environment["PYTHONPATH"] = ":%s:" % os.path.join(os.environ["SPARK_HOME"], "python")
# warm up the object store
self.context.sql("SHOW TABLES")
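# Hedged usage sketch (added, commented out because JobRunner needs a live
# Spark master, the mixingboard/sparkler stack and AWS credentials; every
# value below is a placeholder):
#   runner = JobRunner(
#       account="acct-1", master="spark://master:7077",
#       uploadFolder="/tmp/uploads", iamUsername="svc-user",
#       accessKeyId="AKIA...", accessKeySecret="...",
#   )
#   handle = runner.runSQLAsync("SHOW TABLES", options={"jobName": "demo"})
#   runner.getHandleStatus(handle)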
|
|
import numpy as np
import pytest
import taichi as ti
from tests import test_utils
@test_utils.test()
def test_1d():
    x = ti.field(ti.f32, shape=16)
@ti.kernel
def func():
for i in ti.ndrange((4, 10)):
x[i] = i
func()
for i in range(16):
if 4 <= i < 10:
assert x[i] == i
else:
assert x[i] == 0
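# Note (added): ti.ndrange((4, 10)) iterates the half-open interval [4, 10),
# matching Python's range(4, 10); a bare scalar argument n means [0, n).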
@test_utils.test()
def test_2d():
x = ti.field(ti.f32, shape=(16, 32))
t = 8
@ti.kernel
def func():
for i, j in ti.ndrange((4, 10), (3, t)):
val = i + j * 10
x[i, j] = val
func()
for i in range(16):
for j in range(32):
if 4 <= i < 10 and 3 <= j < 8:
assert x[i, j] == i + j * 10
else:
assert x[i, j] == 0
@test_utils.test()
def test_3d():
x = ti.field(ti.f32, shape=(16, 32, 64))
@ti.kernel
def func():
for i, j, k in ti.ndrange((4, 10), (3, 8), 17):
x[i, j, k] = i + j * 10 + k * 100
func()
for i in range(16):
for j in range(32):
for k in range(64):
if 4 <= i < 10 and 3 <= j < 8 and k < 17:
assert x[i, j, k] == i + j * 10 + k * 100
else:
assert x[i, j, k] == 0
@test_utils.test()
def test_tensor_based_3d():
x = ti.field(ti.i32, shape=(6, 6, 6))
y = ti.field(ti.i32, shape=(6, 6, 6))
@ti.kernel
def func():
lower = ti.Vector([0, 1, 2])
upper = ti.Vector([3, 4, 5])
for I in ti.grouped(
ti.ndrange((lower[0], upper[0]), (lower[1], upper[1]),
(lower[2], upper[2]))):
x[I] = I[0] + I[1] + I[2]
for i in range(0, 3):
for j in range(1, 4):
for k in range(2, 5):
y[i, j, k] = i + j + k
func()
for i in range(6):
for j in range(6):
for k in range(6):
assert x[i, j, k] == y[i, j, k]
@test_utils.test()
def test_static_grouped():
x = ti.field(ti.f32, shape=(16, 32, 64))
@ti.kernel
def func():
for I in ti.static(ti.grouped(ti.ndrange((4, 5), (3, 5), 5))):
x[I] = I[0] + I[1] * 10 + I[2] * 100
func()
for i in range(16):
for j in range(32):
for k in range(64):
if 4 <= i < 5 and 3 <= j < 5 and k < 5:
assert x[i, j, k] == i + j * 10 + k * 100
else:
assert x[i, j, k] == 0
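# Note (added): wrapping the grouped ndrange in ti.static above unrolls the
# loop at compile time, so its range bounds must be compile-time constants.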
@test_utils.test()
def test_static_grouped_static():
x = ti.Matrix.field(2, 3, dtype=ti.f32, shape=(16, 4))
@ti.kernel
def func():
for i, j in ti.ndrange(16, 4):
for I in ti.static(ti.grouped(ti.ndrange(2, 3))):
x[i, j][I] = I[0] + I[1] * 10 + i + j * 4
func()
for i in range(16):
for j in range(4):
for k in range(2):
for l in range(3):
assert x[i, j][k, l] == k + l * 10 + i + j * 4
@test_utils.test()
def test_field_init_eye():
# https://github.com/taichi-dev/taichi/issues/1824
n = 32
A = ti.field(ti.f32, (n, n))
@ti.kernel
def init():
for i, j in ti.ndrange(n, n):
if i == j:
A[i, j] = 1
init()
assert np.allclose(A.to_numpy(), np.eye(n, dtype=np.float32))
@test_utils.test()
def test_ndrange_index_floordiv():
# https://github.com/taichi-dev/taichi/issues/1829
n = 10
A = ti.field(ti.f32, (n, n))
@ti.kernel
def init():
for i, j in ti.ndrange(n, n):
if i // 2 == 0:
A[i, j] = i
init()
for i in range(n):
for j in range(n):
if i // 2 == 0:
assert A[i, j] == i
else:
assert A[i, j] == 0
@test_utils.test()
def test_nested_ndrange():
# https://github.com/taichi-dev/taichi/issues/1829
n = 2
A = ti.field(ti.i32, (n, n, n, n))
@ti.kernel
def init():
for i, j in ti.ndrange(n, n):
for k, l in ti.ndrange(n, n):
r = i * n**3 + j * n**2 + k * n + l
A[i, j, k, l] = r
init()
for i in range(n):
for j in range(n):
for k in range(n):
for l in range(n):
r = i * n**3 + j * n**2 + k * n + l
assert A[i, j, k, l] == r
@test_utils.test(ti.cpu)
def test_ndrange_ast_transform():
n, u, v = 4, 3, 2
a = ti.field(ti.i32, ())
b = ti.field(ti.i32, ())
A = ti.field(ti.i32, (n, n))
@ti.kernel
def func():
# `__getitem__ cannot be called from Python-scope` will be raised if
# `a[None]` is not transformed to `ti.subscript(a, None)` in ti.ndrange:
for i, j in ti.ndrange(a[None], b[None]):
r = i * n + j + 1
A[i, j] = r
a[None] = u
b[None] = v
func()
for i in range(n):
for j in range(n):
if i < u and j < v:
r = i * n + j + 1
else:
r = 0
assert A[i, j] == r
@test_utils.test()
def test_grouped_ndrange_star():
@ti.kernel
def foo() -> ti.i32:
ret = 0
for I in ti.grouped(ti.ndrange(*[[1, 3]] * 3)):
ret += I[0] + I[1] + I[2]
return ret
assert foo() == 36
@test_utils.test()
def test_ndrange_three_arguments():
@ti.kernel
def foo():
for i in ti.ndrange((1, 2, 3)):
pass
with pytest.raises(
ti.TaichiSyntaxError,
match=
r"Every argument of ndrange should be a scalar or a tuple/list like \(begin, end\)"
):
foo()
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2012 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the filters."""
import unittest
from plaso.formatters import interface as formatters_interface
from plaso.lib import event
from plaso.lib import objectfilter
from plaso.lib import pfilter
from plaso.lib import timelib_test
from plaso.parsers import interface as parsers_interface
import pytz
class Empty(object):
"""An empty object."""
class PfilterFakeFormatter(formatters_interface.EventFormatter):
"""A formatter for this fake class."""
DATA_TYPE = 'Weirdo:Made up Source:Last Written'
FORMAT_STRING = '{text}'
FORMAT_STRING_SHORT = '{text_short}'
SOURCE_LONG = 'Fake Parsing Source'
SOURCE_SHORT = 'REG'
class PfilterFakeParser(parsers_interface.BaseParser):
"""A fake parser that does not parse anything, but registers."""
NAME = 'pfilter_fake_parser'
DATA_TYPE = 'Weirdo:Made up Source:Last Written'
def Parse(self, unused_parser_context, unused_file_entry):
"""Extract data from a fake plist file for testing.
Args:
parser_context: A parser context object (instance of ParserContext).
file_entry: A file entry object (instance of dfvfs.FileEntry).
Yields:
An event object (instance of EventObject) that contains the parsed
attributes.
"""
event_object = event.EventObject()
event_object.timestamp = timelib_test.CopyStringToTimestamp(
'2015-11-18 01:15:43')
event_object.timestamp_desc = 'Last Written'
event_object.text_short = 'This description is different than the long one.'
event_object.text = (
u'User did a very bad thing, bad, bad thing that awoke Dr. Evil.')
event_object.filename = (
u'/My Documents/goodfella/Documents/Hideout/myfile.txt')
event_object.hostname = 'Agrabah'
event_object.parser = 'Weirdo'
event_object.inode = 1245
event_object.display_name = u'unknown:{0:s}'.format(event_object.filename)
event_object.data_type = self.DATA_TYPE
yield event_object
class PfilterAnotherParser(PfilterFakeParser):
"""Another fake parser that does nothing but register as a parser."""
NAME = 'pfilter_another_fake'
DATA_TYPE = 'Weirdo:AnotherFakeSource'
class PfilterAnotherFakeFormatter(PfilterFakeFormatter):
"""Formatter for the AnotherParser event."""
DATA_TYPE = 'Weirdo:AnotherFakeSource'
SOURCE_LONG = 'Another Fake Source'
class PfilterAllEvilParser(PfilterFakeParser):
"""A class that does nothing but has a fancy name."""
NAME = 'pfilter_evil_fake_parser'
DATA_TYPE = 'Weirdo:AllEvil'
class PfilterEvilFormatter(PfilterFakeFormatter):
"""Formatter for the AllEvilParser."""
DATA_TYPE = 'Weirdo:AllEvil'
SOURCE_LONG = 'A Truly Evil'
class PFilterTest(unittest.TestCase):
"""Simple plaso specific tests to the pfilter implementation."""
def setUp(self):
"""Set up the necessary variables used in tests."""
self._pre = Empty()
self._pre.zone = pytz.UTC
def testPlasoEvents(self):
"""Test plaso EventObjects, both Python and Protobuf version.
These are more plaso specific tests than the more generic
objectfilter ones. It will create an EventObject that stores
    some attributes. These objects will then be serialized into an
EventObject protobuf and all tests run against both the native
Python object as well as the protobuf.
"""
event_object = event.EventObject()
event_object.data_type = 'Weirdo:Made up Source:Last Written'
event_object.timestamp = timelib_test.CopyStringToTimestamp(
'2015-11-18 01:15:43')
event_object.timestamp_desc = 'Last Written'
event_object.text_short = 'This description is different than the long one.'
event_object.text = (
u'User did a very bad thing, bad, bad thing that awoke Dr. Evil.')
event_object.filename = (
u'/My Documents/goodfella/Documents/Hideout/myfile.txt')
event_object.hostname = 'Agrabah'
event_object.parser = 'Weirdo'
event_object.inode = 1245
event_object.mydict = {
'value': 134, 'another': 'value', 'A Key (with stuff)': 'Here'}
event_object.display_name = u'unknown:{0:s}'.format(event_object.filename)
# Series of tests.
query = 'filename contains \'GoodFella\''
self.RunPlasoTest(event_object, query, True)
    # A double negative is not collapsed into a positive match;
    # the parser rejects the query below with a ParseError.
query = 'filename not not contains \'GoodFella\''
my_parser = pfilter.BaseParser(query)
self.assertRaises(
objectfilter.ParseError,
my_parser.Parse)
# Test date filtering.
query = 'date >= \'2015-11-18\''
self.RunPlasoTest(event_object, query, True)
query = 'date < \'2015-11-19\''
self.RunPlasoTest(event_object, query, True)
# 2015-11-18T01:15:43
query = (
'date < \'2015-11-18T01:15:44.341\' and date > \'2015-11-18 01:15:42\'')
self.RunPlasoTest(event_object, query, True)
query = 'date > \'2015-11-19\''
self.RunPlasoTest(event_object, query, False)
    # Perform a few attribute tests.
query = 'filename not contains \'sometext\''
self.RunPlasoTest(event_object, query, True)
query = (
'timestamp_desc CONTAINS \'written\' AND date > \'2015-11-18\' AND '
'date < \'2015-11-25 12:56:21\' AND (source_short contains \'LOG\' or '
'source_short CONTAINS \'REG\')')
self.RunPlasoTest(event_object, query, True)
query = 'parser is not \'Made\''
self.RunPlasoTest(event_object, query, True)
query = 'parser is not \'Weirdo\''
self.RunPlasoTest(event_object, query, False)
query = 'mydict.value is 123'
self.RunPlasoTest(event_object, query, False)
query = 'mydict.akeywithstuff contains "ere"'
self.RunPlasoTest(event_object, query, True)
query = 'mydict.value is 134'
self.RunPlasoTest(event_object, query, True)
query = 'mydict.value < 200'
self.RunPlasoTest(event_object, query, True)
query = 'mydict.another contains "val"'
self.RunPlasoTest(event_object, query, True)
query = 'mydict.notthere is 123'
self.RunPlasoTest(event_object, query, False)
query = 'source_long not contains \'Fake\''
self.RunPlasoTest(event_object, query, False)
query = 'source is \'REG\''
self.RunPlasoTest(event_object, query, True)
query = 'source is not \'FILE\''
self.RunPlasoTest(event_object, query, True)
# Multiple attributes.
query = (
'source_long is \'Fake Parsing Source\' AND description_long '
'regexp \'bad, bad thing [\\sa-zA-Z\\.]+ evil\'')
self.RunPlasoTest(event_object, query, False)
query = (
'source_long is \'Fake Parsing Source\' AND text iregexp '
'\'bad, bad thing [\\sa-zA-Z\\.]+ evil\'')
self.RunPlasoTest(event_object, query, True)
def RunPlasoTest(self, obj, query, result):
"""Run a simple test against an event object."""
my_parser = pfilter.BaseParser(query).Parse()
matcher = my_parser.Compile(
pfilter.PlasoAttributeFilterImplementation)
self.assertEqual(result, matcher.Matches(obj))
if __name__ == "__main__":
unittest.main()
|
|
import http.client
from unittest import mock
import unittest
import urllib.parse
import responses
from cumulusci.tasks.release_notes.exceptions import GithubIssuesError
from cumulusci.tasks.release_notes.generator import GithubReleaseNotesGenerator
from cumulusci.tasks.release_notes.parser import BaseChangeNotesParser
from cumulusci.tasks.release_notes.parser import ChangeNotesLinesParser
from cumulusci.tasks.release_notes.parser import GithubIssuesParser
from cumulusci.tasks.release_notes.parser import GithubLinesParser
from cumulusci.tasks.release_notes.parser import IssuesParser
from cumulusci.tasks.release_notes.parser import InstallLinkParser
from cumulusci.core.exceptions import GithubApiNotFoundError
from cumulusci.core.github import get_github_api
from cumulusci.tasks.github.tests.util_github_api import GithubApiTestMixin
from cumulusci.tasks.release_notes.tests.utils import MockUtil
PARSER_CONFIG = [
{
"class_path": "cumulusci.tasks.release_notes.parser.GithubLinesParser",
"title": "Critical Changes",
},
{
"class_path": "cumulusci.tasks.release_notes.parser.GithubLinesParser",
"title": "Changes",
},
{
"class_path": "cumulusci.tasks.release_notes.parser.GithubIssuesParser",
"title": "Issues Closed",
},
]
class TestBaseChangeNotesParser(unittest.TestCase):
def test_parse(self):
parser = BaseChangeNotesParser("Title")
with self.assertRaises(NotImplementedError):
parser.parse()
def test_render(self):
parser = BaseChangeNotesParser("Title")
with self.assertRaises(NotImplementedError):
parser.render()
class TestChangeNotesLinesParser(unittest.TestCase):
def setUp(self):
self.title = "Title"
def test_parse_no_start_line(self):
change_note = "foo\r\nbar\r\n"
parser = ChangeNotesLinesParser(None, self.title)
line_added = parser.parse(change_note)
self.assertEqual(parser.content, [])
self.assertFalse(line_added)
def test_parse_start_line_no_content(self):
change_note = "# {}\r\n\r\n".format(self.title)
parser = ChangeNotesLinesParser(None, self.title)
line_added = parser.parse(change_note)
self.assertEqual(parser.content, [])
self.assertFalse(line_added)
def test_parse_start_line_no_end_line(self):
change_note = "# {}\r\nfoo\r\nbar".format(self.title)
parser = ChangeNotesLinesParser(None, self.title)
line_added = parser.parse(change_note)
self.assertEqual(parser.content, ["foo", "bar"])
self.assertEqual(True, line_added)
def test_parse_start_line_end_at_header(self):
change_note = "# {}\r\nfoo\r\n# Another Header\r\nbar".format(self.title)
parser = ChangeNotesLinesParser(None, self.title)
line_added = parser.parse(change_note)
self.assertEqual(parser.content, ["foo"])
self.assertTrue(line_added)
def test_parse_start_line_no_content_no_end_line(self):
change_note = "# {}".format(self.title)
parser = ChangeNotesLinesParser(None, self.title)
line_added = parser.parse(change_note)
self.assertEqual(parser.content, [])
self.assertFalse(line_added)
def test_parse_multiple_start_lines_without_end_lines(self):
change_note = "# {0}\r\nfoo\r\n# {0}\r\nbar\r\n".format(self.title)
parser = ChangeNotesLinesParser(None, self.title)
line_added = parser.parse(change_note)
self.assertEqual(parser.content, ["foo", "bar"])
self.assertTrue(line_added)
def test_parse_multiple_start_lines_with_end_lines(self):
change_note = "# {0}\r\nfoo\r\n\r\n# {0}\r\nbar\r\n\r\nincluded\r\n\r\n# not included".format(
self.title
)
parser = ChangeNotesLinesParser(None, self.title)
line_added = parser.parse(change_note)
self.assertEqual(parser.content, ["foo", "bar", "included"])
self.assertTrue(line_added)
def test_parse_multi_level_indent(self):
change_note = "# {0}\r\nfoo \r\n bar \r\n baz \r\n".format(
self.title
)
parser = ChangeNotesLinesParser(None, self.title)
line_added = parser.parse(change_note)
self.assertEqual(parser.content, ["foo", " bar", " baz"])
self.assertTrue(line_added)
def test_parse_subheading(self):
change_note = "# {0}\r\n## Subheading\r\nfoo".format(self.title)
parser = ChangeNotesLinesParser(None, self.title)
line_added = parser.parse(change_note)
self.assertEqual([], parser.content)
self.assertEqual({"Subheading": ["foo"]}, parser.h2)
self.assertTrue(line_added)
def test_parse_subheading_from_another_section(self):
change_note = "## Subheading\r\n# {0}\r\nfoo".format(self.title)
parser = ChangeNotesLinesParser(None, self.title)
line_added = parser.parse(change_note)
self.assertEqual(["foo"], parser.content)
self.assertEqual({}, parser.h2)
self.assertTrue(line_added)
def test_render_no_content(self):
parser = ChangeNotesLinesParser(None, self.title)
self.assertEqual(parser.render(), "")
def test_render_one_content(self):
parser = ChangeNotesLinesParser(None, self.title)
content = ["foo"]
parser.content = content
self.assertEqual(
parser.render(), "# {}\r\n\r\n{}".format(self.title, content[0])
)
def test_render_multiple_content(self):
parser = ChangeNotesLinesParser(None, self.title)
content = ["foo", "bar"]
parser.content = content
self.assertEqual(
parser.render(), "# {}\r\n\r\n{}".format(self.title, "\r\n".join(content))
)
def test_render_subheadings(self):
parser = ChangeNotesLinesParser(None, self.title)
parser.h2 = {"Subheading": ["foo"]}
self.assertEqual(
parser.render(),
"# {}\r\n\r\n\r\n## Subheading\r\n\r\nfoo".format(self.title),
)
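    # Note (added): as the expected strings above show, render() emits a
    # "# Title" heading, a blank line, then the content, using Windows-style
    # "\r\n" line endings throughout.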
class TestGithubLinesParser(unittest.TestCase):
def setUp(self):
self.title = "Title"
def test_parse(self):
generator = mock.Mock(link_pr=True)
parser = GithubLinesParser(generator, self.title)
pr = mock.Mock(
number=1, html_url="http://pr", body="# {}\r\n\r\nfoo".format(self.title)
)
parser.parse(pr)
self.assertEqual(1, parser.pr_number)
self.assertEqual("http://pr", parser.pr_url)
self.assertEqual(["foo [[PR1](http://pr)]"], parser.content)
def test_parse_empty_pull_request_body(self):
generator = mock.Mock(link_pr=True)
parser = GithubLinesParser(generator, self.title)
pr = mock.Mock(number=1, html_url="http://pr", body=None)
line_added = parser.parse(pr)
assert not line_added
class TestIssuesParser(unittest.TestCase):
def setUp(self):
self.title = "Issues"
def test_issue_numbers(self):
change_note = "# {}\r\nfix #2\r\nfix #3\r\nfix #5\r\n".format(self.title)
parser = IssuesParser(None, self.title)
parser.parse(change_note)
self.assertEqual(parser.content, [2, 3, 5])
def test_issue_numbers_with_links(self):
change_note = "# {}\r\nfix [#2](https://issue)\r\nfix [#3](http://issue)\r\nfix #5\r\n".format(
self.title
)
parser = IssuesParser(None, self.title)
parser.parse(change_note)
self.assertEqual(parser.content, [2, 3, 5])
def test_issue_numbers_and_other_numbers(self):
change_note = "# {}\r\nfixes #2 but not # 3 or 5".format(self.title)
parser = IssuesParser(None, self.title)
parser.parse(change_note)
self.assertEqual(parser.content, [2])
def test_multiple_issue_numbers_per_line(self):
change_note = "# {}\r\nfix #2 also does fix #3 and fix #5\r\n".format(
self.title
)
parser = IssuesParser(None, self.title)
parser.parse(change_note)
self.assertEqual(parser.content, [2, 3, 5])
def test_render(self):
parser = IssuesParser(None, self.title)
parser.content = ["1: foo"]
self.assertEqual("# Issues\r\n\r\n#1: foo", parser.render())
class TestGithubIssuesParser(unittest.TestCase, GithubApiTestMixin):
def setUp(self):
self.init_github()
self.gh = get_github_api("TestUser", "TestPass")
self.title = "Issues"
# Set up the mock release_tag lookup response
self.issue_number_valid = 123
self.issue_number_invalid = 456
self.pr_number = 789
self.pr_url = "https://github.com/{}/{}/pulls/{}".format(
"TestOwner", "TestRepo", self.pr_number
)
self.mock_util = MockUtil("TestOwner", "TestRepo")
@responses.activate
def test_issue_numbers(self):
self.mock_util.mock_get_repo()
change_note = "# {}\r\nFixes #2, Closed #3 and Resolve #5".format(self.title)
self.mock_util.mock_pull_request(self.pr_number, body=change_note)
generator = self._create_generator()
repo = generator.get_repo()
pull_request = repo.pull_request(self.pr_number)
parser = GithubIssuesParser(generator, self.title)
parser.parse(pull_request)
pr_url = "https://github.com/TestOwner/TestRepo/pulls/{}".format(self.pr_number)
expected_content = self._create_expected_content([2, 3, 5], pr_url)
self.assertEqual(parser.content, expected_content)
@responses.activate
def test_issue_numbers_and_other_numbers(self):
self.mock_util.mock_get_repo()
change_note = "# {}\r\nFixes #2 but not #5".format(self.title)
self.mock_util.mock_pull_request(self.pr_number, body=change_note)
generator = self._create_generator()
repo = generator.get_repo()
pull_request = repo.pull_request(self.pr_number)
parser = GithubIssuesParser(generator, self.title)
parser.parse(pull_request)
pr_url = "https://github.com/TestOwner/TestRepo/pulls/{}".format(self.pr_number)
expected_content = self._create_expected_content([2], pr_url)
self.assertEqual(parser.content, expected_content)
@responses.activate
def test_no_issue_numbers(self):
pr_number = 1
self.mock_util.mock_get_repo()
change_note = "# {}\r\n#2 and #3 are fixed by this change".format(self.title)
self.mock_util.mock_pull_request(pr_number, body=change_note)
generator = self._create_generator()
repo = generator.get_repo()
pull_request = repo.pull_request(pr_number)
parser = GithubIssuesParser(generator, self.title)
parser.parse(pull_request)
self.assertEqual(parser.content, [])
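    # Note (added): as the three tests above illustrate, GithubIssuesParser
    # only collects issue numbers preceded by a closing keyword ("Fixes",
    # "Closed", "Resolve", ...); bare "#N" references are ignored.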
@responses.activate
def test_render_issue_number_valid(self):
api_url = "{}/issues/{}".format(self.repo_api_url, self.issue_number_valid)
expected_response = self._get_expected_issue(self.issue_number_valid)
self.mock_util.mock_get_repo()
responses.add(method=responses.GET, url=api_url, json=expected_response)
generator = self._create_generator()
generator.link_pr = True
parser = GithubIssuesParser(generator, self.title)
parser.content = [
{
"issue_number": self.issue_number_valid,
"pr_number": self.pr_number,
"pr_url": self.pr_url,
}
]
expected_render = self._create_expected_render(
self.issue_number_valid, expected_response["title"], True
)
self.assertEqual(parser.render(), expected_render)
@responses.activate
def test_render_issue_number_invalid(self):
api_url = "{}/issues/{}".format(self.repo_api_url, self.issue_number_invalid)
expected_response = self._get_expected_not_found()
self.mock_util.mock_get_repo()
responses.add(
method=responses.GET,
url=api_url,
json=expected_response,
status=http.client.NOT_FOUND,
)
generator = self._create_generator()
parser = GithubIssuesParser(generator, self.title)
parser.content = [
{
"issue_number": self.issue_number_invalid,
"pr_number": self.pr_number,
"pr_url": self.pr_url,
}
]
with self.assertRaises(GithubApiNotFoundError):
parser.render()
def test_init__issues_disabled(self):
generator = mock.Mock(has_issues=False)
with self.assertRaises(GithubIssuesError):
GithubIssuesParser(generator, self.title)
def _create_expected_content(self, issue_numbers, pr_url):
y = []
for n in issue_numbers:
y.append({"issue_number": n, "pr_number": self.pr_number, "pr_url": pr_url})
return y
def _create_expected_render(self, issue_number, issue_title, link_pr):
render = "# {}\r\n\r\n#{}: {}".format(self.title, issue_number, issue_title)
if link_pr:
render += " [[PR{}]({})]".format(self.pr_number, self.pr_url)
return render
def _create_generator(self):
generator = GithubReleaseNotesGenerator(
self.gh, self.github_info.copy(), PARSER_CONFIG, "release/1.1"
)
return generator
class TestCommentingGithubIssuesParser(unittest.TestCase, GithubApiTestMixin):
def setUp(self):
self.init_github()
self.gh = get_github_api("TestUser", "TestPass")
self.mock_util = MockUtil("TestOwner", "TestRepo")
self.title = "Issues"
self.issue_number_without_comments = 1
self.issue_number_with_beta_comment = 2
self.issue_number_without_beta_comment = 3
self.issue_number_with_prod_comment = 4
self.issue_number_without_prod_comment = 5
self.pr_number = 6
self.pr_url = "https://github.com/TestOwner/TestRepo/pulls/{}".format(
self.pr_number
)
self.tag_prod = "release/1.2"
self.tag_beta = "beta/1.2-Beta_3"
self.tag_not_prod_or_beta = "foo"
self.version_number_prod = "1.1"
self.version_number_beta = "1.2 (Beta 3)"
def _create_generator(self, tag):
generator = GithubReleaseNotesGenerator(
self.gh, self.github_info.copy(), PARSER_CONFIG, tag, publish=True
)
return generator
@responses.activate
def test_render_issue_without_comments(self):
issue_number = self.issue_number_without_comments
tag = self.tag_not_prod_or_beta
self.mock_util.mock_get_repo()
self.mock_util.mock_post_comment(issue_number)
# Mock the issue
api_url = "{}/issues/{}".format(self.repo_api_url, issue_number)
expected_issue = self._get_expected_issue(issue_number)
responses.add(method=responses.GET, url=api_url, json=expected_issue)
# Mock the comments list
api_url = "{}/issues/{}/comments".format(self.repo_api_url, issue_number)
responses.add(
method=responses.GET, url=api_url, json=[], content_type="application/json"
)
generator = self._create_generator(tag)
parser = GithubIssuesParser(generator, self.title)
parser.content = [
{
"issue_number": issue_number,
"pr_number": self.pr_number,
"pr_url": self.pr_url,
}
]
expected_render = self._create_expected_render(
issue_number, expected_issue["title"], False
)
render = parser.render()
self.assertEqual(render, expected_render)
self.assertEqual(len(responses.calls._calls), 2)
@responses.activate
def test_render_issue_with_beta_comment(self):
issue_number = self.issue_number_with_beta_comment
tag = self.tag_beta
self.mock_util.mock_get_repo()
self.mock_util.mock_post_comment(issue_number)
# Mock the issue
api_url = "{}/issues/{}".format(self.repo_api_url, issue_number)
expected_issue = self._get_expected_issue(issue_number)
responses.add(method=responses.GET, url=api_url, json=expected_issue)
# Mock the comments list
api_url = "{}/issues/{}/comments".format(self.repo_api_url, issue_number)
expected_comment_1 = self._get_expected_issue_comment(
GithubIssuesParser.ISSUE_COMMENT["beta"]
)
expected_comments = [expected_comment_1]
responses.add(method=responses.GET, url=api_url, json=expected_comments)
generator = self._create_generator(tag)
parser = GithubIssuesParser(generator, self.title)
parser.content = [
{
"issue_number": issue_number,
"pr_number": self.pr_number,
"pr_url": self.pr_url,
}
]
expected_render = self._create_expected_render(
issue_number, expected_issue["title"], False
)
render = parser.render()
self.assertEqual(render, expected_render)
self.assertEqual(len(responses.calls._calls), 3)
@responses.activate
def test_render_issue_without_beta_comment(self):
issue_number = self.issue_number_without_beta_comment
tag = self.tag_beta
self.mock_util.mock_get_repo()
# Mock the issue
api_url = "{}/issues/{}".format(self.repo_api_url, issue_number)
expected_issue = self._get_expected_issue(issue_number)
responses.add(method=responses.GET, url=api_url, json=expected_issue)
# Mock the comments list
api_url = "{}/issues/{}/comments".format(self.repo_api_url, issue_number)
expected_comment_1 = self._get_expected_issue_comment("Some other comment")
responses.add(
method=responses.GET, url=api_url, json=[], content_type="application/json"
)
# Mock the comment post response
api_url = "{}/issues/{}/comments".format(self.repo_api_url, issue_number)
expected_comment_1 = self._get_expected_issue_comment(
"{} {}".format(
GithubIssuesParser.ISSUE_COMMENT["beta"], self.version_number_beta
)
)
responses.add(method=responses.POST, url=api_url, json=expected_comment_1)
generator = self._create_generator(tag)
parser = GithubIssuesParser(generator, self.title)
parser.content = [
{
"issue_number": issue_number,
"pr_number": self.pr_number,
"pr_url": self.pr_url,
}
]
expected_render = self._create_expected_render(
issue_number, expected_issue["title"], False
)
self.assertEqual(parser.render(), expected_render)
self.assertEqual(len(responses.calls._calls), 4)
@responses.activate
def test_render_issue_with_prod_comment(self):
issue_number = self.issue_number_with_prod_comment
tag = self.tag_prod
self.mock_util.mock_get_repo()
# Mock the issue
api_url = "{}/issues/{}".format(self.repo_api_url, issue_number)
expected_issue = self._get_expected_issue(issue_number)
responses.add(method=responses.GET, url=api_url, json=expected_issue)
# Mock the comments list
api_url = "{}/issues/{}/comments".format(self.repo_api_url, issue_number)
expected_comment_1 = self._get_expected_issue_comment(
GithubIssuesParser.ISSUE_COMMENT["prod"]
)
expected_comments = [expected_comment_1]
responses.add(method=responses.GET, url=api_url, json=expected_comments)
generator = self._create_generator(tag)
parser = GithubIssuesParser(generator, self.title)
parser.content = [
{
"issue_number": issue_number,
"pr_number": self.pr_number,
"pr_url": self.pr_url,
}
]
expected_render = self._create_expected_render(
issue_number, expected_issue["title"], False
)
self.assertEqual(parser.render(), expected_render)
self.assertEqual(len(responses.calls._calls), 3)
@responses.activate
def test_render_issue_without_prod_comment(self):
issue_number = self.issue_number_without_prod_comment
tag = self.tag_prod
self.mock_util.mock_get_repo()
# Mock the issue
api_url = "{}/issues/{}".format(self.repo_api_url, issue_number)
expected_issue = self._get_expected_issue(issue_number)
responses.add(method=responses.GET, url=api_url, json=expected_issue)
# Mock the comments list
api_url = "{}/issues/{}/comments".format(self.repo_api_url, issue_number)
expected_comment_1 = self._get_expected_issue_comment("Some other comment")
responses.add(
method=responses.GET, url=api_url, json=[], content_type="application/json"
)
# Mock the comment post response
api_url = "{}/issues/{}/comments".format(self.repo_api_url, issue_number)
expected_comment_1 = self._get_expected_issue_comment(
"{} {}".format(
GithubIssuesParser.ISSUE_COMMENT["prod"], self.version_number_prod
)
)
responses.add(method=responses.POST, url=api_url, json=expected_comment_1)
generator = self._create_generator(tag)
parser = GithubIssuesParser(generator, self.title)
parser.content = [
{
"issue_number": issue_number,
"pr_number": self.pr_number,
"pr_url": self.pr_url,
}
]
expected_render = self._create_expected_render(
issue_number, expected_issue["title"], False
)
render = parser.render()
self.assertEqual(render, expected_render)
self.assertEqual(len(responses.calls._calls), 4)
def _create_expected_render(self, issue_number, issue_title, link_pr):
render = "# {}\r\n\r\n#{}: {}".format(self.title, issue_number, issue_title)
if link_pr:
render += " [[PR{}]({})]".format(self.pr_number, self.pr_url)
return render
class TestInstallLinkParser:
def test_no_package_version(self):
generator = mock.Mock(
link_pr=True,
version_id=None,
sandbox_date=None, # need to set explicitly due to mock, will default to None when using CLI
production_date=None, # need to set explicitly due to mock, will default to None when using CLI
            trial_info=False,  # need to set explicitly due to mock, will default to False when using CLI
)
parser = InstallLinkParser(generator, "Title")
parser.parse("abc")
assert parser.render() == ""
def test_package_with_version_id_no_dates_no_trial(self):
generator = mock.Mock(
link_pr=True,
version_id="04t0000asdf",
sandbox_date=None, # need to set explicitly due to mock, will default to None when using CLI
production_date=None, # need to set explicitly due to mock, will default to None when using CLI
trial_info=False, # need to set explicitly due to mock, will default to False when using CLI
)
parser = InstallLinkParser(generator, "Title")
parser.parse("abc")
version_id = urllib.parse.quote_plus(generator.version_id)
assert (
f"""# Title\r\n\r\nSandbox & Scratch Orgs:\r\nhttps://test.salesforce.com/packaging/installPackage.apexp?p0={version_id}\r\n\r\nProduction & Developer Edition Orgs:\r\nhttps://login.salesforce.com/packaging/installPackage.apexp?p0={version_id}"""
== parser.render()
)
def test_package_version_id_both_dates_no_trial(self):
generator = mock.Mock(
link_pr=True,
version_id="04t0000asdf",
sandbox_date="2020-10-10",
production_date="2020-10-11",
            trial_info=False,  # need to set explicitly due to mock, will default to False when using CLI
        )
parser = InstallLinkParser(generator, "Title")
parser.parse("abc")
version_id = urllib.parse.quote_plus(generator.version_id)
assert (
f"""# Title\r\n\r\n## Push Schedule\r\nSandbox orgs: {generator.sandbox_date}\r\nProduction orgs: {generator.production_date}\r\n\r\nSandbox & Scratch Orgs:\r\nhttps://test.salesforce.com/packaging/installPackage.apexp?p0={version_id}\r\n\r\nProduction & Developer Edition Orgs:\r\nhttps://login.salesforce.com/packaging/installPackage.apexp?p0={version_id}"""
== parser.render()
)
def test_package_version_id_sandbox_date_no_trial(self):
generator = mock.Mock(
link_pr=True,
version_id="04t0000asdf",
sandbox_date="2020-10-10",
            production_date=None,  # need to set explicitly due to mock, will default to None when using CLI
trial_info=False, # need to set explicitly due to mock, will default to False when using CLI
)
parser = InstallLinkParser(generator, "Title")
parser.parse("abc")
version_id = urllib.parse.quote_plus(generator.version_id)
assert (
f"""# Title\r\n\r\n## Push Schedule\r\nSandbox orgs: {generator.sandbox_date}\r\n\r\nSandbox & Scratch Orgs:\r\nhttps://test.salesforce.com/packaging/installPackage.apexp?p0={version_id}\r\n\r\nProduction & Developer Edition Orgs:\r\nhttps://login.salesforce.com/packaging/installPackage.apexp?p0={version_id}"""
== parser.render()
)
def test_package_version_id_production_date_no_trial(self):
generator = mock.Mock(
link_pr=True,
version_id="04t0000asdf",
sandbox_date=None, # need to set explicitly due to mock, will default to None when using CLI
production_date="2020-10-10",
trial_info=False, # need to set explicitly due to mock, will default to False when using CLI
)
parser = InstallLinkParser(generator, "Title")
parser.parse("abc")
version_id = urllib.parse.quote_plus(generator.version_id)
assert (
f"""# Title\r\n\r\n## Push Schedule\r\nProduction orgs: {generator.production_date}\r\n\r\nSandbox & Scratch Orgs:\r\nhttps://test.salesforce.com/packaging/installPackage.apexp?p0={version_id}\r\n\r\nProduction & Developer Edition Orgs:\r\nhttps://login.salesforce.com/packaging/installPackage.apexp?p0={version_id}"""
== parser.render()
)
def test_package_with_version_id_no_dates_trial(self):
generator = mock.Mock(
link_pr=True,
version_id="04t0000asdf",
sandbox_date=None, # need to set explicitly due to mock, will default to None when using CLI
production_date=None, # need to set explicitly due to mock, will default to None when using CLI
trial_info="`TBD`",
)
parser = InstallLinkParser(generator, "Title")
parser.parse("abc")
version_id = urllib.parse.quote_plus(generator.version_id)
assert (
f"""# Title\r\n\r\nSandbox & Scratch Orgs:\r\nhttps://test.salesforce.com/packaging/installPackage.apexp?p0={version_id}\r\n\r\nProduction & Developer Edition Orgs:\r\nhttps://login.salesforce.com/packaging/installPackage.apexp?p0={version_id}\r\n\r\n## Trialforce Template ID\r\n`TBD`"""
== parser.render()
)
def test_package_version_id_both_dates_with_trial(self):
generator = mock.Mock(
link_pr=True,
version_id="04t0000asdf",
sandbox_date="2020-10-10",
production_date="2020-10-11",
trial_info="`TBD`",
)
parser = InstallLinkParser(generator, "Title")
parser.parse("abc")
version_id = urllib.parse.quote_plus(generator.version_id)
assert (
f"""# Title\r\n\r\n## Push Schedule\r\nSandbox orgs: {generator.sandbox_date}\r\nProduction orgs: {generator.production_date}\r\n\r\nSandbox & Scratch Orgs:\r\nhttps://test.salesforce.com/packaging/installPackage.apexp?p0={version_id}\r\n\r\nProduction & Developer Edition Orgs:\r\nhttps://login.salesforce.com/packaging/installPackage.apexp?p0={version_id}\r\n\r\n## Trialforce Template ID\r\n`TBD`"""
== parser.render()
)
def test_package_version_id_sandbox_date_with_trial(self):
generator = mock.Mock(
link_pr=True,
version_id="04t0000asdf",
sandbox_date="2020-10-10",
production_date=None, # need to set explicitly due to mock, will default to None when using CLI
trial_info="`TBD`",
)
parser = InstallLinkParser(generator, "Title")
parser.parse("abc")
version_id = urllib.parse.quote_plus(generator.version_id)
assert (
f"""# Title\r\n\r\n## Push Schedule\r\nSandbox orgs: {generator.sandbox_date}\r\n\r\nSandbox & Scratch Orgs:\r\nhttps://test.salesforce.com/packaging/installPackage.apexp?p0={version_id}\r\n\r\nProduction & Developer Edition Orgs:\r\nhttps://login.salesforce.com/packaging/installPackage.apexp?p0={version_id}\r\n\r\n## Trialforce Template ID\r\n`TBD`"""
== parser.render()
)
def test_package_version_id_production_date_with_trial(self):
generator = mock.Mock(
link_pr=True,
version_id="04t0000asdf",
sandbox_date=None, # need to set explicitly due to mock, will default to None when using CLI
production_date="2020-10-10",
            trial_info="`TBD`",
)
parser = InstallLinkParser(generator, "Title")
parser.parse("abc")
version_id = urllib.parse.quote_plus(generator.version_id)
assert f"""# Title\r\n\r\n## Push Schedule\r\nProduction orgs: {generator.production_date}\r\n\r\n\r\nProduction & Developer Edition Orgs:\r\nhttps://login.salesforce.com/packaging/installPackage.apexp?p0={version_id}\r\n\r\nSandbox & Scratch Orgs:\r\nhttps://test.salesforce.com/packaging/installPackage.apexp?p0={version_id}\r\n\r\n## Trialforce Template ID\r\n`TBD`"""
def test_package_no_version_id_sandbox_date_no_trial(self):
generator = mock.Mock(
link_pr=True,
version_id="",
sandbox_date="2020-10-10", # need to set explicitly due to mock, will default to None when using CLI
production_date=None, # need to set explicitly due to mock, will default to None when using CLI
trial_info=False, # need to set explicitly due to mock, will default to False when using CLI
)
parser = InstallLinkParser(generator, "Title")
parser.parse("abc")
assert (
f"""# Title\r\n\r\n## Push Schedule\r\nSandbox orgs: {generator.sandbox_date}"""
== parser.render()
)
def test_package_no_version_id_production_date_no_trial(self):
generator = mock.Mock(
link_pr=True,
version_id="",
sandbox_date=None, # need to set explicitly due to mock, will default to None when using CLI
production_date="2020-10-10",
trial_info=False, # need to set explicitly due to mock, will default to False when using CLI
)
parser = InstallLinkParser(generator, "Title")
parser.parse("abc")
assert (
f"""# Title\r\n\r\n## Push Schedule\r\nProduction orgs: {generator.production_date}"""
== parser.render()
)
def test_package_no_version_id_both_dates_no_trial(self):
generator = mock.Mock(
link_pr=True,
version_id="",
sandbox_date="2020-10-10",
production_date="2020-10-11",
trial_info=False,
)
parser = InstallLinkParser(generator, "Title")
parser.parse("abc")
assert (
f"""# Title\r\n\r\n## Push Schedule\r\nSandbox orgs: {generator.sandbox_date}\r\nProduction orgs: {generator.production_date}"""
== parser.render()
)
def test_package_no_version_id_sandbox_date_trial(self):
generator = mock.Mock(
link_pr=True,
version_id="",
sandbox_date="2020-10-10", # need to set explicitly due to mock, will default to None when using CLI
production_date=None, # need to set explicitly due to mock, will default to None when using CLI
trial_info="`TBD`", # need to set explicitly due to mock, will default to False when using CLI
)
parser = InstallLinkParser(generator, "Title")
parser.parse("abc")
assert (
f"""# Title\r\n\r\n## Push Schedule\r\nSandbox orgs: {generator.sandbox_date}\r\n\r\n## Trialforce Template ID\r\n`TBD`"""
== parser.render()
)
def test_package_no_version_id_production_date_trial(self):
generator = mock.Mock(
link_pr=True,
version_id="",
sandbox_date=None, # need to set explicitly due to mock, will default to None when using CLI
production_date="2020-10-10",
trial_info="`TBD`", # need to set explicitly due to mock, will default to False when using CLI
)
parser = InstallLinkParser(generator, "Title")
parser.parse("abc")
assert (
f"""# Title\r\n\r\n## Push Schedule\r\nProduction orgs: {generator.production_date}\r\n\r\n## Trialforce Template ID\r\n`TBD`"""
== parser.render()
)
def test_package_no_version_id_both_dates_trial(self):
generator = mock.Mock(
link_pr=True,
version_id="",
sandbox_date="2020-10-10",
production_date="2020-10-11",
trial_info="`TBD`",
)
parser = InstallLinkParser(generator, "Title")
parser.parse("abc")
assert (
f"""# Title\r\n\r\n## Push Schedule\r\nSandbox orgs: {generator.sandbox_date}\r\nProduction orgs: {generator.production_date}\r\n\r\n## Trialforce Template ID\r\n`TBD`"""
== parser.render()
)
def test_package_no_version_no_dates_trial(self):
generator = mock.Mock(
link_pr=True,
version_id="",
sandbox_date=None, # need to set explicitly due to mock, will default to None when using CLI
production_date=None, # need to set explicitly due to mock, will default to None when using CLI
trial_info="`TBD`",
)
parser = InstallLinkParser(generator, "Title")
parser.parse("abc")
assert (
"""# Title\r\n\r\n## Trialforce Template ID\r\n`TBD`""" == parser.render()
)
def test_package_version(self):
generator = mock.Mock(link_pr=True)
generator.version_id = "foo bar"
parser = InstallLinkParser(generator, "Title")
parser.parse("abc")
output = parser.render()
assert (
"https://login.salesforce.com/packaging/installPackage.apexp?p0=foo+bar"
in output
)
assert (
"https://test.salesforce.com/packaging/installPackage.apexp?p0=foo+bar"
in output
)
|
|
#! /usr/bin/env python
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unittest for google.protobuf.internal.descriptor."""
__author__ = 'robinson@google.com (Will Robinson)'
import sys
try:
import unittest2 as unittest #PY26
except ImportError:
import unittest
from google.protobuf import unittest_custom_options_pb2
from google.protobuf import unittest_import_pb2
from google.protobuf import unittest_pb2
from google.protobuf import descriptor_pb2
from google.protobuf.internal import api_implementation
from google.protobuf.internal import test_util
from google.protobuf import descriptor
from google.protobuf import descriptor_pool
from google.protobuf import symbol_database
from google.protobuf import text_format
TEST_EMPTY_MESSAGE_DESCRIPTOR_ASCII = """
name: 'TestEmptyMessage'
"""
class DescriptorTest(unittest.TestCase):
def setUp(self):
file_proto = descriptor_pb2.FileDescriptorProto(
name='some/filename/some.proto',
package='protobuf_unittest')
message_proto = file_proto.message_type.add(
name='NestedMessage')
message_proto.field.add(
name='bb',
number=1,
type=descriptor_pb2.FieldDescriptorProto.TYPE_INT32,
label=descriptor_pb2.FieldDescriptorProto.LABEL_OPTIONAL)
enum_proto = message_proto.enum_type.add(
name='ForeignEnum')
enum_proto.value.add(name='FOREIGN_FOO', number=4)
enum_proto.value.add(name='FOREIGN_BAR', number=5)
enum_proto.value.add(name='FOREIGN_BAZ', number=6)
file_proto.message_type.add(name='ResponseMessage')
service_proto = file_proto.service.add(
name='Service')
method_proto = service_proto.method.add(
name='CallMethod',
input_type='.protobuf_unittest.NestedMessage',
output_type='.protobuf_unittest.ResponseMessage')
# Note: Calling DescriptorPool.Add() multiple times with the same file only
# works if the input is canonical; in particular, all type names must be
# fully qualified.
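    # For example, input_type='.protobuf_unittest.NestedMessage' above is
    # canonical (leading dot, fully qualified); a bare 'NestedMessage' is not
    # guaranteed to round-trip identically on a second Add().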
self.pool = self.GetDescriptorPool()
self.pool.Add(file_proto)
self.my_file = self.pool.FindFileByName(file_proto.name)
self.my_message = self.my_file.message_types_by_name[message_proto.name]
self.my_enum = self.my_message.enum_types_by_name[enum_proto.name]
self.my_service = self.my_file.services_by_name[service_proto.name]
self.my_method = self.my_service.methods_by_name[method_proto.name]
def GetDescriptorPool(self):
return symbol_database.Default().pool
def testEnumValueName(self):
self.assertEqual(self.my_message.EnumValueName('ForeignEnum', 4),
'FOREIGN_FOO')
self.assertEqual(
self.my_message.enum_types_by_name[
'ForeignEnum'].values_by_number[4].name,
self.my_message.EnumValueName('ForeignEnum', 4))
def testEnumFixups(self):
self.assertEqual(self.my_enum, self.my_enum.values[0].type)
def testContainingTypeFixups(self):
self.assertEqual(self.my_message, self.my_message.fields[0].containing_type)
self.assertEqual(self.my_message, self.my_enum.containing_type)
def testContainingServiceFixups(self):
self.assertEqual(self.my_service, self.my_method.containing_service)
def testGetOptions(self):
self.assertEqual(self.my_enum.GetOptions(),
descriptor_pb2.EnumOptions())
self.assertEqual(self.my_enum.values[0].GetOptions(),
descriptor_pb2.EnumValueOptions())
self.assertEqual(self.my_message.GetOptions(),
descriptor_pb2.MessageOptions())
self.assertEqual(self.my_message.fields[0].GetOptions(),
descriptor_pb2.FieldOptions())
self.assertEqual(self.my_method.GetOptions(),
descriptor_pb2.MethodOptions())
self.assertEqual(self.my_service.GetOptions(),
descriptor_pb2.ServiceOptions())
def testSimpleCustomOptions(self):
file_descriptor = unittest_custom_options_pb2.DESCRIPTOR
message_descriptor =\
unittest_custom_options_pb2.TestMessageWithCustomOptions.DESCRIPTOR
field_descriptor = message_descriptor.fields_by_name['field1']
oneof_descriptor = message_descriptor.oneofs_by_name['AnOneof']
enum_descriptor = message_descriptor.enum_types_by_name['AnEnum']
enum_value_descriptor =\
message_descriptor.enum_values_by_name['ANENUM_VAL2']
service_descriptor =\
unittest_custom_options_pb2.TestServiceWithCustomOptions.DESCRIPTOR
method_descriptor = service_descriptor.FindMethodByName('Foo')
file_options = file_descriptor.GetOptions()
file_opt1 = unittest_custom_options_pb2.file_opt1
self.assertEqual(9876543210, file_options.Extensions[file_opt1])
message_options = message_descriptor.GetOptions()
message_opt1 = unittest_custom_options_pb2.message_opt1
self.assertEqual(-56, message_options.Extensions[message_opt1])
field_options = field_descriptor.GetOptions()
field_opt1 = unittest_custom_options_pb2.field_opt1
self.assertEqual(8765432109, field_options.Extensions[field_opt1])
field_opt2 = unittest_custom_options_pb2.field_opt2
self.assertEqual(42, field_options.Extensions[field_opt2])
oneof_options = oneof_descriptor.GetOptions()
oneof_opt1 = unittest_custom_options_pb2.oneof_opt1
self.assertEqual(-99, oneof_options.Extensions[oneof_opt1])
enum_options = enum_descriptor.GetOptions()
enum_opt1 = unittest_custom_options_pb2.enum_opt1
self.assertEqual(-789, enum_options.Extensions[enum_opt1])
enum_value_options = enum_value_descriptor.GetOptions()
enum_value_opt1 = unittest_custom_options_pb2.enum_value_opt1
self.assertEqual(123, enum_value_options.Extensions[enum_value_opt1])
service_options = service_descriptor.GetOptions()
service_opt1 = unittest_custom_options_pb2.service_opt1
self.assertEqual(-9876543210, service_options.Extensions[service_opt1])
method_options = method_descriptor.GetOptions()
method_opt1 = unittest_custom_options_pb2.method_opt1
self.assertEqual(unittest_custom_options_pb2.METHODOPT1_VAL2,
method_options.Extensions[method_opt1])
message_descriptor = (
unittest_custom_options_pb2.DummyMessageContainingEnum.DESCRIPTOR)
self.assertTrue(file_descriptor.has_options)
self.assertFalse(message_descriptor.has_options)
def testDifferentCustomOptionTypes(self):
kint32min = -2**31
kint64min = -2**63
kint32max = 2**31 - 1
kint64max = 2**63 - 1
kuint32max = 2**32 - 1
kuint64max = 2**64 - 1
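    # Worked values for the 32-bit cases: kint32min == -2147483648,
    # kint32max == 2147483647 and kuint32max == 4294967295, i.e. the extremes
    # of the two's-complement and unsigned 32-bit ranges.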
message_descriptor =\
unittest_custom_options_pb2.CustomOptionMinIntegerValues.DESCRIPTOR
message_options = message_descriptor.GetOptions()
self.assertEqual(False, message_options.Extensions[
unittest_custom_options_pb2.bool_opt])
self.assertEqual(kint32min, message_options.Extensions[
unittest_custom_options_pb2.int32_opt])
self.assertEqual(kint64min, message_options.Extensions[
unittest_custom_options_pb2.int64_opt])
self.assertEqual(0, message_options.Extensions[
unittest_custom_options_pb2.uint32_opt])
self.assertEqual(0, message_options.Extensions[
unittest_custom_options_pb2.uint64_opt])
self.assertEqual(kint32min, message_options.Extensions[
unittest_custom_options_pb2.sint32_opt])
self.assertEqual(kint64min, message_options.Extensions[
unittest_custom_options_pb2.sint64_opt])
self.assertEqual(0, message_options.Extensions[
unittest_custom_options_pb2.fixed32_opt])
self.assertEqual(0, message_options.Extensions[
unittest_custom_options_pb2.fixed64_opt])
self.assertEqual(kint32min, message_options.Extensions[
unittest_custom_options_pb2.sfixed32_opt])
self.assertEqual(kint64min, message_options.Extensions[
unittest_custom_options_pb2.sfixed64_opt])
message_descriptor =\
unittest_custom_options_pb2.CustomOptionMaxIntegerValues.DESCRIPTOR
message_options = message_descriptor.GetOptions()
self.assertEqual(True, message_options.Extensions[
unittest_custom_options_pb2.bool_opt])
self.assertEqual(kint32max, message_options.Extensions[
unittest_custom_options_pb2.int32_opt])
self.assertEqual(kint64max, message_options.Extensions[
unittest_custom_options_pb2.int64_opt])
self.assertEqual(kuint32max, message_options.Extensions[
unittest_custom_options_pb2.uint32_opt])
self.assertEqual(kuint64max, message_options.Extensions[
unittest_custom_options_pb2.uint64_opt])
self.assertEqual(kint32max, message_options.Extensions[
unittest_custom_options_pb2.sint32_opt])
self.assertEqual(kint64max, message_options.Extensions[
unittest_custom_options_pb2.sint64_opt])
self.assertEqual(kuint32max, message_options.Extensions[
unittest_custom_options_pb2.fixed32_opt])
self.assertEqual(kuint64max, message_options.Extensions[
unittest_custom_options_pb2.fixed64_opt])
self.assertEqual(kint32max, message_options.Extensions[
unittest_custom_options_pb2.sfixed32_opt])
self.assertEqual(kint64max, message_options.Extensions[
unittest_custom_options_pb2.sfixed64_opt])
message_descriptor =\
unittest_custom_options_pb2.CustomOptionOtherValues.DESCRIPTOR
message_options = message_descriptor.GetOptions()
self.assertEqual(-100, message_options.Extensions[
unittest_custom_options_pb2.int32_opt])
self.assertAlmostEqual(12.3456789, message_options.Extensions[
unittest_custom_options_pb2.float_opt], 6)
self.assertAlmostEqual(1.234567890123456789, message_options.Extensions[
unittest_custom_options_pb2.double_opt])
self.assertEqual("Hello, \"World\"", message_options.Extensions[
unittest_custom_options_pb2.string_opt])
self.assertEqual(b"Hello\0World", message_options.Extensions[
unittest_custom_options_pb2.bytes_opt])
dummy_enum = unittest_custom_options_pb2.DummyMessageContainingEnum
self.assertEqual(
dummy_enum.TEST_OPTION_ENUM_TYPE2,
message_options.Extensions[unittest_custom_options_pb2.enum_opt])
message_descriptor =\
unittest_custom_options_pb2.SettingRealsFromPositiveInts.DESCRIPTOR
message_options = message_descriptor.GetOptions()
self.assertAlmostEqual(12, message_options.Extensions[
unittest_custom_options_pb2.float_opt], 6)
self.assertAlmostEqual(154, message_options.Extensions[
unittest_custom_options_pb2.double_opt])
message_descriptor =\
unittest_custom_options_pb2.SettingRealsFromNegativeInts.DESCRIPTOR
message_options = message_descriptor.GetOptions()
self.assertAlmostEqual(-12, message_options.Extensions[
unittest_custom_options_pb2.float_opt], 6)
self.assertAlmostEqual(-154, message_options.Extensions[
unittest_custom_options_pb2.double_opt])
def testComplexExtensionOptions(self):
descriptor =\
unittest_custom_options_pb2.VariousComplexOptions.DESCRIPTOR
options = descriptor.GetOptions()
self.assertEqual(42, options.Extensions[
unittest_custom_options_pb2.complex_opt1].foo)
self.assertEqual(324, options.Extensions[
unittest_custom_options_pb2.complex_opt1].Extensions[
unittest_custom_options_pb2.quux])
self.assertEqual(876, options.Extensions[
unittest_custom_options_pb2.complex_opt1].Extensions[
unittest_custom_options_pb2.corge].qux)
self.assertEqual(987, options.Extensions[
unittest_custom_options_pb2.complex_opt2].baz)
self.assertEqual(654, options.Extensions[
unittest_custom_options_pb2.complex_opt2].Extensions[
unittest_custom_options_pb2.grault])
self.assertEqual(743, options.Extensions[
unittest_custom_options_pb2.complex_opt2].bar.foo)
self.assertEqual(1999, options.Extensions[
unittest_custom_options_pb2.complex_opt2].bar.Extensions[
unittest_custom_options_pb2.quux])
self.assertEqual(2008, options.Extensions[
unittest_custom_options_pb2.complex_opt2].bar.Extensions[
unittest_custom_options_pb2.corge].qux)
self.assertEqual(741, options.Extensions[
unittest_custom_options_pb2.complex_opt2].Extensions[
unittest_custom_options_pb2.garply].foo)
self.assertEqual(1998, options.Extensions[
unittest_custom_options_pb2.complex_opt2].Extensions[
unittest_custom_options_pb2.garply].Extensions[
unittest_custom_options_pb2.quux])
self.assertEqual(2121, options.Extensions[
unittest_custom_options_pb2.complex_opt2].Extensions[
unittest_custom_options_pb2.garply].Extensions[
unittest_custom_options_pb2.corge].qux)
self.assertEqual(1971, options.Extensions[
unittest_custom_options_pb2.ComplexOptionType2
.ComplexOptionType4.complex_opt4].waldo)
self.assertEqual(321, options.Extensions[
unittest_custom_options_pb2.complex_opt2].fred.waldo)
self.assertEqual(9, options.Extensions[
unittest_custom_options_pb2.complex_opt3].qux)
self.assertEqual(22, options.Extensions[
unittest_custom_options_pb2.complex_opt3].complexoptiontype5.plugh)
self.assertEqual(24, options.Extensions[
unittest_custom_options_pb2.complexopt6].xyzzy)
# Check that aggregate options were parsed and saved correctly in
# the appropriate descriptors.
def testAggregateOptions(self):
file_descriptor = unittest_custom_options_pb2.DESCRIPTOR
message_descriptor =\
unittest_custom_options_pb2.AggregateMessage.DESCRIPTOR
field_descriptor = message_descriptor.fields_by_name["fieldname"]
enum_descriptor = unittest_custom_options_pb2.AggregateEnum.DESCRIPTOR
enum_value_descriptor = enum_descriptor.values_by_name["VALUE"]
service_descriptor =\
unittest_custom_options_pb2.AggregateService.DESCRIPTOR
method_descriptor = service_descriptor.FindMethodByName("Method")
# Tests for the different types of data embedded in fileopt
file_options = file_descriptor.GetOptions().Extensions[
unittest_custom_options_pb2.fileopt]
self.assertEqual(100, file_options.i)
self.assertEqual("FileAnnotation", file_options.s)
self.assertEqual("NestedFileAnnotation", file_options.sub.s)
self.assertEqual("FileExtensionAnnotation", file_options.file.Extensions[
unittest_custom_options_pb2.fileopt].s)
self.assertEqual("EmbeddedMessageSetElement", file_options.mset.Extensions[
unittest_custom_options_pb2.AggregateMessageSetElement
.message_set_extension].s)
# Simple tests for all the other types of annotations
self.assertEqual(
"MessageAnnotation",
message_descriptor.GetOptions().Extensions[
unittest_custom_options_pb2.msgopt].s)
self.assertEqual(
"FieldAnnotation",
field_descriptor.GetOptions().Extensions[
unittest_custom_options_pb2.fieldopt].s)
self.assertEqual(
"EnumAnnotation",
enum_descriptor.GetOptions().Extensions[
unittest_custom_options_pb2.enumopt].s)
self.assertEqual(
"EnumValueAnnotation",
enum_value_descriptor.GetOptions().Extensions[
unittest_custom_options_pb2.enumvalopt].s)
self.assertEqual(
"ServiceAnnotation",
service_descriptor.GetOptions().Extensions[
unittest_custom_options_pb2.serviceopt].s)
self.assertEqual(
"MethodAnnotation",
method_descriptor.GetOptions().Extensions[
unittest_custom_options_pb2.methodopt].s)
def testNestedOptions(self):
nested_message =\
unittest_custom_options_pb2.NestedOptionType.NestedMessage.DESCRIPTOR
self.assertEqual(1001, nested_message.GetOptions().Extensions[
unittest_custom_options_pb2.message_opt1])
nested_field = nested_message.fields_by_name["nested_field"]
self.assertEqual(1002, nested_field.GetOptions().Extensions[
unittest_custom_options_pb2.field_opt1])
outer_message =\
unittest_custom_options_pb2.NestedOptionType.DESCRIPTOR
nested_enum = outer_message.enum_types_by_name["NestedEnum"]
self.assertEqual(1003, nested_enum.GetOptions().Extensions[
unittest_custom_options_pb2.enum_opt1])
nested_enum_value = outer_message.enum_values_by_name["NESTED_ENUM_VALUE"]
self.assertEqual(1004, nested_enum_value.GetOptions().Extensions[
unittest_custom_options_pb2.enum_value_opt1])
nested_extension = outer_message.extensions_by_name["nested_extension"]
self.assertEqual(1005, nested_extension.GetOptions().Extensions[
unittest_custom_options_pb2.field_opt2])
def testFileDescriptorReferences(self):
self.assertEqual(self.my_enum.file, self.my_file)
self.assertEqual(self.my_message.file, self.my_file)
def testFileDescriptor(self):
self.assertEqual(self.my_file.name, 'some/filename/some.proto')
self.assertEqual(self.my_file.package, 'protobuf_unittest')
self.assertEqual(self.my_file.pool, self.pool)
# Generated modules also belong to the default pool.
self.assertEqual(unittest_pb2.DESCRIPTOR.pool, descriptor_pool.Default())
@unittest.skipIf(
api_implementation.Type() != 'cpp' or api_implementation.Version() != 2,
'Immutability of descriptors is only enforced in v2 implementation')
def testImmutableCppDescriptor(self):
message_descriptor = unittest_pb2.TestAllTypes.DESCRIPTOR
with self.assertRaises(AttributeError):
message_descriptor.fields_by_name = None
with self.assertRaises(TypeError):
message_descriptor.fields_by_name['Another'] = None
with self.assertRaises(TypeError):
message_descriptor.fields.append(None)
class NewDescriptorTest(DescriptorTest):
"""Redo the same tests as above, but with a separate DescriptorPool."""
def GetDescriptorPool(self):
return descriptor_pool.DescriptorPool()
class GeneratedDescriptorTest(unittest.TestCase):
"""Tests for the properties of descriptors in generated code."""
def CheckMessageDescriptor(self, message_descriptor):
# Basic properties
self.assertEqual(message_descriptor.name, 'TestAllTypes')
self.assertEqual(message_descriptor.full_name,
'protobuf_unittest.TestAllTypes')
# Test equality and hashability
self.assertEqual(message_descriptor, message_descriptor)
self.assertEqual(message_descriptor.fields[0].containing_type,
message_descriptor)
self.assertIn(message_descriptor, [message_descriptor])
self.assertIn(message_descriptor, {message_descriptor: None})
# Test field containers
self.CheckDescriptorSequence(message_descriptor.fields)
self.CheckDescriptorMapping(message_descriptor.fields_by_name)
self.CheckDescriptorMapping(message_descriptor.fields_by_number)
self.CheckDescriptorMapping(message_descriptor.fields_by_camelcase_name)
def CheckFieldDescriptor(self, field_descriptor):
# Basic properties
self.assertEqual(field_descriptor.name, 'optional_int32')
self.assertEqual(field_descriptor.camelcase_name, 'optionalInt32')
self.assertEqual(field_descriptor.full_name,
'protobuf_unittest.TestAllTypes.optional_int32')
self.assertEqual(field_descriptor.containing_type.name, 'TestAllTypes')
# Test equality and hashability
self.assertEqual(field_descriptor, field_descriptor)
self.assertEqual(
field_descriptor.containing_type.fields_by_name['optional_int32'],
field_descriptor)
self.assertEqual(
field_descriptor.containing_type.fields_by_camelcase_name[
'optionalInt32'],
field_descriptor)
self.assertIn(field_descriptor, [field_descriptor])
self.assertIn(field_descriptor, {field_descriptor: None})
def CheckDescriptorSequence(self, sequence):
# Verifies that a property like 'messageDescriptor.fields' has all the
# properties of an immutable abc.Sequence.
self.assertGreater(len(sequence), 0) # Sized
self.assertEqual(len(sequence), len(list(sequence))) # Iterable
item = sequence[0]
self.assertEqual(item, sequence[0])
self.assertIn(item, sequence) # Container
self.assertEqual(sequence.index(item), 0)
self.assertEqual(sequence.count(item), 1)
reversed_iterator = reversed(sequence)
self.assertEqual(list(reversed_iterator), list(sequence)[::-1])
self.assertRaises(StopIteration, next, reversed_iterator)
def CheckDescriptorMapping(self, mapping):
    # Verifies that a property like 'messageDescriptor.fields_by_name' has
    # all the properties of an immutable abc.Mapping.
self.assertGreater(len(mapping), 0) # Sized
self.assertEqual(len(mapping), len(list(mapping))) # Iterable
if sys.version_info >= (3,):
key, item = next(iter(list(mapping.items())))
else:
key, item = list(mapping.items())[0]
self.assertIn(key, mapping) # Container
self.assertEqual(mapping.get(key), item)
    # keys(), values(), items() and their iterator variants
item = (next(iter(list(mapping.keys()))), next(iter(list(mapping.values()))))
self.assertEqual(item, next(iter(list(mapping.items()))))
if sys.version_info < (3,):
def CheckItems(seq, iterator):
self.assertEqual(next(iterator), seq[0])
self.assertEqual(list(iterator), seq[1:])
CheckItems(list(mapping.keys()), iter(mapping.keys()))
CheckItems(list(mapping.values()), iter(mapping.values()))
CheckItems(list(mapping.items()), iter(mapping.items()))
def testDescriptor(self):
message_descriptor = unittest_pb2.TestAllTypes.DESCRIPTOR
self.CheckMessageDescriptor(message_descriptor)
field_descriptor = message_descriptor.fields_by_name['optional_int32']
self.CheckFieldDescriptor(field_descriptor)
field_descriptor = message_descriptor.fields_by_camelcase_name[
'optionalInt32']
self.CheckFieldDescriptor(field_descriptor)
def testCppDescriptorContainer(self):
# Check that the collection is still valid even if the parent disappeared.
enum = unittest_pb2.TestAllTypes.DESCRIPTOR.enum_types_by_name['NestedEnum']
values = enum.values
del enum
self.assertEqual('FOO', values[0].name)
def testCppDescriptorContainer_Iterator(self):
# Same test with the iterator
enum = unittest_pb2.TestAllTypes.DESCRIPTOR.enum_types_by_name['NestedEnum']
values_iter = iter(enum.values)
del enum
self.assertEqual('FOO', next(values_iter).name)
class DescriptorCopyToProtoTest(unittest.TestCase):
"""Tests for CopyTo functions of Descriptor."""
def _AssertProtoEqual(self, actual_proto, expected_class, expected_ascii):
expected_proto = expected_class()
text_format.Merge(expected_ascii, expected_proto)
self.assertEqual(
actual_proto, expected_proto,
'Not equal,\nActual:\n%s\nExpected:\n%s\n'
% (str(actual_proto), str(expected_proto)))
def _InternalTestCopyToProto(self, desc, expected_proto_class,
expected_proto_ascii):
actual = expected_proto_class()
desc.CopyToProto(actual)
self._AssertProtoEqual(
actual, expected_proto_class, expected_proto_ascii)
def testCopyToProto_EmptyMessage(self):
self._InternalTestCopyToProto(
unittest_pb2.TestEmptyMessage.DESCRIPTOR,
descriptor_pb2.DescriptorProto,
TEST_EMPTY_MESSAGE_DESCRIPTOR_ASCII)
def testCopyToProto_NestedMessage(self):
TEST_NESTED_MESSAGE_ASCII = """
name: 'NestedMessage'
field: <
name: 'bb'
number: 1
label: 1 # Optional
type: 5 # TYPE_INT32
>
"""
self._InternalTestCopyToProto(
unittest_pb2.TestAllTypes.NestedMessage.DESCRIPTOR,
descriptor_pb2.DescriptorProto,
TEST_NESTED_MESSAGE_ASCII)
def testCopyToProto_ForeignNestedMessage(self):
TEST_FOREIGN_NESTED_ASCII = """
name: 'TestForeignNested'
field: <
name: 'foreign_nested'
number: 1
label: 1 # Optional
type: 11 # TYPE_MESSAGE
type_name: '.protobuf_unittest.TestAllTypes.NestedMessage'
>
"""
self._InternalTestCopyToProto(
unittest_pb2.TestForeignNested.DESCRIPTOR,
descriptor_pb2.DescriptorProto,
TEST_FOREIGN_NESTED_ASCII)
def testCopyToProto_ForeignEnum(self):
TEST_FOREIGN_ENUM_ASCII = """
name: 'ForeignEnum'
value: <
name: 'FOREIGN_FOO'
number: 4
>
value: <
name: 'FOREIGN_BAR'
number: 5
>
value: <
name: 'FOREIGN_BAZ'
number: 6
>
"""
self._InternalTestCopyToProto(
unittest_pb2.ForeignEnum.DESCRIPTOR,
descriptor_pb2.EnumDescriptorProto,
TEST_FOREIGN_ENUM_ASCII)
def testCopyToProto_Options(self):
TEST_DEPRECATED_FIELDS_ASCII = """
name: 'TestDeprecatedFields'
field: <
name: 'deprecated_int32'
number: 1
label: 1 # Optional
type: 5 # TYPE_INT32
options: <
deprecated: true
>
>
"""
self._InternalTestCopyToProto(
unittest_pb2.TestDeprecatedFields.DESCRIPTOR,
descriptor_pb2.DescriptorProto,
TEST_DEPRECATED_FIELDS_ASCII)
def testCopyToProto_AllExtensions(self):
TEST_EMPTY_MESSAGE_WITH_EXTENSIONS_ASCII = """
name: 'TestEmptyMessageWithExtensions'
extension_range: <
start: 1
end: 536870912
>
"""
self._InternalTestCopyToProto(
unittest_pb2.TestEmptyMessageWithExtensions.DESCRIPTOR,
descriptor_pb2.DescriptorProto,
TEST_EMPTY_MESSAGE_WITH_EXTENSIONS_ASCII)
def testCopyToProto_SeveralExtensions(self):
TEST_MESSAGE_WITH_SEVERAL_EXTENSIONS_ASCII = """
name: 'TestMultipleExtensionRanges'
extension_range: <
start: 42
end: 43
>
extension_range: <
start: 4143
end: 4244
>
extension_range: <
start: 65536
end: 536870912
>
"""
self._InternalTestCopyToProto(
unittest_pb2.TestMultipleExtensionRanges.DESCRIPTOR,
descriptor_pb2.DescriptorProto,
TEST_MESSAGE_WITH_SEVERAL_EXTENSIONS_ASCII)
# Disable this test so we can make changes to the proto file.
# TODO(xiaofeng): Enable this test after cl/55530659 is submitted.
#
# def testCopyToProto_FileDescriptor(self):
# UNITTEST_IMPORT_FILE_DESCRIPTOR_ASCII = ("""
# name: 'google/protobuf/unittest_import.proto'
# package: 'protobuf_unittest_import'
# dependency: 'google/protobuf/unittest_import_public.proto'
# message_type: <
# name: 'ImportMessage'
# field: <
# name: 'd'
# number: 1
# label: 1 # Optional
# type: 5 # TYPE_INT32
# >
# >
# """ +
# """enum_type: <
# name: 'ImportEnum'
# value: <
# name: 'IMPORT_FOO'
# number: 7
# >
# value: <
# name: 'IMPORT_BAR'
# number: 8
# >
# value: <
# name: 'IMPORT_BAZ'
# number: 9
# >
# >
# options: <
# java_package: 'com.google.protobuf.test'
# optimize_for: 1 # SPEED
# >
# public_dependency: 0
# """)
# self._InternalTestCopyToProto(
# unittest_import_pb2.DESCRIPTOR,
# descriptor_pb2.FileDescriptorProto,
# UNITTEST_IMPORT_FILE_DESCRIPTOR_ASCII)
def testCopyToProto_ServiceDescriptor(self):
TEST_SERVICE_ASCII = """
name: 'TestService'
method: <
name: 'Foo'
input_type: '.protobuf_unittest.FooRequest'
output_type: '.protobuf_unittest.FooResponse'
>
method: <
name: 'Bar'
input_type: '.protobuf_unittest.BarRequest'
output_type: '.protobuf_unittest.BarResponse'
>
"""
# TODO(rocking): enable this test after the proto descriptor change is
# checked in.
#self._InternalTestCopyToProto(
# unittest_pb2.TestService.DESCRIPTOR,
# descriptor_pb2.ServiceDescriptorProto,
# TEST_SERVICE_ASCII)
class MakeDescriptorTest(unittest.TestCase):
def testMakeDescriptorWithNestedFields(self):
file_descriptor_proto = descriptor_pb2.FileDescriptorProto()
file_descriptor_proto.name = 'Foo2'
message_type = file_descriptor_proto.message_type.add()
message_type.name = file_descriptor_proto.name
nested_type = message_type.nested_type.add()
nested_type.name = 'Sub'
enum_type = nested_type.enum_type.add()
enum_type.name = 'FOO'
enum_type_val = enum_type.value.add()
enum_type_val.name = 'BAR'
enum_type_val.number = 3
field = message_type.field.add()
field.number = 1
field.name = 'uint64_field'
field.label = descriptor.FieldDescriptor.LABEL_REQUIRED
field.type = descriptor.FieldDescriptor.TYPE_UINT64
field = message_type.field.add()
field.number = 2
field.name = 'nested_message_field'
field.label = descriptor.FieldDescriptor.LABEL_REQUIRED
field.type = descriptor.FieldDescriptor.TYPE_MESSAGE
field.type_name = 'Sub'
enum_field = nested_type.field.add()
enum_field.number = 2
enum_field.name = 'bar_field'
enum_field.label = descriptor.FieldDescriptor.LABEL_REQUIRED
enum_field.type = descriptor.FieldDescriptor.TYPE_ENUM
enum_field.type_name = 'Foo2.Sub.FOO'
result = descriptor.MakeDescriptor(message_type)
self.assertEqual(result.fields[0].cpp_type,
descriptor.FieldDescriptor.CPPTYPE_UINT64)
self.assertEqual(result.fields[1].cpp_type,
descriptor.FieldDescriptor.CPPTYPE_MESSAGE)
self.assertEqual(result.fields[1].message_type.containing_type,
result)
self.assertEqual(result.nested_types[0].fields[0].full_name,
'Foo2.Sub.bar_field')
self.assertEqual(result.nested_types[0].fields[0].enum_type,
result.nested_types[0].enum_types[0])
self.assertFalse(result.has_options)
self.assertFalse(result.fields[0].has_options)
def testMakeDescriptorWithUnsignedIntField(self):
file_descriptor_proto = descriptor_pb2.FileDescriptorProto()
file_descriptor_proto.name = 'Foo'
message_type = file_descriptor_proto.message_type.add()
message_type.name = file_descriptor_proto.name
enum_type = message_type.enum_type.add()
enum_type.name = 'FOO'
enum_type_val = enum_type.value.add()
enum_type_val.name = 'BAR'
enum_type_val.number = 3
field = message_type.field.add()
field.number = 1
field.name = 'uint64_field'
field.label = descriptor.FieldDescriptor.LABEL_REQUIRED
field.type = descriptor.FieldDescriptor.TYPE_UINT64
enum_field = message_type.field.add()
enum_field.number = 2
enum_field.name = 'bar_field'
enum_field.label = descriptor.FieldDescriptor.LABEL_REQUIRED
enum_field.type = descriptor.FieldDescriptor.TYPE_ENUM
enum_field.type_name = 'Foo.FOO'
result = descriptor.MakeDescriptor(message_type)
self.assertEqual(result.fields[0].cpp_type,
descriptor.FieldDescriptor.CPPTYPE_UINT64)
def testMakeDescriptorWithOptions(self):
descriptor_proto = descriptor_pb2.DescriptorProto()
aggregate_message = unittest_custom_options_pb2.AggregateMessage
aggregate_message.DESCRIPTOR.CopyToProto(descriptor_proto)
reformed_descriptor = descriptor.MakeDescriptor(descriptor_proto)
options = reformed_descriptor.GetOptions()
self.assertEqual(101,
options.Extensions[unittest_custom_options_pb2.msgopt].i)
def testCamelcaseName(self):
descriptor_proto = descriptor_pb2.DescriptorProto()
descriptor_proto.name = 'Bar'
names = ['foo_foo', 'FooBar', 'fooBaz', 'fooFoo', 'foobar']
camelcase_names = ['fooFoo', 'fooBar', 'fooBaz', 'fooFoo', 'foobar']
for index in range(len(names)):
field = descriptor_proto.field.add()
field.number = index + 1
field.name = names[index]
result = descriptor.MakeDescriptor(descriptor_proto)
for index in range(len(camelcase_names)):
self.assertEqual(result.fields[index].camelcase_name,
camelcase_names[index])
def testJsonName(self):
descriptor_proto = descriptor_pb2.DescriptorProto()
descriptor_proto.name = 'TestJsonName'
names = ['field_name', 'fieldName', 'FieldName',
'_field_name', 'FIELD_NAME', 'json_name']
json_names = ['fieldName', 'fieldName', 'FieldName',
'FieldName', 'FIELDNAME', '@type']
for index in range(len(names)):
field = descriptor_proto.field.add()
field.number = index + 1
field.name = names[index]
field.json_name = '@type'
result = descriptor.MakeDescriptor(descriptor_proto)
for index in range(len(json_names)):
self.assertEqual(result.fields[index].json_name,
json_names[index])
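# The camelcase_name values asserted above drop underscores, upper-case the
# letter that follows each underscore, and lower-case the first character;
# the default json_name follows the same underscore rule but keeps the first
# character's case (and can be overridden explicitly, as with '@type' above).
# A minimal sketch matching the testCamelcaseName cases (to_camelcase is
# illustrative, not the library's implementation):
def to_camelcase(name):
  result = []
  cap_next = False
  for i, ch in enumerate(name):
    if ch == '_':
      cap_next = True  # drop the underscore; capitalize what follows
    elif cap_next:
      result.append(ch.upper())
      cap_next = False
    elif i == 0:
      result.append(ch.lower())  # camelcase_name lower-cases the first char
    else:
      result.append(ch)
  return ''.join(result)
assert ([to_camelcase(n) for n in
         ['foo_foo', 'FooBar', 'fooBaz', 'fooFoo', 'foobar']] ==
        ['fooFoo', 'fooBar', 'fooBaz', 'fooFoo', 'foobar'])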
if __name__ == '__main__':
unittest.main()
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import numpy as np
pytest.importorskip("ethosu.vela")
import tvm
from tvm import relay
from tvm.script import tir as T
from tvm.relay.testing import run_opt_pass
from tvm.relay.backend.contrib.ethosu.tir.compiler import lower_to_tir
from tvm.relay.backend.contrib.ethosu.tir.scheduler import Convolution2DCompute
from tvm.relay.backend.contrib.ethosu.tir.scheduler import copy_constants
from tvm.relay.backend.contrib.ethosu import tir_to_cs_translator
from .infra import make_ethosu_conv2d, make_ethosu_binary_elementwise
# fmt: off
@tvm.script.ir_module
class WeightStreamOnly:
@T.prim_func
def main(placeholder: T.handle, ethosu_write: T.handle, placeholder_1: T.handle, placeholder_2: T.handle, placeholder_3: T.handle, placeholder_4: T.handle, placeholder_5: T.handle, placeholder_6: T.handle, placeholder_7: T.handle, placeholder_8: T.handle) -> None:
# function attr dict
T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
buffer = T.match_buffer(placeholder_7, [112], dtype="uint8", elem_offset=0, align=128, offset_factor=1)
buffer_1 = T.match_buffer(placeholder_4, [32], dtype="uint8", elem_offset=0, align=128, offset_factor=1)
buffer_2 = T.match_buffer(placeholder_2, [32], dtype="uint8", elem_offset=0, align=128, offset_factor=1)
buffer_3 = T.match_buffer(placeholder_8, [32], dtype="uint8", elem_offset=0, align=128, offset_factor=1)
buffer_4 = T.match_buffer(placeholder_5, [112], dtype="uint8", elem_offset=0, align=128, offset_factor=1)
placeholder_9 = T.match_buffer(placeholder, [1, 16, 16, 32], dtype="int8", elem_offset=0, align=128, offset_factor=1)
buffer_5 = T.match_buffer(placeholder_3, [112], dtype="uint8", elem_offset=0, align=128, offset_factor=1)
buffer_6 = T.match_buffer(placeholder_1, [128], dtype="uint8", elem_offset=0, align=128, offset_factor=1)
ethosu_write_1 = T.match_buffer(ethosu_write, [1, 16, 16, 8], dtype="int8", elem_offset=0, align=128, offset_factor=1)
buffer_7 = T.match_buffer(placeholder_6, [32], dtype="uint8", elem_offset=0, align=128, offset_factor=1)
# body
placeholder_global = T.allocate([128], "uint8", "global", annotations={"disable_lower_builtin": True})
placeholder_d_global = T.allocate([32], "uint8", "global", annotations={"disable_lower_builtin": True})
T.evaluate(T.call_extern("ethosu_copy", T.load("uint8", buffer_6.data, 0), 128, T.load("uint8", placeholder_global, 0), dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", T.load("uint8", buffer_2.data, 0), 32, T.load("uint8", placeholder_d_global, 0), dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, T.load("int8", placeholder_9.data, 0), 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 2, 16, 0, 16, T.load("int8", ethosu_write_1.data, 0), 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, T.load("uint8", placeholder_global, 0), 128, 12, T.load("uint8", placeholder_d_global, 0), 32, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", T.load("uint8", buffer_5.data, 0), 112, T.load("uint8", placeholder_global, 0), dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", T.load("uint8", buffer_1.data, 0), 32, T.load("uint8", placeholder_d_global, 0), dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, T.load("int8", placeholder_9.data, 0), 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 2, 16, 0, 16, T.load("int8", ethosu_write_1.data, 2), 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, T.load("uint8", placeholder_global, 0), 112, 12, T.load("uint8", placeholder_d_global, 0), 32, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", T.load("uint8", buffer_4.data, 0), 112, T.load("uint8", placeholder_global, 0), dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", T.load("uint8", buffer_7.data, 0), 32, T.load("uint8", placeholder_d_global, 0), dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, T.load("int8", placeholder_9.data, 0), 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 2, 16, 0, 16, T.load("int8", ethosu_write_1.data, 4), 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, T.load("uint8", placeholder_global, 0), 112, 12, T.load("uint8", placeholder_d_global, 0), 32, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", T.load("uint8", buffer.data, 0), 112, T.load("uint8", placeholder_global, 0), dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", T.load("uint8", buffer_3.data, 0), 32, T.load("uint8", placeholder_d_global, 0), dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, T.load("int8", placeholder_9.data, 0), 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 2, 16, 0, 16, T.load("int8", ethosu_write_1.data, 6), 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, T.load("uint8", placeholder_global, 0), 112, 12, T.load("uint8", placeholder_d_global, 0), 32, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", dtype="handle"))
__tvm_meta__ = None
# fmt: on
def test_weight_stream_only():
def _planner(cached_func, const_dict, sch):
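        # Scheduling sketch: split the conv2d output along its channel axis,
        # then cache the weight and bias reads and compute them at each output
        # slice, so that every slice streams its own weight chunk (the repeated
        # ethosu_copy calls in the WeightStreamOnly module above).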
weights = cached_func.inputs[1]
bias = cached_func.inputs[2]
out = cached_func.outputs[0]
conv_compute = Convolution2DCompute.from_output(out)
co = conv_compute.split(sch, 3, 2)
cache_weights = sch.cache_read(weights, "global", [conv_compute.conv2d])
cache_bias = sch.cache_read(bias, "global", [conv_compute.conv2d])
sch[cache_weights].compute_at(sch[out], co)
sch[cache_bias].compute_at(sch[out], co)
def _get_func():
ifm = relay.var("ifm", shape=(1, 16, 16, 32), dtype="int8")
conv = make_ethosu_conv2d(
ifm,
32,
8,
(1, 1),
(0, 0),
(1, 1),
(1, 1),
)
func = relay.Function(relay.analysis.free_vars(conv), conv)
func = run_opt_pass(func, relay.transform.InferType())
return func
func = _get_func()
mod, consts = lower_to_tir(func, cascader=_planner)
script = mod.script(show_meta=True)
test_mod = tvm.script.from_source(script)
reference_mod = WeightStreamOnly
tvm.ir.assert_structural_equal(test_mod["main"], reference_mod["main"], True)
reference_const_sizes = {2: 128, 3: 32, 4: 112, 5: 32, 6: 112, 7: 32, 8: 112, 9: 32}
test_const_sizes = {}
for key, value in consts.items():
test_const_sizes[key] = len(value)
assert reference_const_sizes == test_const_sizes
# fmt: off
@tvm.script.ir_module
class RereadWeights:
@T.prim_func
def main(placeholder: T.handle, placeholder_1: T.handle, placeholder_2: T.handle, ethosu_write: T.handle) -> None:
# function attr dict
T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
placeholder_3 = T.match_buffer(placeholder, [1, 16, 16, 32], dtype="int8")
buffer = T.match_buffer(placeholder_1, [304], dtype="uint8")
buffer_1 = T.match_buffer(placeholder_2, [80], dtype="uint8")
ethosu_write_1 = T.match_buffer(ethosu_write, [1, 16, 16, 8], dtype="int8")
# body
placeholder_global = T.allocate([304], "uint8", "global", annotations={"disable_lower_builtin":True})
placeholder_d_global = T.allocate([80], "uint8", "global", annotations={"disable_lower_builtin":True})
T.evaluate(T.call_extern("ethosu_copy", T.load("uint8", buffer.data, 0), 304, T.load("uint8", placeholder_global, 0), dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", T.load("uint8", buffer_1.data, 0), 80, T.load("uint8", placeholder_d_global, 0), dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 8, 32, 16, 0, 8, T.load("int8", placeholder_3.data, 0), 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 8, 8, 16, 0, 8, T.load("int8", ethosu_write_1.data, 0), 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 1, 8, 1, 1, 1, 1, 1, 1, T.load("uint8", placeholder_global, 0), 304, 12, T.load("uint8", placeholder_d_global, 0), 80, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", T.load("uint8", buffer.data, 0), 304, T.load("uint8", placeholder_global, 0), dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", T.load("uint8", buffer_1.data, 0), 80, T.load("uint8", placeholder_d_global, 0), dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 8, 32, 16, 0, 8, T.load("int8", placeholder_3.data, 256), 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 8, 8, 16, 0, 8, T.load("int8", ethosu_write_1.data, 64), 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 1, 8, 1, 1, 1, 1, 1, 1, T.load("uint8", placeholder_global, 0), 304, 12, T.load("uint8", placeholder_d_global, 0), 80, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", dtype="handle"))
__tvm_meta__ = None
# fmt: on
def test_re_read_weights():
def _cascader(cached_func, const_dict, sch):
weights = cached_func.inputs[1]
bias = cached_func.inputs[2]
out = cached_func.outputs[0]
conv_compute = Convolution2DCompute.from_output(out)
co = conv_compute.split(sch, 2, 8)
cache_weights = sch.cache_read(weights, "global", [conv_compute.conv2d])
cache_bias = sch.cache_read(bias, "global", [conv_compute.conv2d])
sch[cache_weights].compute_at(sch[out], co)
sch[cache_bias].compute_at(sch[out], co)
def _get_func():
ifm = relay.var("ifm", shape=(1, 16, 16, 32), dtype="int8")
conv = make_ethosu_conv2d(
ifm,
32,
8,
(1, 1),
(0, 0),
(1, 1),
(1, 1),
)
func = relay.Function(relay.analysis.free_vars(conv), conv)
func = run_opt_pass(func, relay.transform.InferType())
return func
func = _get_func()
mod, consts = lower_to_tir(func, cascader=_cascader)
script = mod.script(show_meta=True)
test_mod = tvm.script.from_source(script)
reference_mod = RereadWeights
tvm.ir.assert_structural_equal(test_mod["main"], reference_mod["main"], True)
reference_const_sizes = {1: 304, 2: 80}
test_const_sizes = {}
for key, value in consts.items():
test_const_sizes[key] = len(value)
assert reference_const_sizes == test_const_sizes
# fmt: off
@tvm.script.ir_module
class DirectReadOnly:
@T.prim_func
def main(placeholder: T.handle, placeholder_1: T.handle, placeholder_2: T.handle, placeholder_3: T.handle, placeholder_4: T.handle, ethosu_write: T.handle) -> None:
# function attr dict
T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
buffer = T.match_buffer(placeholder_3, [160], dtype="uint8", elem_offset=0, align=128, offset_factor=1)
ethosu_write_1 = T.match_buffer(ethosu_write, [1, 16, 16, 8], dtype="int8", elem_offset=0, align=128, offset_factor=1)
placeholder_5 = T.match_buffer(placeholder, [1, 16, 16, 32], dtype="int8", elem_offset=0, align=128, offset_factor=1)
buffer_1 = T.match_buffer(placeholder_1, [592], dtype="uint8", elem_offset=0, align=128, offset_factor=1)
buffer_2 = T.match_buffer(placeholder_2, [160], dtype="uint8", elem_offset=0, align=128, offset_factor=1)
buffer_3 = T.match_buffer(placeholder_4, [80], dtype="uint8", elem_offset=0, align=128, offset_factor=1)
# body
ethosu_write_2 = T.allocate([4096], "int8", "global", annotations={"disable_lower_builtin": True})
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, T.load("int8", placeholder_5.data, 0), 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 16, 16, 0, 16, T.load("int8", ethosu_write_2, 0), 0, 0, 0, T.float32(0.25), 14, "NHWC", 256, 16, 1, 1, 1, 1, 1, 1, 1, T.load("uint8", buffer_1.data, 0), 592, 12, T.load("uint8", buffer_2.data, 0), 160, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 16, 16, 0, 16, T.load("int8", ethosu_write_2, 0), 0, 0, 0, T.float32(0.5), 10, "NHWC", 256, 16, 1, "int8", 16, 16, 8, 16, 0, 16, T.load("int8", ethosu_write_1.data, 0), 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, T.load("uint8", buffer.data, 0), 160, 12, T.load("uint8", buffer_3.data, 0), 80, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", dtype="handle"))
__tvm_meta__ = None
# fmt: on
def test_direct_read_only():
def _get_func():
ifm = relay.var("ifm", shape=(1, 16, 16, 32), dtype="int8")
conv1 = make_ethosu_conv2d(
ifm,
32,
16,
(1, 1),
(0, 0),
(1, 1),
(1, 1),
)
conv2 = make_ethosu_conv2d(
conv1,
16,
8,
(1, 1),
(0, 0),
(1, 1),
(1, 1),
)
func = relay.Function(relay.analysis.free_vars(conv2), conv2)
func = run_opt_pass(func, relay.transform.InferType())
return func
func = _get_func()
mod, consts = lower_to_tir(func)
script = mod.script(show_meta=True)
test_mod = tvm.script.from_source(script)
reference_mod = DirectReadOnly
tvm.ir.assert_structural_equal(test_mod["main"], reference_mod["main"], True)
reference_const_sizes = {1: 592, 2: 160, 3: 160, 4: 80}
test_const_sizes = {}
for key, value in consts.items():
test_const_sizes[key] = len(value)
assert reference_const_sizes == test_const_sizes
# fmt: off
@tvm.script.ir_module
class MixedRead:
@T.prim_func
def main(placeholder: T.handle, placeholder_1: T.handle, placeholder_2: T.handle, ethosu_write: T.handle, placeholder_3: T.handle, placeholder_4: T.handle, placeholder_5: T.handle, placeholder_6: T.handle, placeholder_7: T.handle, placeholder_8: T.handle, placeholder_9: T.handle, placeholder_10: T.handle) -> None:
# function attr dict
T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
buffer = T.match_buffer(placeholder_7, [80], dtype="uint8", elem_offset=0, align=128, offset_factor=1)
buffer_1 = T.match_buffer(placeholder_5, [80], dtype="uint8", elem_offset=0, align=128, offset_factor=1)
buffer_2 = T.match_buffer(placeholder_3, [80], dtype="uint8", elem_offset=0, align=128, offset_factor=1)
buffer_3 = T.match_buffer(placeholder_4, [32], dtype="uint8", elem_offset=0, align=128, offset_factor=1)
buffer_4 = T.match_buffer(placeholder_9, [80], dtype="uint8", elem_offset=0, align=128, offset_factor=1)
buffer_5 = T.match_buffer(placeholder_6, [32], dtype="uint8", elem_offset=0, align=128, offset_factor=1)
placeholder_11 = T.match_buffer(placeholder, [1, 16, 16, 32], dtype="int8", elem_offset=0, align=128, offset_factor=1)
buffer_6 = T.match_buffer(placeholder_1, [592], dtype="uint8", elem_offset=0, align=128, offset_factor=1)
ethosu_write_1 = T.match_buffer(ethosu_write, [1, 16, 16, 8], dtype="int8", elem_offset=0, align=128, offset_factor=1)
buffer_7 = T.match_buffer(placeholder_2, [160], dtype="uint8", elem_offset=0, align=128, offset_factor=1)
buffer_8 = T.match_buffer(placeholder_8, [32], dtype="uint8", elem_offset=0, align=128, offset_factor=1)
buffer_9 = T.match_buffer(placeholder_10, [32], dtype="uint8", elem_offset=0, align=128, offset_factor=1)
# body
ethosu_write_2 = T.allocate([4096], "int8", "global", annotations={"disable_lower_builtin": True})
placeholder_global = T.allocate([80], "uint8", "global", annotations={"disable_lower_builtin": True})
placeholder_d_global = T.allocate([32], "uint8", "global", annotations={"disable_lower_builtin": True})
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, T.load("int8", placeholder_11.data, 0), 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 16, 16, 0, 16, T.load("int8", ethosu_write_2, 0), 0, 0, 0, T.float32(0.25), 14, "NHWC", 256, 16, 1, 1, 1, 1, 1, 1, 1, T.load("uint8", buffer_6.data, 0), 592, 12, T.load("uint8", buffer_7.data, 0), 160, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", T.load("uint8", buffer_2.data, 0), 80, T.load("uint8", placeholder_global, 0), dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", T.load("uint8", buffer_3.data, 0), 32, T.load("uint8", placeholder_d_global, 0), dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 16, 16, 0, 16, T.load("int8", ethosu_write_2, 0), 0, 0, 0, T.float32(0.5), 10, "NHWC", 256, 16, 1, "int8", 16, 16, 2, 16, 0, 16, T.load("int8", ethosu_write_1.data, 0), 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, T.load("uint8", placeholder_global, 0), 80, 12, T.load("uint8", placeholder_d_global, 0), 32, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", T.load("uint8", buffer_1.data, 0), 80, T.load("uint8", placeholder_global, 0), dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", T.load("uint8", buffer_5.data, 0), 32, T.load("uint8", placeholder_d_global, 0), dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 16, 16, 0, 16, T.load("int8", ethosu_write_2, 0), 0, 0, 0, T.float32(0.5), 10, "NHWC", 256, 16, 1, "int8", 16, 16, 2, 16, 0, 16, T.load("int8", ethosu_write_1.data, 2), 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, T.load("uint8", placeholder_global, 0), 80, 12, T.load("uint8", placeholder_d_global, 0), 32, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", T.load("uint8", buffer.data, 0), 80, T.load("uint8", placeholder_global, 0), dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", T.load("uint8", buffer_8.data, 0), 32, T.load("uint8", placeholder_d_global, 0), dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 16, 16, 0, 16, T.load("int8", ethosu_write_2, 0), 0, 0, 0, T.float32(0.5), 10, "NHWC", 256, 16, 1, "int8", 16, 16, 2, 16, 0, 16, T.load("int8", ethosu_write_1.data, 4), 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, T.load("uint8", placeholder_global, 0), 80, 12, T.load("uint8", placeholder_d_global, 0), 32, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", T.load("uint8", buffer_4.data, 0), 80, T.load("uint8", placeholder_global, 0), dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", T.load("uint8", buffer_9.data, 0), 32, T.load("uint8", placeholder_d_global, 0), dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 16, 16, 0, 16, T.load("int8", ethosu_write_2, 0), 0, 0, 0, T.float32(0.5), 10, "NHWC", 256, 16, 1, "int8", 16, 16, 2, 16, 0, 16, T.load("int8", ethosu_write_1.data, 6), 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, T.load("uint8", placeholder_global, 0), 80, 12, T.load("uint8", placeholder_d_global, 0), 32, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", dtype="handle"))
__tvm_meta__ = None
# fmt: on
def test_mixed_read():
def _planner(cached_func, const_dict, sch):
weight = cached_func.inputs[4]
scale_bias = cached_func.inputs[5]
out = cached_func.outputs[0]
conv_compute = Convolution2DCompute.from_output(out)
co = conv_compute.split(sch, 3, 2)
cache_weight = sch.cache_read(weight, "global", [conv_compute.conv2d])
cache_scale_bias = sch.cache_read(scale_bias, "global", [conv_compute.conv2d])
sch[cache_weight].compute_at(sch[out], co)
sch[cache_scale_bias].compute_at(sch[out], co)
def _get_func():
ifm = relay.var("ifm", shape=(1, 16, 16, 32), dtype="int8")
conv1 = make_ethosu_conv2d(
ifm,
32,
16,
(1, 1),
(0, 0),
(1, 1),
(1, 1),
)
conv2 = make_ethosu_conv2d(
conv1,
16,
8,
(1, 1),
(0, 0),
(1, 1),
(1, 1),
)
func = relay.Function(relay.analysis.free_vars(conv2), conv2)
func = run_opt_pass(func, relay.transform.InferType())
return func
func = _get_func()
mod, consts = lower_to_tir(func, cascader=_planner)
script = mod.script(show_meta=True)
test_mod = tvm.script.from_source(script)
reference_mod = MixedRead
tvm.ir.assert_structural_equal(test_mod["main"], reference_mod["main"], True)
reference_const_sizes = {
1: 592,
2: 160,
4: 80,
5: 32,
6: 80,
7: 32,
8: 80,
9: 32,
10: 80,
11: 32,
}
test_const_sizes = {}
for key, value in consts.items():
test_const_sizes[key] = len(value)
assert reference_const_sizes == test_const_sizes
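# The const-size comparison repeated in the tests above could be collapsed
# into one dict comprehension; a minimal helper sketch (assert_const_sizes is
# illustrative, not part of the test infrastructure):
def assert_const_sizes(consts, reference_const_sizes):
    assert reference_const_sizes == {key: len(value) for key, value in consts.items()}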
def test_constant_as_input():
"""Test to check that constants specified as inputs aren't
interpreted as an encoded constant."""
def get_graph():
dtype = "uint8"
ifm = relay.var("ifm", shape=(1, 16, 16, 32), dtype=dtype)
conv1 = make_ethosu_conv2d(
ifm,
32,
16,
(1, 1),
(0, 0),
(1, 1),
(1, 1),
)
scalar = relay.const(np.ones((1, 1, 1, 1), dtype=dtype), dtype=dtype)
add1 = make_ethosu_binary_elementwise(
conv1, scalar, ifm_channels=32, ifm2_channels=1, operator_type="ADD", ofm_dtype=dtype
)
func = relay.Function(relay.analysis.free_vars(add1), add1)
func = run_opt_pass(func, relay.transform.InferType())
return func
tir_mod, params = lower_to_tir(get_graph(), copy_constants())
# Check tile address for the scalar constant input hasn't been
# overwritten.
extern_calls = tir_mod["main"].body.body.body.body.body
binary_elementwise = extern_calls[-1].value
args = binary_elementwise.args
reason = "Tile address overwritten"
assert args[26] == 0, reason
assert args[27] == 0, reason
assert args[28] == 0, reason
    # More generally, check that this compiles successfully to make sure
    # nothing else was overwritten.
    # With target hooks, the TIR module needs a target attached
    # and must be lowered via MakeUnpackedAPI.
tir_mod["main"] = tir_mod["main"].with_attr("target", tvm.target.Target("ethos-u"))
tir_mod = tvm.tir.transform.MakeUnpackedAPI()(tir_mod)
tir_to_cs_translator.translate(tir_mod, params)
if __name__ == "__main__":
pytest.main([__file__])
|
|
try:
from StringIO import StringIO
except ImportError:
from io import StringIO # noqa: F401
import unittest
from unittest import mock
from tethys_cli.services_commands import (
services_create_persistent_command,
services_remove_persistent_command,
services_create_spatial_command,
services_remove_spatial_command,
services_list_command,
services_create_dataset_command,
services_remove_dataset_command,
services_create_wps_command,
services_remove_wps_command
)
from django.core.exceptions import ObjectDoesNotExist
from django.db.utils import IntegrityError
class ServicesCommandsTest(unittest.TestCase):
"""
Tests for tethys_cli.services_commands
"""
# Dictionary used in some of the tests
my_dict = {'id': 'Id_foo', 'name': 'Name_foo', 'host': 'Host_foo', 'port': 'Port_foo', 'endpoint': 'EndPoint_foo',
'public_endpoint': 'PublicEndPoint_bar', 'apikey': 'APIKey_foo'}
def setUp(self):
load_apps_patcher = mock.patch('tethys_cli.services_commands.load_apps')
load_apps_patcher.start()
self.addCleanup(load_apps_patcher.stop)
def tearDown(self):
pass
@mock.patch('tethys_cli.services_commands.pretty_output')
@mock.patch('tethys_services.models.PersistentStoreService')
def test_services_create_persistent_command(self, mock_service, mock_pretty_output):
"""
Test for services_create_persistent_command.
For running the test without any errors or problems.
:param mock_service: mock for PersistentStoreService
:param mock_pretty_output: mock for pretty_output text
:return:
"""
mock_args = mock.MagicMock()
services_create_persistent_command(mock_args)
mock_service.assert_called()
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertEqual(1, len(po_call_args))
self.assertEqual('Successfully created new Persistent Store Service!', po_call_args[0][0][0])
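    # In these assertions, po_call_args[i][0][0] reads as: the i-th call to
    # write(), its tuple of positional arguments, then the first positional
    # argument (the message that was written).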
@mock.patch('tethys_cli.services_commands.pretty_output')
@mock.patch('tethys_services.models.PersistentStoreService')
def test_services_create_persistent_command_exception_attributeerror(self, mock_service, mock_pretty_output):
"""
Test for services_create_persistent_command.
        For running the test with an AttributeError exception thrown.
:param mock_service: mock for PersistentStoreService
:param mock_pretty_output: mock for pretty_output text
:return:
"""
mock_args = mock.MagicMock()
mock_args.connection = AttributeError
services_create_persistent_command(mock_args)
mock_service.assert_not_called()
mock_service.objects.get().save.assert_not_called()
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertEqual(1, len(po_call_args))
self.assertIn('Missing Input Parameters. Please check your input.', po_call_args[0][0][0])
@mock.patch('tethys_cli.services_commands.pretty_output')
@mock.patch('tethys_services.models.PersistentStoreService')
def test_services_create_persistent_command_exception_indexerror(self, mock_service, mock_pretty_output):
"""
Test for services_create_persistent_command.
For running the test with an IndexError exception thrown.
:param mock_service: mock for PersistentStoreService
:param mock_pretty_output: mock for pretty_output text
:return:
"""
mock_args = mock.MagicMock()
mock_service.side_effect = IndexError
services_create_persistent_command(mock_args)
mock_service.assert_called()
mock_service.objects.get().save.assert_not_called()
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertEqual(1, len(po_call_args))
self.assertIn('The connection argument (-c) must be of the form', po_call_args[0][0][0])
@mock.patch('tethys_cli.services_commands.pretty_output')
@mock.patch('tethys_services.models.PersistentStoreService')
def test_services_create_persistent_command_exception_integrityerror(self, mock_service, mock_pretty_output):
"""
Test for services_create_persistent_command.
For running the test with an IntegrityError exception thrown.
:param mock_service: mock for PersistentStoreService
:param mock_pretty_output: mock for pretty_output text
:return:
"""
mock_args = mock.MagicMock()
mock_service.side_effect = IntegrityError
services_create_persistent_command(mock_args)
mock_service.assert_called()
mock_service.objects.get().save.assert_not_called()
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertEqual(1, len(po_call_args))
self.assertIn('Persistent Store Service with name', po_call_args[0][0][0])
self.assertIn('already exists. Command aborted.', po_call_args[0][0][0])
@mock.patch('tethys_cli.services_commands.pretty_output')
@mock.patch('tethys_cli.services_commands.exit')
@mock.patch('tethys_services.models.PersistentStoreService')
def test_services_remove_persistent_command_Exceptions(self, mock_service, mock_exit, mock_pretty_output):
"""
Test for services_remove_persistent_command
Test for handling all exceptions thrown by the function.
:param mock_service: mock for PersistentStoreService
:param mock_exit: mock for handling exit() code in function
:param mock_pretty_output: mock for pretty_output text
:return:
"""
mock_args = mock.MagicMock()
mock_service.__str__.return_value = 'Persistent Store'
mock_args.force = True
mock_service.objects.get.side_effect = [ValueError, ObjectDoesNotExist]
# NOTE: to prevent our tests from exiting prematurely, we change the behavior of exit to raise an exception
# to break the code execution, which we catch below.
mock_exit.side_effect = SystemExit
self.assertRaises(SystemExit, services_remove_persistent_command, mock_args)
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertEqual(1, len(po_call_args))
self.assertIn('A Persistent Store Service with ID/Name', po_call_args[0][0][0])
self.assertIn('does not exist', po_call_args[0][0][0])
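    # The exit-handling pattern above (mock_exit.side_effect = SystemExit plus
    # assertRaises(SystemExit, ...)) stops the command exactly where the real
    # CLI would call exit(), without terminating the test process; the removal
    # tests below reuse the same technique.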
@mock.patch('tethys_cli.services_commands.pretty_output')
@mock.patch('tethys_cli.services_commands.exit')
@mock.patch('tethys_services.models.PersistentStoreService')
def test_services_remove_persistent_command_force(self, mock_service, mock_exit, mock_pretty_output):
"""
Test for services_remove_persistent_command
Test for forcing a delete of the service
:param mock_service: mock for PersistentStoreService
:param mock_exit: mock for handling exit() code in function
:param mock_pretty_output: mock for pretty_output text
:return:
"""
mock_args = mock.MagicMock()
mock_args.force = True
mock_service.__str__.return_value = 'Persistent Store'
# NOTE: to prevent our tests from exiting prematurely, we change the behavior of exit to raise an exception
# to break the code execution, which we catch below.
mock_exit.side_effect = SystemExit
self.assertRaises(SystemExit, services_remove_persistent_command, mock_args)
mock_service.objects.get().delete.assert_called()
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertEqual(1, len(po_call_args))
self.assertIn('Successfully removed Persistent Store Service', po_call_args[0][0][0])
@mock.patch('tethys_cli.services_commands.input')
@mock.patch('tethys_cli.services_commands.pretty_output')
@mock.patch('tethys_cli.services_commands.exit')
@mock.patch('tethys_services.models.PersistentStoreService')
def test_services_remove_persistent_command_no_proceed_invalid_char(self, mock_service, mock_exit,
mock_pretty_output, mock_input):
"""
Test for services_remove_persistent_command
Handles answering the prompt to delete with invalid characters, and answering no.
:param mock_service: mock for PersistentStoreService
:param mock_exit: mock for handling exit() code in function
:param mock_pretty_output: mock for pretty_output text
:param mock_input: mock for handling raw_input requests
:return:
"""
mock_args = mock.MagicMock()
mock_args.force = False
# NOTE: to prevent our tests from exiting prematurely, we change the behavior of exit to raise an exception
# to break the code execution, which we catch below.
mock_exit.side_effect = SystemExit
mock_input.side_effect = ['foo', 'N']
mock_service.__str__.return_value = 'Persistent Store'
self.assertRaises(SystemExit, services_remove_persistent_command, mock_args)
mock_service.objects.get().delete.assert_not_called()
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertEqual(1, len(po_call_args))
self.assertEqual('Aborted. Persistent Store Service not removed.', po_call_args[0][0][0])
po_call_args = mock_input.call_args_list
self.assertEqual(2, len(po_call_args))
self.assertEqual(
'Are you sure you want to delete this Persistent Store Service? [y/n]: ', po_call_args[0][0][0])
self.assertEqual('Please enter either "y" or "n": ', po_call_args[1][0][0])
@mock.patch('tethys_cli.services_commands.input')
@mock.patch('tethys_cli.services_commands.pretty_output')
@mock.patch('tethys_cli.services_commands.exit')
@mock.patch('tethys_services.models.PersistentStoreService')
def test_services_remove_persistent_command_proceed(self, mock_service, mock_exit, mock_pretty_output, mock_input):
"""
Test for services_remove_persistent_command
        Handles answering the prompt to delete by answering yes
:param mock_service: mock for PersistentStoreService
:param mock_exit: mock for handling exit() code in function
:param mock_pretty_output: mock for pretty_output text
:param mock_input: mock for handling raw_input requests
:return:
"""
mock_args = mock.MagicMock()
mock_service.__str__.return_value = 'Persistent Store'
mock_args.force = False
# NOTE: to prevent our tests from exiting prematurely, we change the behavior of exit to raise an exception
# to break the code execution, which we catch below.
mock_exit.side_effect = SystemExit
mock_input.side_effect = ['y']
self.assertRaises(SystemExit, services_remove_persistent_command, mock_args)
mock_service.objects.get().delete.assert_called()
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertEqual(1, len(po_call_args))
self.assertIn('Successfully removed Persistent Store Service', po_call_args[0][0][0])
po_call_args = mock_input.call_args_list
self.assertEqual(1, len(po_call_args))
self.assertEqual(
'Are you sure you want to delete this Persistent Store Service? [y/n]: ', po_call_args[0][0][0])
@mock.patch('tethys_cli.services_commands.pretty_output')
@mock.patch('tethys_services.models.SpatialDatasetService')
def test_services_create_spatial_command_IndexError(self, mock_service, mock_pretty_output):
"""
Test for services_create_spatial_command
Handles an IndexError exception
:param mock_service: mock for SpatialDatasetService
:param mock_pretty_output: mock for pretty_output text
:return:
"""
mock_args = mock.MagicMock()
mock_args.connection = 'IndexError:9876@IndexError' # No 'http' or '://'
mock_args.type = 'GeoServer'
services_create_spatial_command(mock_args)
mock_service.assert_not_called()
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertEqual(1, len(po_call_args))
self.assertIn('The connection argument (-c) must be of the form', po_call_args[0][0][0])
self.assertIn('"<username>:<password>@<protocol>//<host>:<port>".', po_call_args[0][0][0])
@mock.patch('tethys_cli.services_commands.pretty_output')
@mock.patch('tethys_services.models.SpatialDatasetService')
def test_services_create_spatial_command_FormatError(self, mock_service, mock_pretty_output):
"""
Test for services_create_spatial_command
        Handles a FormatError exception
:param mock_service: mock for SpatialDatasetService
:param mock_pretty_output: mock for pretty_output text
:return:
"""
mock_args = mock.MagicMock()
mock_args.connection = 'foo:pass@http:://foo:1234'
mock_args.public_endpoint = 'foo@foo:foo' # No 'http' or '://'
mock_args.type = 'GeoServer'
services_create_spatial_command(mock_args)
mock_service.assert_not_called()
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertEqual(1, len(po_call_args))
self.assertIn('The public_endpoint argument (-p) must be of the form ', po_call_args[0][0][0])
self.assertIn('"<protocol>//<host>:<port>".', po_call_args[0][0][0])
@mock.patch('tethys_cli.services_commands.pretty_output')
@mock.patch('tethys_services.models.SpatialDatasetService')
def test_services_create_spatial_command_IntegrityError(self, mock_service, mock_pretty_output):
"""
Test for services_create_spatial_command
Handles an IntegrityError exception
:param mock_service: mock for SpatialDatasetService
:param mock_pretty_output: mock for pretty_output text
:return:
"""
mock_args = mock.MagicMock()
mock_args.connection = 'foo:pass@http:://foo:1234'
mock_args.public_endpoint = 'http://foo:1234'
mock_args.type = 'GeoServer'
mock_service.side_effect = IntegrityError
services_create_spatial_command(mock_args)
mock_service.assert_called()
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertEqual(1, len(po_call_args))
self.assertIn('Spatial Dataset Service with name ', po_call_args[0][0][0])
self.assertIn('already exists. Command aborted.', po_call_args[0][0][0])
@mock.patch('tethys_cli.services_commands.pretty_output')
@mock.patch('tethys_services.models.SpatialDatasetService')
def test_services_create_spatial_command_geoserver(self, mock_service, mock_pretty_output):
"""
Test for services_create_spatial_command
For going through the function and saving
:param mock_service: mock for SpatialDatasetService
:param mock_pretty_output: mock for pretty_output text
:return:
"""
mock_args = mock.MagicMock(
connection='foo:pass@http://localhost:8181/geoserver/rest/',
public_endpoint='https://www.example.com:443/geoserver/rest/',
apikey='apikey123',
type='GeoServer'
)
mock_args.name = 'test_geoserver'
services_create_spatial_command(mock_args)
mock_service.assert_called()
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertEqual(1, len(po_call_args))
self.assertEqual('Successfully created new Spatial Dataset Service!', po_call_args[0][0][0])
mock_service.assert_called_with(
name='test_geoserver',
endpoint='http://localhost:8181/geoserver/rest/',
public_endpoint='https://www.example.com:443/geoserver/rest/',
apikey='apikey123',
username='foo',
password='pass',
engine=mock_service.GEOSERVER
)
@mock.patch('tethys_cli.services_commands.pretty_output')
@mock.patch('tethys_services.models.SpatialDatasetService')
def test_services_create_spatial_command_thredds(self, mock_service, mock_pretty_output):
"""
Test for services_create_spatial_command
For going through the function and saving
:param mock_service: mock for SpatialDatasetService
:param mock_pretty_output: mock for pretty_output text
:return:
"""
mock_args = mock.MagicMock(
connection='foo:pass@http://localhost:8181/thredds/catalog.xml',
public_endpoint='https://www.example.com:443/thredds/catalog.xml',
apikey='apikey123',
type='THREDDS'
)
mock_args.name = 'test_thredds'
services_create_spatial_command(mock_args)
mock_service.assert_called()
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertEqual(1, len(po_call_args))
self.assertEqual('Successfully created new Spatial Dataset Service!', po_call_args[0][0][0])
mock_service.assert_called_with(
name='test_thredds',
endpoint='http://localhost:8181/thredds/catalog.xml',
public_endpoint='https://www.example.com:443/thredds/catalog.xml',
apikey='apikey123',
username='foo',
password='pass',
engine=mock_service.THREDDS
)
@mock.patch('tethys_cli.services_commands.pretty_output')
@mock.patch('tethys_cli.services_commands.exit')
@mock.patch('tethys_services.models.WebProcessingService')
def test_services_remove_wps_command_Exceptions(self, mock_service, mock_exit, mock_pretty_output):
"""
Test for services_remove_wps_command
Handles testing all of the exceptions thrown
:param mock_service: mock for Web Processing Service
:param mock_exit: mock for handling exit() code in function
:param mock_pretty_output: mock for pretty_output text
:return:
"""
mock_args = mock.MagicMock()
mock_service.__str__.return_value = 'Web Processing'
mock_service.objects.get.side_effect = [ValueError, ObjectDoesNotExist]
# NOTE: to prevent our tests from exiting prematurely, we change the behavior of exit to raise an exception
# to break the code execution, which we catch below.
mock_exit.side_effect = SystemExit
self.assertRaises(SystemExit, services_remove_wps_command, mock_args)
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertEqual(1, len(po_call_args))
self.assertIn('A Web Processing Service with ID/Name', po_call_args[0][0][0])
self.assertIn('does not exist.', po_call_args[0][0][0])
@mock.patch('tethys_cli.services_commands.pretty_output')
@mock.patch('tethys_cli.services_commands.exit')
@mock.patch('tethys_services.models.SpatialDatasetService')
def test_services_remove_spatial_command_force(self, mock_service, mock_exit, mock_pretty_output):
"""
Test for services_remove_spatial_command
For when a delete is forced
:param mock_service: mock for SpatialDatasetService
:param mock_exit: mock for handling exit() code in function
:param mock_pretty_output: mock for pretty_output text
:return:
"""
mock_args = mock.MagicMock()
mock_service.__str__.return_value = 'Spatial Dataset'
mock_args.force = True
# NOTE: to prevent our tests from exiting prematurely, we change the behavior of exit to raise an exception
# to break the code execution, which we catch below.
mock_exit.side_effect = SystemExit
self.assertRaises(SystemExit, services_remove_spatial_command, mock_args)
mock_service.objects.get().delete.assert_called()
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertEqual(1, len(po_call_args))
self.assertIn('Successfully removed Spatial Dataset Service', po_call_args[0][0][0])
@mock.patch('tethys_cli.services_commands.input')
@mock.patch('tethys_cli.services_commands.pretty_output')
@mock.patch('tethys_cli.services_commands.exit')
@mock.patch('tethys_services.models.SpatialDatasetService')
def test_services_remove_spatial_command_no_proceed_invalid_char(self, mock_service, mock_exit,
mock_pretty_output, mock_input):
"""
Test for services_remove_spatial_command
For when deleting is not forced, and when prompted, giving an invalid answer, then no delete
:param mock_service: mock for SpatialDatasetService
:param mock_exit: mock for handling exit() code in function
:param mock_pretty_output: mock for pretty_output text
:param mock_input: mock for handling raw_input requests
:return:
"""
mock_args = mock.MagicMock()
mock_service.__str__.return_value = 'Spatial Dataset'
mock_args.force = False
# NOTE: to prevent our tests from exiting prematurely, we change the behavior of exit to raise an exception
# to break the code execution, which we catch below.
mock_exit.side_effect = SystemExit
mock_input.side_effect = ['foo', 'N']
self.assertRaises(SystemExit, services_remove_spatial_command, mock_args)
mock_service.objects.get().delete.assert_not_called()
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertEqual(1, len(po_call_args))
self.assertEqual('Aborted. Spatial Dataset Service not removed.', po_call_args[0][0][0])
po_call_args = mock_input.call_args_list
self.assertEqual(2, len(po_call_args))
self.assertEqual('Are you sure you want to delete this Spatial Dataset Service? [y/n]: ', po_call_args[0][0][0])
self.assertEqual('Please enter either "y" or "n": ', po_call_args[1][0][0])
@mock.patch('tethys_cli.services_commands.input')
@mock.patch('tethys_cli.services_commands.pretty_output')
@mock.patch('tethys_cli.services_commands.exit')
@mock.patch('tethys_services.models.SpatialDatasetService')
def test_services_remove_spatial_command_proceed(self, mock_service, mock_exit, mock_pretty_output, mock_input):
"""
Test for services_remove_spatial_command
For when deleting is not forced, and when prompted, giving a valid answer to delete
:param mock_service: mock for SpatialDatasetService
:param mock_exit: mock for handling exit() code in function
:param mock_pretty_output: mock for pretty_output text
:param mock_input: mock for handling raw_input requests
:return:
"""
mock_args = mock.MagicMock()
mock_service.__str__.return_value = 'Spatial Dataset'
mock_args.force = False
# NOTE: to prevent our tests from exiting prematurely, we change the behavior of exit to raise an exception
# to break the code execution, which we catch below.
mock_exit.side_effect = SystemExit
mock_input.side_effect = ['y']
self.assertRaises(SystemExit, services_remove_spatial_command, mock_args)
mock_service.objects.get().delete.assert_called()
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertEqual(1, len(po_call_args))
self.assertIn('Successfully removed Spatial Dataset Service', po_call_args[0][0][0])
po_call_args = mock_input.call_args_list
self.assertEqual(1, len(po_call_args))
self.assertEqual('Are you sure you want to delete this Spatial Dataset Service? [y/n]: ', po_call_args[0][0][0])
@mock.patch('tethys_cli.services_commands.print')
@mock.patch('tethys_cli.services_commands.pretty_output')
@mock.patch('tethys_services.models.PersistentStoreService')
@mock.patch('tethys_services.models.SpatialDatasetService')
@mock.patch('tethys_cli.services_commands.model_to_dict')
def test_services_list_command_not_spatial_not_persistent(self, mock_mtd, mock_spatial, mock_persistent,
mock_pretty_output, mock_print):
"""
Test for services_list_command
Both spatial and persistent are not set, so both are processed
:param mock_mtd: mock for model_to_dict to return a dictionary
:param mock_spatial: mock for SpatialDatasetService
:param mock_persistent: mock for PersistentStoreService
:param mock_pretty_output: mock for pretty_output text
        :param mock_print: mock for text written with print statements
:return:
"""
mock_mtd.return_value = self.my_dict
mock_args = mock.MagicMock()
mock_args.spatial = False
mock_args.persistent = False
mock_args.dataset = False
mock_args.wps = False
mock_spatial.objects.order_by('id').all.return_value = [mock.MagicMock(), mock.MagicMock(), mock.MagicMock(),
mock.MagicMock()]
mock_persistent.objects.order_by('id').all.return_value = [mock.MagicMock(), mock.MagicMock(), mock.MagicMock(),
mock.MagicMock()]
services_list_command(mock_args)
# Check expected pretty_output
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertEqual(4, len(po_call_args))
self.assertIn('Persistent Store Services:', po_call_args[0][0][0])
self.assertIn('ID', po_call_args[1][0][0])
self.assertIn('Name', po_call_args[1][0][0])
self.assertIn('Host', po_call_args[1][0][0])
self.assertIn('Port', po_call_args[1][0][0])
self.assertNotIn('Endpoint', po_call_args[1][0][0])
self.assertNotIn('Public Endpoint', po_call_args[1][0][0])
self.assertNotIn('API Key', po_call_args[1][0][0])
self.assertIn('Spatial Dataset Services:', po_call_args[2][0][0])
self.assertIn('ID', po_call_args[3][0][0])
self.assertIn('Name', po_call_args[3][0][0])
self.assertNotIn('Host', po_call_args[3][0][0])
self.assertNotIn('Port', po_call_args[3][0][0])
self.assertIn('Endpoint', po_call_args[3][0][0])
self.assertIn('Public Endpoint', po_call_args[3][0][0])
self.assertIn('API Key', po_call_args[3][0][0])
# Check text written with Python's print
rts_call_args = mock_print.call_args_list
self.assertIn(self.my_dict['id'], rts_call_args[0][0][0])
self.assertIn(self.my_dict['name'], rts_call_args[0][0][0])
self.assertIn(self.my_dict['host'], rts_call_args[0][0][0])
self.assertIn(self.my_dict['port'], rts_call_args[0][0][0])
self.assertIn(self.my_dict['id'], rts_call_args[4][0][0])
self.assertIn(self.my_dict['name'], rts_call_args[4][0][0])
self.assertNotIn(self.my_dict['host'], rts_call_args[4][0][0])
self.assertNotIn(self.my_dict['port'], rts_call_args[4][0][0])
self.assertIn(self.my_dict['endpoint'], rts_call_args[4][0][0])
self.assertIn(self.my_dict['public_endpoint'], rts_call_args[4][0][0])
self.assertIn(self.my_dict['apikey'], rts_call_args[4][0][0])
@mock.patch('tethys_cli.services_commands.print')
@mock.patch('tethys_cli.services_commands.pretty_output')
@mock.patch('tethys_services.models.SpatialDatasetService')
@mock.patch('tethys_cli.services_commands.model_to_dict')
def test_services_list_command_spatial(self, mock_mtd, mock_spatial, mock_pretty_output, mock_print):
"""
Test for services_list_command
Only spatial is set
:param mock_mtd: mock for model_to_dict to return a dictionary
:param mock_spatial: mock for SpatialDatasetService
:param mock_pretty_output: mock for pretty_output text
        :param mock_print: mock for text written with print statements
:return:
"""
mock_mtd.return_value = self.my_dict
mock_args = mock.MagicMock()
mock_args.spatial = True
mock_args.persistent = False
mock_args.dataset = False
mock_args.wps = False
mock_spatial.objects.order_by('id').all.return_value = [mock.MagicMock(), mock.MagicMock(), mock.MagicMock()]
services_list_command(mock_args)
# Check expected pretty_output
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertEqual(2, len(po_call_args))
self.assertIn('Spatial Dataset Services:', po_call_args[0][0][0])
self.assertIn('ID', po_call_args[1][0][0])
self.assertIn('Name', po_call_args[1][0][0])
self.assertNotIn('Host', po_call_args[1][0][0])
self.assertNotIn('Port', po_call_args[1][0][0])
self.assertIn('Endpoint', po_call_args[1][0][0])
self.assertIn('Public Endpoint', po_call_args[1][0][0])
self.assertIn('API Key', po_call_args[1][0][0])
# Check text written with Python's print
rts_call_args = mock_print.call_args_list
self.assertIn(self.my_dict['id'], rts_call_args[2][0][0])
self.assertIn(self.my_dict['name'], rts_call_args[2][0][0])
self.assertNotIn(self.my_dict['host'], rts_call_args[2][0][0])
self.assertNotIn(self.my_dict['port'], rts_call_args[2][0][0])
self.assertIn(self.my_dict['endpoint'], rts_call_args[2][0][0])
self.assertIn(self.my_dict['public_endpoint'], rts_call_args[2][0][0])
self.assertIn(self.my_dict['apikey'], rts_call_args[2][0][0])
@mock.patch('tethys_cli.services_commands.print')
@mock.patch('tethys_cli.services_commands.pretty_output')
@mock.patch('tethys_services.models.PersistentStoreService')
@mock.patch('tethys_cli.services_commands.model_to_dict')
def test_services_list_command_persistent(self, mock_mtd, mock_persistent, mock_pretty_output, mock_print):
"""
Test for services_list_command
Only persistent is set
:param mock_mtd: mock for model_to_dict to return a dictionary
:param mock_persistent: mock for PersistentStoreService
:param mock_pretty_output: mock for pretty_output text
        :param mock_print: mock for text written with print statements
:return:
"""
mock_mtd.return_value = self.my_dict
mock_args = mock.MagicMock()
mock_args.spatial = False
mock_args.persistent = True
mock_args.dataset = False
mock_args.wps = False
mock_persistent.objects.order_by('id').all.return_value = [mock.MagicMock(), mock.MagicMock()]
services_list_command(mock_args)
# Check expected pretty_output
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertEqual(2, len(po_call_args))
self.assertIn('Persistent Store Services:', po_call_args[0][0][0])
self.assertIn('ID', po_call_args[1][0][0])
self.assertIn('Name', po_call_args[1][0][0])
self.assertIn('Host', po_call_args[1][0][0])
self.assertIn('Port', po_call_args[1][0][0])
self.assertNotIn('Endpoint', po_call_args[1][0][0])
self.assertNotIn('Public Endpoint', po_call_args[1][0][0])
self.assertNotIn('API Key', po_call_args[1][0][0])
# Check text written with Python's print
rts_call_args = mock_print.call_args_list
self.assertIn(self.my_dict['id'], rts_call_args[1][0][0])
self.assertIn(self.my_dict['name'], rts_call_args[1][0][0])
self.assertIn(self.my_dict['host'], rts_call_args[1][0][0])
self.assertIn(self.my_dict['port'], rts_call_args[1][0][0])
self.assertNotIn(self.my_dict['endpoint'], rts_call_args[1][0][0])
self.assertNotIn(self.my_dict['public_endpoint'], rts_call_args[1][0][0])
self.assertNotIn(self.my_dict['apikey'], rts_call_args[1][0][0])
@mock.patch('tethys_cli.services_commands.print')
@mock.patch('tethys_cli.services_commands.pretty_output')
@mock.patch('tethys_services.models.DatasetService')
@mock.patch('tethys_cli.services_commands.model_to_dict')
def test_services_list_command_dataset(self, mock_mtd, mock_dataset, mock_pretty_output, mock_print):
"""
Test for services_list_command
Only dataset is set
:param mock_mtd: mock for model_to_dict to return a dictionary
:param mock_dataset: mock for DatasetService
:param mock_pretty_output: mock for pretty_output text
        :param mock_print: mock for text written with print statements
:return:
"""
mock_mtd.return_value = self.my_dict
mock_args = mock.MagicMock()
mock_args.spatial = False
mock_args.persistent = False
mock_args.dataset = True
mock_args.wps = False
mock_dataset.objects.order_by('id').all.return_value = [mock.MagicMock(), mock.MagicMock()]
services_list_command(mock_args)
# Check expected pretty_output
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertEqual(2, len(po_call_args))
self.assertIn('Dataset Services:', po_call_args[0][0][0])
self.assertIn('ID', po_call_args[1][0][0])
self.assertIn('Name', po_call_args[1][0][0])
self.assertIn('Endpoint', po_call_args[1][0][0])
self.assertIn('Public Endpoint', po_call_args[1][0][0])
self.assertIn('API Key', po_call_args[1][0][0])
self.assertNotIn('Host', po_call_args[1][0][0])
self.assertNotIn('Port', po_call_args[1][0][0])
# Check text written with Python's print
rts_call_args = mock_print.call_args_list
self.assertIn(self.my_dict['id'], rts_call_args[1][0][0])
self.assertIn(self.my_dict['name'], rts_call_args[1][0][0])
self.assertIn(self.my_dict['endpoint'], rts_call_args[1][0][0])
self.assertIn(self.my_dict['public_endpoint'], rts_call_args[1][0][0])
self.assertIn(self.my_dict['apikey'], rts_call_args[1][0][0])
self.assertNotIn(self.my_dict['host'], rts_call_args[1][0][0])
self.assertNotIn(self.my_dict['port'], rts_call_args[1][0][0])
@mock.patch('tethys_cli.services_commands.print')
@mock.patch('tethys_cli.services_commands.pretty_output')
@mock.patch('tethys_services.models.WebProcessingService')
@mock.patch('tethys_cli.services_commands.model_to_dict')
def test_services_list_command_wps(self, mock_mtd, mock_wps, mock_pretty_output, mock_print):
"""
Test for services_list_command
        Only wps is set
:param mock_mtd: mock for model_to_dict to return a dictionary
:param mock_wps: mock for WebProcessingService
:param mock_pretty_output: mock for pretty_output text
        :param mock_print: mock for text written with print statements
:return:
"""
mock_mtd.return_value = self.my_dict
mock_args = mock.MagicMock()
mock_args.spatial = False
mock_args.persistent = False
mock_args.dataset = False
mock_args.wps = True
mock_wps.objects.order_by('id').all.return_value = [mock.MagicMock(), mock.MagicMock()]
services_list_command(mock_args)
# Check expected pretty_output
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertEqual(2, len(po_call_args))
self.assertIn('Web Processing Services:', po_call_args[0][0][0])
self.assertIn('ID', po_call_args[1][0][0])
self.assertIn('Name', po_call_args[1][0][0])
self.assertIn('Endpoint', po_call_args[1][0][0])
self.assertIn('Public Endpoint', po_call_args[1][0][0])
self.assertNotIn('Host', po_call_args[1][0][0])
self.assertNotIn('Port', po_call_args[1][0][0])
self.assertNotIn('API Key', po_call_args[1][0][0])
# Check text written with Python's print
rts_call_args = mock_print.call_args_list
self.assertIn(self.my_dict['id'], rts_call_args[1][0][0])
self.assertIn(self.my_dict['name'], rts_call_args[1][0][0])
self.assertIn(self.my_dict['endpoint'], rts_call_args[1][0][0])
self.assertIn(self.my_dict['public_endpoint'], rts_call_args[1][0][0])
self.assertNotIn(self.my_dict['host'], rts_call_args[1][0][0])
self.assertNotIn(self.my_dict['port'], rts_call_args[1][0][0])
self.assertNotIn(self.my_dict['apikey'], rts_call_args[1][0][0])
@mock.patch('tethys_cli.services_commands.pretty_output')
@mock.patch('tethys_services.models.DatasetService')
def test_services_create_dataset_command_IndexError(self, mock_service, mock_pretty_output):
mock_args = mock.MagicMock()
mock_args.connection = 'IndexError:9876@IndexError' # No 'http' or '://'
mock_args.type = 'HydroShare'
services_create_dataset_command(mock_args)
mock_service.assert_not_called()
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertEqual(1, len(po_call_args))
self.assertIn('The connection argument (-c) must be of the form', po_call_args[0][0][0])
self.assertIn('"<username>:<password>@<protocol>//<host>:<port>".', po_call_args[0][0][0])
@mock.patch('tethys_cli.services_commands.pretty_output')
@mock.patch('tethys_services.models.DatasetService')
def test_services_create_dataset_command_FormatError(self, mock_service, mock_pretty_output):
mock_args = mock.MagicMock()
mock_args.connection = 'foo:pass@http:://foo:1234'
mock_args.public_endpoint = 'foo@foo:foo' # No 'http' or '://'
mock_args.type = 'HydroShare'
services_create_dataset_command(mock_args)
mock_service.assert_not_called()
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertEqual(1, len(po_call_args))
self.assertIn('The public_endpoint argument (-p) must be of the form ', po_call_args[0][0][0])
self.assertIn('"<protocol>//<host>:<port>".', po_call_args[0][0][0])
@mock.patch('tethys_cli.services_commands.pretty_output')
@mock.patch('tethys_services.models.DatasetService')
def test_services_create_dataset_command_IntegrityError(self, mock_service, mock_pretty_output):
mock_args = mock.MagicMock()
mock_args.connection = 'foo:pass@http:://foo:1234'
mock_args.public_endpoint = 'http://foo:1234'
mock_args.type = 'HydroShare'
mock_service.side_effect = IntegrityError
services_create_dataset_command(mock_args)
mock_service.assert_called()
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertEqual(1, len(po_call_args))
self.assertIn('Dataset Service with name ', po_call_args[0][0][0])
self.assertIn('already exists. Command aborted.', po_call_args[0][0][0])
@mock.patch('tethys_cli.services_commands.pretty_output')
@mock.patch('tethys_services.models.DatasetService')
def test_services_create_dataset_command_hydroshare(self, mock_service, mock_pretty_output):
mock_args = mock.MagicMock(
connection='foo:pass@http://localhost:80',
public_endpoint='http://www.example.com:80',
apikey='apikey123',
type='HydroShare'
)
mock_args.name = 'test_hydroshare'
services_create_dataset_command(mock_args)
mock_service.assert_called()
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertEqual(1, len(po_call_args))
self.assertEqual('Successfully created new Dataset Service!', po_call_args[0][0][0])
mock_service.assert_called_with(
name='test_hydroshare',
endpoint='http://localhost:80',
public_endpoint='http://www.example.com:80',
apikey='apikey123',
username='foo',
password='pass',
engine=mock_service.HYDROSHARE
)
@mock.patch('tethys_cli.services_commands.pretty_output')
@mock.patch('tethys_services.models.DatasetService')
def test_services_create_dataset_command_ckan(self, mock_service, mock_pretty_output):
mock_args = mock.MagicMock(
connection='foo:pass@http://localhost:80',
public_endpoint='http://www.example.com:80',
apikey='apikey123',
type='CKAN'
)
mock_args.name = 'test_ckan'
services_create_dataset_command(mock_args)
mock_service.assert_called()
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertEqual(1, len(po_call_args))
self.assertEqual('Successfully created new Dataset Service!', po_call_args[0][0][0])
mock_service.assert_called_with(
name='test_ckan',
endpoint='http://localhost:80',
public_endpoint='http://www.example.com:80',
apikey='apikey123',
username='foo',
password='pass',
engine=mock_service.CKAN
)
@mock.patch('tethys_cli.services_commands.input')
@mock.patch('tethys_cli.services_commands.pretty_output')
@mock.patch('tethys_cli.services_commands.exit')
@mock.patch('tethys_services.models.DatasetService')
def test_services_remove_dataset_command_proceed(self, mock_service, mock_exit, mock_pretty_output, mock_input):
mock_args = mock.MagicMock()
mock_service.__str__.return_value = 'Dataset'
mock_args.force = False
mock_exit.side_effect = SystemExit
mock_input.side_effect = ['y']
self.assertRaises(SystemExit, services_remove_dataset_command, mock_args)
mock_service.objects.get().delete.assert_called()
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertEqual(1, len(po_call_args))
self.assertIn('Successfully removed Dataset Service', po_call_args[0][0][0])
po_call_args = mock_input.call_args_list
self.assertEqual(1, len(po_call_args))
self.assertEqual('Are you sure you want to delete this Dataset Service? [y/n]: ', po_call_args[0][0][0])
@mock.patch('tethys_cli.services_commands.pretty_output')
@mock.patch('tethys_services.models.WebProcessingService')
def test_services_create_wps_command_IndexError(self, mock_service, mock_pretty_output):
mock_args = mock.MagicMock()
mock_args.connection = 'IndexError:9876@IndexError' # No 'http' or '://'
services_create_wps_command(mock_args)
mock_service.assert_not_called()
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertEqual(1, len(po_call_args))
self.assertIn('The connection argument (-c) must be of the form', po_call_args[0][0][0])
self.assertIn('"<username>:<password>@<protocol>//<host>:<port>".', po_call_args[0][0][0])
@mock.patch('tethys_cli.services_commands.pretty_output')
@mock.patch('tethys_services.models.WebProcessingService')
def test_services_create_wps_command_IntegrityError(self, mock_service, mock_pretty_output):
mock_args = mock.MagicMock()
mock_args.connection = 'foo:pass@http:://foo:1234'
mock_args.public_endpoint = 'http://foo:1234'
mock_service.side_effect = IntegrityError
services_create_wps_command(mock_args)
mock_service.assert_called()
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertEqual(1, len(po_call_args))
self.assertIn('Web Processing Service with name ', po_call_args[0][0][0])
self.assertIn('already exists. Command aborted.', po_call_args[0][0][0])
@mock.patch('tethys_cli.services_commands.pretty_output')
@mock.patch('tethys_services.models.WebProcessingService')
def test_services_create_wps_command(self, mock_service, mock_pretty_output):
mock_args = mock.MagicMock()
mock_args.connection = 'foo:pass@http:://foo:1234'
mock_service.return_value = mock.MagicMock()
services_create_wps_command(mock_args)
mock_service.assert_called()
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertEqual(1, len(po_call_args))
self.assertEqual('Successfully created new Web Processing Service!', po_call_args[0][0][0])
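# All of the create commands above validate the same connection-string format.
# A minimal sketch of that parsing (a hypothetical helper for illustration
# only; the real logic lives in tethys_cli.services_commands and may differ):
def _split_connection(connection):
    """Split '<username>:<password>@<protocol>//<host>:<port>' into parts."""
    credentials, _, endpoint = connection.partition('@')
    username, _, password = credentials.partition(':')
    if '://' not in endpoint:
        # mirrors the "must be of the form" error branch the tests exercise
        raise IndexError('connection endpoint must include "<protocol>//"')
    return username, password, endpoint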
|
|
"""
ExampleRawIO is a fake example class.
It is to be used as a template when coding a new RawIO.
Rules for creating a new class:
  1. Step 1: Create the main class
    * Create a file in **neo/rawio/** that ends with "rawio.py"
    * Create the class that inherits BaseRawIO
    * copy/paste all methods that need to be implemented.
      See the end of neo.rawio.baserawio.BaseRawIO
    * code hard! The main difficulty **is _parse_header()**.
      In short, you have to create a mandatory dict that
      contains channel information::
        self.header = {}
        self.header['nb_block'] = 2
        self.header['nb_segment'] = [2, 3]
        self.header['signal_channels'] = sig_channels
        self.header['unit_channels'] = unit_channels
        self.header['event_channels'] = event_channels
  2. Step 2: RawIO test:
    * create a file in neo/rawio/tests with the same name with a "test_" prefix
    * copy/paste neo/rawio/tests/test_examplerawio.py and do the same
  3. Step 3: Create the neo.io class with the wrapper
    * Create a file in neo/io/ that ends with "io.py"
    * Create a class that inherits both your RawIO class and the BaseFromRaw class
    * copy/paste from neo/io/exampleio.py
  4. Step 4: IO test
    * create a file in neo/test/iotest with the same name with a "test_" prefix
    * copy/paste from neo/test/iotest/test_exampleio.py
"""
from .baserawio import (BaseRawIO, _signal_channel_dtype, _unit_channel_dtype,
_event_channel_dtype)
import numpy as np
class ExampleRawIO(BaseRawIO):
"""
Class for "reading" fake data from an imaginary file.
For the user, it give acces to raw data (signals, event, spikes) as they
are in the (fake) file int16 and int64.
For a developer, it is just an example showing guidelines for someone who wants
to develop a new IO module.
Two rules for developers:
* Respect the :ref:`neo_rawio_API`
* Follow the :ref:`io_guiline`
This fake IO:
* have 2 blocks
* blocks have 2 and 3 segments
* have 16 signal_channel sample_rate = 10000
* have 3 unit_channel
* have 2 event channel: one have *type=event*, the other have
*type=epoch*
Usage:
>>> import neo.rawio
>>> r = neo.rawio.ExampleRawIO(filename='itisafake.nof')
>>> r.parse_header()
>>> print(r)
>>> raw_chunk = r.get_analogsignal_chunk(block_index=0, seg_index=0,
i_start=0, i_stop=1024, channel_names=channel_names)
>>> float_chunk = reader.rescale_signal_raw_to_float(raw_chunk, dtype='float64',
channel_indexes=[0, 3, 6])
>>> spike_timestamp = reader.spike_timestamps(unit_index=0, t_start=None, t_stop=None)
>>> spike_times = reader.rescale_spike_timestamp(spike_timestamp, 'float64')
>>> ev_timestamps, _, ev_labels = reader.event_timestamps(event_channel_index=0)
"""
extensions = ['fake']
rawmode = 'one-file'
def __init__(self, filename=''):
BaseRawIO.__init__(self)
        # note that this filename is used in self._source_name
self.filename = filename
def _source_name(self):
# this function is used by __repr__
# for general cases self.filename is good
# But for URL you could mask some part of the URL to keep
# the main part.
return self.filename
def _parse_header(self):
        # This is the central method of a RawIO:
        # we need to collect from the original format all the
        # information needed for further fast access
        # at any place in the file
        # In short, _parse_header can be slow but
        # _get_analogsignal_chunk needs to be as fast as possible
        # create signal channels information
        # This is mandatory!!!!
        # gain/offset/units are really important because
        # the scaling to real values will be done with them
        # at the end: real_signal = (raw_signal * gain + offset) * pq.Quantity(units)
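        # For example, with the values chosen below (gain = 1000. / 2 ** 16,
        # offset = 0., units = 'uV'), a raw int16 value of 2 ** 15 rescales to
        # 2 ** 15 * 1000. / 2 ** 16 = 500.0 uV.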
sig_channels = []
for c in range(16):
ch_name = 'ch{}'.format(c)
# our channel id is c+1 just for fun
            # Note that chan_id should be related to
# original channel id in the file format
# so that the end user should not be lost when reading datasets
chan_id = c + 1
sr = 10000. # Hz
dtype = 'int16'
units = 'uV'
gain = 1000. / 2 ** 16
offset = 0.
            # group_id is only for special cases, for instance when channels
            # have different sampling rates. See TdtIO for that.
            # Here this is the general case: all channels have the same characteristics
group_id = 0
sig_channels.append((ch_name, chan_id, sr, dtype, units, gain, offset, group_id))
sig_channels = np.array(sig_channels, dtype=_signal_channel_dtype)
        # creating unit channels
# This is mandatory!!!!
# Note that if there is no waveform at all in the file
# then wf_units/wf_gain/wf_offset/wf_left_sweep/wf_sampling_rate
        # can be set to any value because _get_spike_raw_waveforms
# will return None
unit_channels = []
for c in range(3):
unit_name = 'unit{}'.format(c)
unit_id = '#{}'.format(c)
wf_units = 'uV'
wf_gain = 1000. / 2 ** 16
wf_offset = 0.
wf_left_sweep = 20
wf_sampling_rate = 10000.
unit_channels.append((unit_name, unit_id, wf_units, wf_gain,
wf_offset, wf_left_sweep, wf_sampling_rate))
unit_channels = np.array(unit_channels, dtype=_unit_channel_dtype)
# creating event/epoch channel
# This is mandatory!!!!
        # In RawIO, epochs and events are dealt with in the same way.
event_channels = []
event_channels.append(('Some events', 'ev_0', 'event'))
event_channels.append(('Some epochs', 'ep_1', 'epoch'))
event_channels = np.array(event_channels, dtype=_event_channel_dtype)
        # fill into the header dict
# This is mandatory!!!!!
self.header = {}
self.header['nb_block'] = 2
self.header['nb_segment'] = [2, 3]
self.header['signal_channels'] = sig_channels
self.header['unit_channels'] = unit_channels
self.header['event_channels'] = event_channels
        # insert some annotations at some places
        # at the neo.io level, IOs are free to add annotations
        # to any object. To keep this functionality with the wrapper
        # BaseFromRaw you can add annotations in a nested dict.
self._generate_minimal_annotations()
# If you are a lazy dev you can stop here.
for block_index in range(2):
bl_ann = self.raw_annotations['blocks'][block_index]
bl_ann['name'] = 'Block #{}'.format(block_index)
bl_ann['block_extra_info'] = 'This is the block {}'.format(block_index)
for seg_index in range([2, 3][block_index]):
seg_ann = bl_ann['segments'][seg_index]
seg_ann['name'] = 'Seg #{} Block #{}'.format(
seg_index, block_index)
seg_ann['seg_extra_info'] = 'This is the seg {} of block {}'.format(
seg_index, block_index)
for c in range(16):
anasig_an = seg_ann['signals'][c]
anasig_an['info'] = 'This is a good signals'
for c in range(3):
spiketrain_an = seg_ann['units'][c]
spiketrain_an['quality'] = 'Good!!'
for c in range(2):
event_an = seg_ann['events'][c]
if c == 0:
event_an['nickname'] = 'Miss Event 0'
elif c == 1:
event_an['nickname'] = 'MrEpoch 1'
def _segment_t_start(self, block_index, seg_index):
        # this must return a float scaled in seconds
# this t_start will be shared by all object in the segment
# except AnalogSignal
all_starts = [[0., 15.], [0., 20., 60.]]
return all_starts[block_index][seg_index]
def _segment_t_stop(self, block_index, seg_index):
        # this must return a float scaled in seconds
all_stops = [[10., 25.], [10., 30., 70.]]
return all_stops[block_index][seg_index]
def _get_signal_size(self, block_index, seg_index, channel_indexes=None):
        # we are lucky: signals in all segments have the same shape!! (10.0 seconds)
        # it is not always the case
        # this must return an int = the number of samples
        # Note that channel_indexes can be ignored for most cases
        # except when there are several sampling rates.
return 100000
def _get_signal_t_start(self, block_index, seg_index, channel_indexes):
        # This gives the t_start of the signals.
        # Very often this is equal to _segment_t_start but not
        # always.
        # this must return a float scaled in seconds
        # Note that channel_indexes can be ignored for most cases
        # except when there are several sampling rates.
        # Here it is the same.
# this is not always the case
return self._segment_t_start(block_index, seg_index)
def _get_analogsignal_chunk(self, block_index, seg_index, i_start, i_stop, channel_indexes):
        # this must return a signal chunk limited by
        # i_start/i_stop (which can be None)
        # channel_indexes can be None (=all channels) or a list or a numpy.array
        # This must return a 2D numpy array (even with one channel).
        # This must return the original dtype. No conversion here.
        # This must be as fast as possible.
        # Everything that can be done in _parse_header() must not be here.
        # Here we are lucky: our signals are always zeros!!
        # it is not always the case
        # internally signals are int16
        # conversion to real units is done with self.header['signal_channels']
if i_start is None:
i_start = 0
if i_stop is None:
i_stop = 100000
assert i_start >= 0, "I don't like your jokes"
assert i_stop <= 100000, "I don't like your jokes"
if channel_indexes is None:
nb_chan = 16
else:
nb_chan = len(channel_indexes)
raw_signals = np.zeros((i_stop - i_start, nb_chan), dtype='int16')
return raw_signals
def _spike_count(self, block_index, seg_index, unit_index):
        # Must return the number of spikes for a given (block_index, seg_index, unit_index)
        # we are lucky: all our units have the same number of spikes!!
        # it is not always the case
nb_spikes = 20
return nb_spikes
def _get_spike_timestamps(self, block_index, seg_index, unit_index, t_start, t_stop):
        # In our IO, timestamps are internally coded as 'int64' and they
        # represent indexes into the 10 kHz signals
        # we are lucky: spikes have the same discharge in all segments!!
        # incredible neuron!! This is not always the case
        # the same t_start/t_stop clipping must be used in _get_spike_raw_waveforms()
ts_start = (self._segment_t_start(block_index, seg_index) * 10000)
spike_timestamps = np.arange(0, 10000, 500) + ts_start
if t_start is not None or t_stop is not None:
            # restrict spikes to the given limits (in seconds)
lim0 = int(t_start * 10000)
lim1 = int(t_stop * 10000)
mask = (spike_timestamps >= lim0) & (spike_timestamps <= lim1)
spike_timestamps = spike_timestamps[mask]
return spike_timestamps
def _rescale_spike_timestamp(self, spike_timestamps, dtype):
        # must rescale a particular spike_timestamps array to seconds
        # with a fixed dtype so the user can choose the precision they want.
spike_times = spike_timestamps.astype(dtype)
spike_times /= 10000. # because 10kHz
return spike_times
def _get_spike_raw_waveforms(self, block_index, seg_index, unit_index, t_start, t_stop):
        # this must return a 3D numpy array (nb_spike, nb_channel, nb_sample)
        # in the original dtype
        # this must be as fast as possible.
        # the same t_start/t_stop clipping must be used in _get_spike_timestamps()
        # If there is no waveform supported in the
        # IO then _get_spike_raw_waveforms must return None
        # In our IO waveforms come from all channels
        # they are int16
        # conversion to real units is done with self.header['unit_channels']
        # Here, we have a realistic case: all waveforms are only noise.
        # it is not always the case
        # we have 20 spikes with a sweep of 50 samples (5 ms at 10 kHz)
        # trick to get how many spikes are in the slice
ts = self._get_spike_timestamps(block_index, seg_index, unit_index, t_start, t_stop)
nb_spike = ts.size
np.random.seed(2205) # a magic number (my birthday)
waveforms = np.random.randint(low=-2**4, high=2**4, size=nb_spike * 50, dtype='int16')
waveforms = waveforms.reshape(nb_spike, 1, 50)
return waveforms
def _event_count(self, block_index, seg_index, event_channel_index):
# event and spike are very similar
# we have 2 event channels
if event_channel_index == 0:
# event channel
return 6
elif event_channel_index == 1:
# epoch channel
return 10
def _get_event_timestamps(self, block_index, seg_index, event_channel_index, t_start, t_stop):
        # the main difference between a spike channel and an event channel
        # is that here we have 3 numpy arrays: timestamps, durations, labels
        # durations must be None for 'event'
        # labels must have dtype='U'
        # in our IO, events are directly coded in seconds
seg_t_start = self._segment_t_start(block_index, seg_index)
if event_channel_index == 0:
timestamp = np.arange(0, 6, dtype='float64') + seg_t_start
durations = None
labels = np.array(['trigger_a', 'trigger_b'] * 3, dtype='U12')
elif event_channel_index == 1:
timestamp = np.arange(0, 10, dtype='float64') + .5 + seg_t_start
durations = np.ones((10), dtype='float64') * .25
labels = np.array(['zoneX'] * 5 + ['zoneZ'] * 5, dtype='U12')
if t_start is not None:
keep = timestamp >= t_start
timestamp, labels = timestamp[keep], labels[keep]
if durations is not None:
durations = durations[keep]
if t_stop is not None:
keep = timestamp <= t_stop
timestamp, labels = timestamp[keep], labels[keep]
if durations is not None:
durations = durations[keep]
return timestamp, durations, labels
def _rescale_event_timestamp(self, event_timestamps, dtype):
        # must rescale a particular event_timestamps array to seconds
        # with a fixed dtype so the user can choose the precision they want.
        # really easy here because in our case it is already in seconds
event_times = event_timestamps.astype(dtype)
return event_times
def _rescale_epoch_duration(self, raw_duration, dtype):
        # really easy here because in our case it is already in seconds
durations = raw_duration.astype(dtype)
return durations
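# A minimal smoke-test sketch (assuming the public BaseRawIO wrappers from
# neo.rawio.baserawio; the filename is arbitrary because no file is read):
if __name__ == '__main__':
    reader = ExampleRawIO(filename='itisafake.nof')
    reader.parse_header()
    raw = reader.get_analogsignal_chunk(block_index=0, seg_index=0,
                                        i_start=0, i_stop=1024,
                                        channel_indexes=None)
    # the fake signals are all-zero int16, 16 channels wide
    assert raw.shape == (1024, 16) and raw.dtype == np.dtype('int16')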
|
|
#!/usr/bin/env python
# Siconos is a program dedicated to modeling, simulation and control
# of non smooth dynamical systems.
#
# Copyright 2016 INRIA.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
do_plot = True
try:
import matplotlib
except ImportError:
do_plot = False
if do_plot:
import os, sys
    if sys.platform == 'linux' and ('DISPLAY' not in os.environ
                                    or len(os.environ['DISPLAY']) == 0):
matplotlib.use('Agg')
from matplotlib.pyplot import \
subplot, title, plot, grid, show, savefig, ylim
from siconos.kernel import \
NonSmoothDynamicalSystem, MoreauJeanOSI, TimeDiscretisation, \
FrictionContact, NewtonImpactFrictionNSL, TimeStepping
import siconos.kernel as sk
from siconos.mechanics.collision.bullet import \
SiconosBulletCollisionManager
from siconos.mechanics.collision import \
SiconosBox, SiconosPlane, BodyDS, SiconosContactor, SiconosContactorSet
from numpy import zeros
from numpy.linalg import norm
t0 = 0 # start time
T = 20 # end time
h = 0.005 # time step
g = 9.81 # gravity
theta = 0.5 # theta scheme
#
# dynamical system
#
position_init = 10
velocity_init = 0
# a box shape
box1 = SiconosBox(1.0, 1.0, 1.0)
# A Bullet dynamical system: a shape + a mass (1.0) + position and velocity
body = BodyDS([0, 0, position_init, 1., 0, 0, 0],
[0, 0, velocity_init, 0., 0., 0.],
1.0)
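# The 7-component position is the translation (x, y, z) followed by a unit
# quaternion (q0, q1, q2, q3) for the orientation (identity here); the
# 6-component velocity is linear then angular.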
# Add the shape, wrapped in a SiconosContactor, to the body's
# contactor set.
body.contactors().push_back(SiconosContactor(box1))
# set external forces
weight = [0, 0, -body.scalarMass() * g]
body.setFExtPtr(weight)
#
# Model
#
bouncingBox = NonSmoothDynamicalSystem(t0, T)
# add the dynamical system to the non smooth dynamical system
bouncingBox.insertDynamicalSystem(body)
#
# Simulation
#
# (1) OneStepIntegrators
osi = MoreauJeanOSI(theta)
ground = SiconosPlane()
groundOffset = [0,0,-0.5,1,0,0,0]
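# same [x, y, z, q0, q1, q2, q3] layout as the body state: the plane is
# shifted 0.5 below the origin with identity orientation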
# (2) Time discretisation --
timedisc = TimeDiscretisation(t0, h)
# (3) one step non smooth problem
osnspb = FrictionContact(3)
osnspb.numericsSolverOptions().iparam[0] = 1000
osnspb.numericsSolverOptions().dparam[0] = 1e-5
osnspb.setMaxSize(16384)
osnspb.setMStorageType(1)
osnspb.setNumericsVerboseMode(False)
# keep previous solution
osnspb.setKeepLambdaAndYState(True)
# (4) non smooth law
nslaw = NewtonImpactFrictionNSL(0.8, 0., 0., 3)
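# arguments, in order: normal restitution 0.8, tangential restitution 0.,
# friction coefficient 0., for a 3D contact problem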
# (5) broadphase contact detection
broadphase = SiconosBulletCollisionManager()
# insert a non smooth law for contactors id 0
broadphase.insertNonSmoothLaw(nslaw, 0, 0)
# The ground is a static object;
# we give it a contactor group id: 0
scs = SiconosContactorSet()
scs.append(SiconosContactor(ground))
broadphase.insertStaticContactorSet(scs, groundOffset)
# (6) Simulation setup with (1) (2) (3) (4) (5)
simulation = TimeStepping(bouncingBox, timedisc)
simulation.insertInteractionManager(broadphase)
simulation.insertIntegrator(osi)
simulation.insertNonSmoothProblem(osnspb)
# Get the values to be plotted
# ->saved in a matrix dataPlot
N = int((T - t0) / h)
dataPlot = zeros((N+1, 4))
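# columns: time, box height q[2], vertical velocity v[2], and the norm of the
# contact impulses (lambda) when interactions are active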
#
# numpy pointers on dense Siconos vectors
#
q = body.q()
v = body.velocity()
#
# initial data
#
dataPlot[0, 0] = t0
dataPlot[0, 1] = q[2]
dataPlot[0, 2] = v[2]
k = 1
# time loop
while simulation.hasNextEvent():
simulation.computeOneStep()
dataPlot[k, 0] = simulation.nextTime()
dataPlot[k, 1] = q[2]
dataPlot[k, 2] = v[2]
#if (broadphase.collisionWorld().getDispatcher().getNumManifolds() > 0):
if (broadphase.statistics().new_interactions_created +
broadphase.statistics().existing_interactions_processed) > 0:
if bouncingBox.topology().\
numberOfIndexSet() == 2:
index1 = sk.interactions(simulation.indexSet(1))
if (len(index1) == 4):
dataPlot[k, 3] = norm(index1[0].lambda_(1)) + \
norm(index1[1].lambda_(1)) + norm(index1[2].lambda_(1)) + \
norm(index1[3].lambda_(1))
k += 1
simulation.nextStep()
#
# comparison with the reference file
#
from siconos.kernel import SimpleMatrix, getMatrix
from numpy.linalg import norm
ref = getMatrix(SimpleMatrix("result.ref"))
print("norm(dataPlot - ref) = {0}".format(norm(dataPlot - ref)))
if (norm(dataPlot - ref) > 1e-11):
print("Warning. The result is rather different from the reference file.")
#
# plots
#
if do_plot:
subplot(511)
title('position')
plot(dataPlot[0:k, 0], dataPlot[0:k, 1])
y = ylim()
plot(ref[0:k, 0], ref[0:k, 1])
ylim(y)
grid()
subplot(513)
title('velocity')
plot(dataPlot[0:k, 0], dataPlot[0:k, 2])
y = ylim()
plot(ref[0:k, 0], ref[0:k, 2])
ylim(y)
grid()
subplot(515)
plot(dataPlot[0:k, 0], dataPlot[0:k, 3])
y = ylim()
plot(ref[0:k, 0], ref[0:k, 3])
ylim(y)
title('lambda')
grid()
savefig('result.png')
show()
|
|
from __future__ import unicode_literals
import django
from django.db import models
from django.shortcuts import get_object_or_404
from django.test import TestCase
from django.utils import six
from rest_framework import generics, renderers, serializers, status
from rest_framework.test import APIRequestFactory
from tests.models import (
BasicModel, ForeignKeySource, ForeignKeyTarget, RESTFrameworkModel
)
factory = APIRequestFactory()
# Models
class SlugBasedModel(RESTFrameworkModel):
text = models.CharField(max_length=100)
slug = models.SlugField(max_length=32)
# Model for regression test for #285
class Comment(RESTFrameworkModel):
email = models.EmailField()
content = models.CharField(max_length=200)
created = models.DateTimeField(auto_now_add=True)
# Serializers
class BasicSerializer(serializers.ModelSerializer):
class Meta:
model = BasicModel
class ForeignKeySerializer(serializers.ModelSerializer):
class Meta:
model = ForeignKeySource
class SlugSerializer(serializers.ModelSerializer):
slug = serializers.ReadOnlyField()
class Meta:
model = SlugBasedModel
fields = ('text', 'slug')
# Views
class RootView(generics.ListCreateAPIView):
queryset = BasicModel.objects.all()
serializer_class = BasicSerializer
class InstanceView(generics.RetrieveUpdateDestroyAPIView):
queryset = BasicModel.objects.exclude(text='filtered out')
serializer_class = BasicSerializer
class FKInstanceView(generics.RetrieveUpdateDestroyAPIView):
queryset = ForeignKeySource.objects.all()
serializer_class = ForeignKeySerializer
class SlugBasedInstanceView(InstanceView):
"""
    A view over a model with a slug field; instances are looked up by slug.
"""
queryset = SlugBasedModel.objects.all()
serializer_class = SlugSerializer
lookup_field = 'slug'
# Tests
class TestRootView(TestCase):
def setUp(self):
"""
Create 3 BasicModel instances.
"""
items = ['foo', 'bar', 'baz']
for item in items:
BasicModel(text=item).save()
self.objects = BasicModel.objects
self.data = [
{'id': obj.id, 'text': obj.text}
for obj in self.objects.all()
]
self.view = RootView.as_view()
def test_get_root_view(self):
"""
GET requests to ListCreateAPIView should return list of objects.
"""
request = factory.get('/')
with self.assertNumQueries(1):
response = self.view(request).render()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, self.data)
def test_post_root_view(self):
"""
POST requests to ListCreateAPIView should create a new object.
"""
data = {'text': 'foobar'}
request = factory.post('/', data, format='json')
with self.assertNumQueries(1):
response = self.view(request).render()
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(response.data, {'id': 4, 'text': 'foobar'})
created = self.objects.get(id=4)
self.assertEqual(created.text, 'foobar')
def test_put_root_view(self):
"""
PUT requests to ListCreateAPIView should not be allowed
"""
data = {'text': 'foobar'}
request = factory.put('/', data, format='json')
with self.assertNumQueries(0):
response = self.view(request).render()
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
self.assertEqual(response.data, {"detail": 'Method "PUT" not allowed.'})
def test_delete_root_view(self):
"""
DELETE requests to ListCreateAPIView should not be allowed
"""
request = factory.delete('/')
with self.assertNumQueries(0):
response = self.view(request).render()
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
self.assertEqual(response.data, {"detail": 'Method "DELETE" not allowed.'})
def test_post_cannot_set_id(self):
"""
POST requests to create a new object should not be able to set the id.
"""
data = {'id': 999, 'text': 'foobar'}
request = factory.post('/', data, format='json')
with self.assertNumQueries(1):
response = self.view(request).render()
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(response.data, {'id': 4, 'text': 'foobar'})
created = self.objects.get(id=4)
self.assertEqual(created.text, 'foobar')
def test_post_error_root_view(self):
"""
POST requests to ListCreateAPIView in HTML should include a form error.
"""
data = {'text': 'foobar' * 100}
request = factory.post('/', data, HTTP_ACCEPT='text/html')
response = self.view(request).render()
expected_error = '<span class="help-block">Ensure this field has no more than 100 characters.</span>'
self.assertIn(expected_error, response.rendered_content.decode('utf-8'))
EXPECTED_QUERIES_FOR_PUT = 3 if django.VERSION < (1, 6) else 2
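# Django 1.6 changed Model.save() to attempt the UPDATE directly instead of
# issuing a SELECT first, so updates cost one extra query on older versions.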
class TestInstanceView(TestCase):
def setUp(self):
"""
Create 3 BasicModel instances.
"""
items = ['foo', 'bar', 'baz', 'filtered out']
for item in items:
BasicModel(text=item).save()
self.objects = BasicModel.objects.exclude(text='filtered out')
self.data = [
{'id': obj.id, 'text': obj.text}
for obj in self.objects.all()
]
self.view = InstanceView.as_view()
self.slug_based_view = SlugBasedInstanceView.as_view()
def test_get_instance_view(self):
"""
GET requests to RetrieveUpdateDestroyAPIView should return a single object.
"""
request = factory.get('/1')
with self.assertNumQueries(1):
response = self.view(request, pk=1).render()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, self.data[0])
def test_post_instance_view(self):
"""
POST requests to RetrieveUpdateDestroyAPIView should not be allowed
"""
data = {'text': 'foobar'}
request = factory.post('/', data, format='json')
with self.assertNumQueries(0):
response = self.view(request).render()
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
self.assertEqual(response.data, {"detail": 'Method "POST" not allowed.'})
def test_put_instance_view(self):
"""
PUT requests to RetrieveUpdateDestroyAPIView should update an object.
"""
data = {'text': 'foobar'}
request = factory.put('/1', data, format='json')
with self.assertNumQueries(EXPECTED_QUERIES_FOR_PUT):
response = self.view(request, pk='1').render()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(dict(response.data), {'id': 1, 'text': 'foobar'})
updated = self.objects.get(id=1)
self.assertEqual(updated.text, 'foobar')
def test_patch_instance_view(self):
"""
PATCH requests to RetrieveUpdateDestroyAPIView should update an object.
"""
data = {'text': 'foobar'}
request = factory.patch('/1', data, format='json')
with self.assertNumQueries(EXPECTED_QUERIES_FOR_PUT):
response = self.view(request, pk=1).render()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, {'id': 1, 'text': 'foobar'})
updated = self.objects.get(id=1)
self.assertEqual(updated.text, 'foobar')
def test_delete_instance_view(self):
"""
DELETE requests to RetrieveUpdateDestroyAPIView should delete an object.
"""
request = factory.delete('/1')
with self.assertNumQueries(2):
response = self.view(request, pk=1).render()
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
self.assertEqual(response.content, six.b(''))
ids = [obj.id for obj in self.objects.all()]
self.assertEqual(ids, [2, 3])
def test_get_instance_view_incorrect_arg(self):
"""
        GET requests with an incorrect pk type should return 404, not 500.
Regression test for #890.
"""
request = factory.get('/a')
with self.assertNumQueries(0):
response = self.view(request, pk='a').render()
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_put_cannot_set_id(self):
"""
        PUT requests should not be able to set or change the id of an existing object.
"""
data = {'id': 999, 'text': 'foobar'}
request = factory.put('/1', data, format='json')
with self.assertNumQueries(EXPECTED_QUERIES_FOR_PUT):
response = self.view(request, pk=1).render()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, {'id': 1, 'text': 'foobar'})
updated = self.objects.get(id=1)
self.assertEqual(updated.text, 'foobar')
def test_put_to_deleted_instance(self):
"""
PUT requests to RetrieveUpdateDestroyAPIView should return 404 if
an object does not currently exist.
"""
self.objects.get(id=1).delete()
data = {'text': 'foobar'}
request = factory.put('/1', data, format='json')
with self.assertNumQueries(1):
response = self.view(request, pk=1).render()
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_put_to_filtered_out_instance(self):
"""
        PUT requests to the URL of an instance that is filtered out should not
        be able to create new objects.
"""
data = {'text': 'foo'}
filtered_out_pk = BasicModel.objects.filter(text='filtered out')[0].pk
request = factory.put('/{0}'.format(filtered_out_pk), data, format='json')
response = self.view(request, pk=filtered_out_pk).render()
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_patch_cannot_create_an_object(self):
"""
PATCH requests should not be able to create objects.
"""
data = {'text': 'foobar'}
request = factory.patch('/999', data, format='json')
with self.assertNumQueries(1):
response = self.view(request, pk=999).render()
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
self.assertFalse(self.objects.filter(id=999).exists())
def test_put_error_instance_view(self):
"""
Incorrect PUT requests in HTML should include a form error.
"""
data = {'text': 'foobar' * 100}
request = factory.put('/', data, HTTP_ACCEPT='text/html')
response = self.view(request, pk=1).render()
expected_error = '<span class="help-block">Ensure this field has no more than 100 characters.</span>'
self.assertIn(expected_error, response.rendered_content.decode('utf-8'))
class TestFKInstanceView(TestCase):
def setUp(self):
"""
        Create 3 ForeignKeyTarget instances, each with a ForeignKeySource.
"""
items = ['foo', 'bar', 'baz']
for item in items:
t = ForeignKeyTarget(name=item)
t.save()
ForeignKeySource(name='source_' + item, target=t).save()
self.objects = ForeignKeySource.objects
self.data = [
{'id': obj.id, 'name': obj.name}
for obj in self.objects.all()
]
self.view = FKInstanceView.as_view()
class TestOverriddenGetObject(TestCase):
"""
Test cases for a RetrieveUpdateDestroyAPIView that does NOT use the
queryset/model mechanism but instead overrides get_object()
"""
def setUp(self):
"""
Create 3 BasicModel instances.
"""
items = ['foo', 'bar', 'baz']
for item in items:
BasicModel(text=item).save()
self.objects = BasicModel.objects
self.data = [
{'id': obj.id, 'text': obj.text}
for obj in self.objects.all()
]
class OverriddenGetObjectView(generics.RetrieveUpdateDestroyAPIView):
"""
Example detail view for override of get_object().
"""
serializer_class = BasicSerializer
def get_object(self):
pk = int(self.kwargs['pk'])
return get_object_or_404(BasicModel.objects.all(), id=pk)
self.view = OverriddenGetObjectView.as_view()
def test_overridden_get_object_view(self):
"""
GET requests to RetrieveUpdateDestroyAPIView should return a single object.
"""
request = factory.get('/1')
with self.assertNumQueries(1):
response = self.view(request, pk=1).render()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, self.data[0])
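# For contrast with the override above: a concrete view normally declares a
# queryset and lets DRF resolve the instance itself, roughly equivalent to
# get_object_or_404(self.get_queryset(), pk=self.kwargs['pk']).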
# Regression test for #285
class CommentSerializer(serializers.ModelSerializer):
class Meta:
model = Comment
exclude = ('created',)
class CommentView(generics.ListCreateAPIView):
serializer_class = CommentSerializer
model = Comment
class TestCreateModelWithAutoNowAddField(TestCase):
def setUp(self):
self.objects = Comment.objects
self.view = CommentView.as_view()
def test_create_model_with_auto_now_add_field(self):
"""
Regression test for #285
https://github.com/tomchristie/django-rest-framework/issues/285
"""
data = {'email': 'foobar@example.com', 'content': 'foobar'}
request = factory.post('/', data, format='json')
response = self.view(request).render()
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
created = self.objects.get(id=1)
self.assertEqual(created.content, 'foobar')
# Test for particularly ugly regression with m2m in browsable API
class ClassB(models.Model):
name = models.CharField(max_length=255)
class ClassA(models.Model):
name = models.CharField(max_length=255)
children = models.ManyToManyField(ClassB, blank=True, null=True)
class ClassASerializer(serializers.ModelSerializer):
children = serializers.PrimaryKeyRelatedField(
many=True, queryset=ClassB.objects.all()
)
class Meta:
model = ClassA
class ExampleView(generics.ListCreateAPIView):
serializer_class = ClassASerializer
queryset = ClassA.objects.all()
class TestM2MBrowsableAPI(TestCase):
def test_m2m_in_browsable_api(self):
"""
Test for particularly ugly regression with m2m in browsable API
"""
request = factory.get('/', HTTP_ACCEPT='text/html')
        view = ExampleView.as_view()
response = view(request).render()
self.assertEqual(response.status_code, status.HTTP_200_OK)
class InclusiveFilterBackend(object):
def filter_queryset(self, request, queryset, view):
return queryset.filter(text='foo')
class ExclusiveFilterBackend(object):
def filter_queryset(self, request, queryset, view):
return queryset.filter(text='other')
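# A minimal sketch (not used by the tests) of how DRF consumes the backend
# protocol defined above: GenericAPIView.filter_queryset() instantiates each
# configured backend class and chains the queryset through filter_queryset().
def _apply_filter_backends_sketch(request, queryset, view, backend_classes):
    # Equivalent in spirit to the loop inside GenericAPIView.filter_queryset().
    for backend_cls in backend_classes:
        queryset = backend_cls().filter_queryset(request, queryset, view)
    return queryset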
class TwoFieldModel(models.Model):
field_a = models.CharField(max_length=100)
field_b = models.CharField(max_length=100)
class DynamicSerializerView(generics.ListCreateAPIView):
queryset = TwoFieldModel.objects.all()
renderer_classes = (renderers.BrowsableAPIRenderer, renderers.JSONRenderer)
def get_serializer_class(self):
if self.request.method == 'POST':
class DynamicSerializer(serializers.ModelSerializer):
class Meta:
model = TwoFieldModel
fields = ('field_b',)
else:
class DynamicSerializer(serializers.ModelSerializer):
class Meta:
model = TwoFieldModel
return DynamicSerializer
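# Usage sketch (illustrative only): because get_serializer_class() is consulted
# per request, a POST to this view builds a serializer exposing only 'field_b',
# while any other method gets a serializer with all model fields:
#
#     view = DynamicSerializerView.as_view()
#     view(factory.get('/')).render()        # form with field_a and field_b
#     view(factory.post('/', {})).render()   # form with field_b only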
class TestFilterBackendAppliedToViews(TestCase):
def setUp(self):
"""
Create 3 BasicModel instances to filter on.
"""
items = ['foo', 'bar', 'baz']
for item in items:
BasicModel(text=item).save()
self.objects = BasicModel.objects
self.data = [
{'id': obj.id, 'text': obj.text}
for obj in self.objects.all()
]
def test_get_root_view_filters_by_name_with_filter_backend(self):
"""
GET requests to ListCreateAPIView should return filtered list.
"""
root_view = RootView.as_view(filter_backends=(InclusiveFilterBackend,))
request = factory.get('/')
response = root_view(request).render()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data), 1)
self.assertEqual(response.data, [{'id': 1, 'text': 'foo'}])
def test_get_root_view_filters_out_all_models_with_exclusive_filter_backend(self):
"""
GET requests to ListCreateAPIView should return empty list when all models are filtered out.
"""
root_view = RootView.as_view(filter_backends=(ExclusiveFilterBackend,))
request = factory.get('/')
response = root_view(request).render()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, [])
def test_get_instance_view_filters_out_name_with_filter_backend(self):
"""
GET requests to RetrieveUpdateDestroyAPIView should raise 404 when model filtered out.
"""
instance_view = InstanceView.as_view(filter_backends=(ExclusiveFilterBackend,))
request = factory.get('/1')
response = instance_view(request, pk=1).render()
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
self.assertEqual(response.data, {'detail': 'Not found.'})
def test_get_instance_view_will_return_single_object_when_filter_does_not_exclude_it(self):
"""
GET requests to RetrieveUpdateDestroyAPIView should return a single object when not excluded
"""
instance_view = InstanceView.as_view(filter_backends=(InclusiveFilterBackend,))
request = factory.get('/1')
response = instance_view(request, pk=1).render()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, {'id': 1, 'text': 'foo'})
def test_dynamic_serializer_form_in_browsable_api(self):
"""
GET requests to ListCreateAPIView should return filtered list.
"""
view = DynamicSerializerView.as_view()
request = factory.get('/')
response = view(request).render()
self.assertContains(response, 'field_b')
self.assertNotContains(response, 'field_a')
|
|
from __future__ import unicode_literals
# Ensure 'assert_raises' context manager support for Python 2.6
import tests.backport_assert_raises
from nose.tools import assert_raises
from moto.ec2 import ec2_backends
import boto
from boto.exception import EC2ResponseError
import sure # noqa
from moto import mock_ec2
@mock_ec2
def test_create_and_delete_volume():
conn = boto.connect_ec2('the_key', 'the_secret')
volume = conn.create_volume(80, "us-east-1a")
all_volumes = conn.get_all_volumes()
all_volumes.should.have.length_of(1)
all_volumes[0].size.should.equal(80)
all_volumes[0].zone.should.equal("us-east-1a")
volume = all_volumes[0]
volume.delete()
conn.get_all_volumes().should.have.length_of(0)
# Deleting something that was already deleted should throw an error
with assert_raises(EC2ResponseError) as cm:
volume.delete()
cm.exception.code.should.equal('InvalidVolume.NotFound')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
@mock_ec2
def test_filter_volume_by_id():
conn = boto.connect_ec2('the_key', 'the_secret')
volume1 = conn.create_volume(80, "us-east-1a")
volume2 = conn.create_volume(36, "us-east-1b")
volume3 = conn.create_volume(20, "us-east-1c")
vol1 = conn.get_all_volumes(volume_ids=volume3.id)
vol1.should.have.length_of(1)
vol1[0].size.should.equal(20)
vol1[0].zone.should.equal('us-east-1c')
vol2 = conn.get_all_volumes(volume_ids=[volume1.id, volume2.id])
vol2.should.have.length_of(2)
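# Related boto parameter (illustrative, not exercised here): get_all_volumes()
# also accepts server-side filters, complementing the volume_ids used above:
#
#     conn.get_all_volumes(filters={'availability-zone': 'us-east-1a'})
#
# Whether the mock honours a particular filter depends on the moto version.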
@mock_ec2
def test_volume_attach_and_detach():
conn = boto.connect_ec2('the_key', 'the_secret')
reservation = conn.run_instances('ami-1234abcd')
instance = reservation.instances[0]
volume = conn.create_volume(80, "us-east-1a")
volume.update()
volume.volume_state().should.equal('available')
volume.attach(instance.id, "/dev/sdh")
volume.update()
volume.volume_state().should.equal('in-use')
volume.attach_data.instance_id.should.equal(instance.id)
volume.detach()
volume.update()
volume.volume_state().should.equal('available')
with assert_raises(EC2ResponseError) as cm1:
volume.attach('i-1234abcd', "/dev/sdh")
cm1.exception.code.should.equal('InvalidInstanceID.NotFound')
cm1.exception.status.should.equal(400)
cm1.exception.request_id.should_not.be.none
with assert_raises(EC2ResponseError) as cm2:
conn.detach_volume(volume.id, instance.id, "/dev/sdh")
cm2.exception.code.should.equal('InvalidAttachment.NotFound')
cm2.exception.status.should.equal(400)
cm2.exception.request_id.should_not.be.none
with assert_raises(EC2ResponseError) as cm3:
conn.detach_volume(volume.id, 'i-1234abcd', "/dev/sdh")
cm3.exception.code.should.equal('InvalidInstanceID.NotFound')
cm3.exception.status.should.equal(400)
cm3.exception.request_id.should_not.be.none
@mock_ec2
def test_create_snapshot():
conn = boto.connect_ec2('the_key', 'the_secret')
volume = conn.create_volume(80, "us-east-1a")
volume.create_snapshot('a test snapshot')
snapshots = conn.get_all_snapshots()
snapshots.should.have.length_of(1)
snapshots[0].description.should.equal('a test snapshot')
snapshots[0].start_time.should_not.be.none
# Create snapshot without description
snapshot = volume.create_snapshot()
conn.get_all_snapshots().should.have.length_of(2)
snapshot.delete()
conn.get_all_snapshots().should.have.length_of(1)
# Deleting something that was already deleted should throw an error
with assert_raises(EC2ResponseError) as cm:
snapshot.delete()
cm.exception.code.should.equal('InvalidSnapshot.NotFound')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
@mock_ec2
def test_filter_snapshot_by_id():
conn = boto.connect_ec2('the_key', 'the_secret')
volume1 = conn.create_volume(36, "us-east-1a")
snap1 = volume1.create_snapshot('a test snapshot 1')
volume2 = conn.create_volume(42, 'us-east-1a')
snap2 = volume2.create_snapshot('a test snapshot 2')
volume3 = conn.create_volume(84, 'us-east-1a')
snap3 = volume3.create_snapshot('a test snapshot 3')
snapshots1 = conn.get_all_snapshots(snapshot_ids=snap2.id)
snapshots1.should.have.length_of(1)
snapshots1[0].volume_id.should.equal(volume2.id)
snapshots1[0].region.name.should.equal(conn.region.name)
snapshots2 = conn.get_all_snapshots(snapshot_ids=[snap2.id, snap3.id])
snapshots2.should.have.length_of(2)
for s in snapshots2:
s.start_time.should_not.be.none
s.volume_id.should.be.within([volume2.id, volume3.id])
s.region.name.should.equal(conn.region.name)
@mock_ec2
def test_snapshot_attribute():
conn = boto.connect_ec2('the_key', 'the_secret')
volume = conn.create_volume(80, "us-east-1a")
snapshot = volume.create_snapshot()
# Baseline
attributes = conn.get_snapshot_attribute(snapshot.id, attribute='createVolumePermission')
attributes.name.should.equal('create_volume_permission')
attributes.attrs.should.have.length_of(0)
ADD_GROUP_ARGS = {'snapshot_id': snapshot.id,
'attribute': 'createVolumePermission',
'operation': 'add',
'groups': 'all'}
REMOVE_GROUP_ARGS = {'snapshot_id': snapshot.id,
'attribute': 'createVolumePermission',
'operation': 'remove',
'groups': 'all'}
# Add 'all' group and confirm
conn.modify_snapshot_attribute(**ADD_GROUP_ARGS)
attributes = conn.get_snapshot_attribute(snapshot.id, attribute='createVolumePermission')
attributes.attrs['groups'].should.have.length_of(1)
attributes.attrs['groups'].should.equal(['all'])
# Add is idempotent
conn.modify_snapshot_attribute.when.called_with(**ADD_GROUP_ARGS).should_not.throw(EC2ResponseError)
# Remove 'all' group and confirm
conn.modify_snapshot_attribute(**REMOVE_GROUP_ARGS)
attributes = conn.get_snapshot_attribute(snapshot.id, attribute='createVolumePermission')
attributes.attrs.should.have.length_of(0)
# Remove is idempotent
conn.modify_snapshot_attribute.when.called_with(**REMOVE_GROUP_ARGS).should_not.throw(EC2ResponseError)
# Error: Add with group != 'all'
with assert_raises(EC2ResponseError) as cm:
conn.modify_snapshot_attribute(snapshot.id,
attribute='createVolumePermission',
operation='add',
groups='everyone')
cm.exception.code.should.equal('InvalidAMIAttributeItemValue')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
# Error: Add with invalid snapshot ID
with assert_raises(EC2ResponseError) as cm:
conn.modify_snapshot_attribute("snapshot-abcd1234",
attribute='createVolumePermission',
operation='add',
groups='all')
cm.exception.code.should.equal('InvalidSnapshot.NotFound')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
# Error: Remove with invalid snapshot ID
with assert_raises(EC2ResponseError) as cm:
conn.modify_snapshot_attribute("snapshot-abcd1234",
attribute='createVolumePermission',
operation='remove',
groups='all')
cm.exception.code.should.equal('InvalidSnapshot.NotFound')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
# Error: Add or remove with user ID instead of group
conn.modify_snapshot_attribute.when.called_with(snapshot.id,
attribute='createVolumePermission',
operation='add',
user_ids=['user']).should.throw(NotImplementedError)
conn.modify_snapshot_attribute.when.called_with(snapshot.id,
attribute='createVolumePermission',
operation='remove',
user_ids=['user']).should.throw(NotImplementedError)
@mock_ec2
def test_modify_attribute_blockDeviceMapping():
"""
Reproduces the missing feature explained at [0], where we want to mock a
call to modify an instance attribute of type: blockDeviceMapping.
[0] https://github.com/spulec/moto/issues/160
"""
conn = boto.ec2.connect_to_region("us-east-1")
reservation = conn.run_instances('ami-1234abcd')
instance = reservation.instances[0]
instance.modify_attribute('blockDeviceMapping', {'/dev/sda1': True})
instance = ec2_backends[conn.region.name].get_instance(instance.id)
instance.block_device_mapping.should.have.key('/dev/sda1')
instance.block_device_mapping['/dev/sda1'].delete_on_termination.should.be(True)
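# A minimal sketch (not collected as a test) of the context-manager form of
# moto's mocks, which is handy outside decorated functions; assumes the
# installed moto exposes it, as its documentation describes:
def sketch_mock_ec2_as_context_manager():
    with mock_ec2():
        conn = boto.connect_ec2('the_key', 'the_secret')
        conn.create_volume(80, "us-east-1a")
        assert len(conn.get_all_volumes()) == 1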
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest
from external.wip import work_in_progress
from rmgpy.molecule.group import ActionError, GroupAtom, GroupBond, Group
from rmgpy.molecule.atomtype import atomTypes
################################################################################
class TestGroupAtom(unittest.TestCase):
"""
Contains unit tests of the GroupAtom class.
"""
def setUp(self):
"""
A method called before each unit test in this class.
"""
self.atom = GroupAtom(atomType=[atomTypes['Cd']], radicalElectrons=[1], charge=[0], label='*1')
def testApplyActionBreakBond(self):
"""
Test the GroupAtom.applyAction() method for a BREAK_BOND action.
"""
action = ['BREAK_BOND', '*1', 'S', '*2']
for label, atomType in atomTypes.iteritems():
atom0 = GroupAtom(atomType=[atomType], radicalElectrons=[1], charge=[0], label='*1')
atom = atom0.copy()
try:
atom.applyAction(action)
self.assertEqual(len(atom.atomType), len(atomType.breakBond))
for a in atomType.breakBond:
self.assertTrue(a in atom.atomType)
self.assertEqual(atom0.radicalElectrons, atom.radicalElectrons)
self.assertEqual(atom0.charge, atom.charge)
self.assertEqual(atom0.label, atom.label)
except ActionError:
self.assertEqual(len(atomType.breakBond), 0)
def testApplyActionFormBond(self):
"""
Test the GroupAtom.applyAction() method for a FORM_BOND action.
"""
action = ['FORM_BOND', '*1', 'S', '*2']
for label, atomType in atomTypes.iteritems():
atom0 = GroupAtom(atomType=[atomType], radicalElectrons=[1], charge=[0], label='*1')
atom = atom0.copy()
try:
atom.applyAction(action)
self.assertEqual(len(atom.atomType), len(atomType.formBond))
for a in atomType.formBond:
self.assertTrue(a in atom.atomType)
self.assertEqual(atom0.radicalElectrons, atom.radicalElectrons)
self.assertEqual(atom0.charge, atom.charge)
self.assertEqual(atom0.label, atom.label)
except ActionError:
self.assertEqual(len(atomType.formBond), 0)
def testApplyActionIncrementBond(self):
"""
Test the GroupAtom.applyAction() method for a CHANGE_BOND action.
"""
action = ['CHANGE_BOND', '*1', 1, '*2']
for label, atomType in atomTypes.iteritems():
atom0 = GroupAtom(atomType=[atomType], radicalElectrons=[1], charge=[0], label='*1')
atom = atom0.copy()
try:
atom.applyAction(action)
self.assertEqual(len(atom.atomType), len(atomType.incrementBond))
for a in atomType.incrementBond:
self.assertTrue(a in atom.atomType)
self.assertEqual(atom0.radicalElectrons, atom.radicalElectrons)
self.assertEqual(atom0.charge, atom.charge)
self.assertEqual(atom0.label, atom.label)
except ActionError:
self.assertEqual(len(atomType.incrementBond), 0)
def testApplyActionDecrementBond(self):
"""
Test the GroupAtom.applyAction() method for a CHANGE_BOND action.
"""
action = ['CHANGE_BOND', '*1', -1, '*2']
for label, atomType in atomTypes.iteritems():
atom0 = GroupAtom(atomType=[atomType], radicalElectrons=[1], charge=[0], label='*1')
atom = atom0.copy()
try:
atom.applyAction(action)
self.assertEqual(len(atom.atomType), len(atomType.decrementBond))
for a in atomType.decrementBond:
self.assertTrue(a in atom.atomType)
self.assertEqual(atom0.radicalElectrons, atom.radicalElectrons)
self.assertEqual(atom0.charge, atom.charge)
self.assertEqual(atom0.label, atom.label)
except ActionError:
self.assertEqual(len(atomType.decrementBond), 0)
def testApplyActionGainRadical(self):
"""
Test the GroupAtom.applyAction() method for a GAIN_RADICAL action.
"""
action = ['GAIN_RADICAL', '*1', 1]
for label, atomType in atomTypes.iteritems():
atom0 = GroupAtom(atomType=[atomType], radicalElectrons=[1], charge=[0], label='*1')
atom = atom0.copy()
try:
atom.applyAction(action)
self.assertEqual(len(atom.atomType), len(atomType.incrementRadical))
for a in atomType.incrementRadical:
self.assertTrue(a in atom.atomType, "GAIN_RADICAL on {0} gave {1} not {2}".format(atomType, atom.atomType, atomType.incrementRadical))
self.assertEqual(atom0.radicalElectrons, [r - 1 for r in atom.radicalElectrons])
self.assertEqual(atom0.charge, atom.charge)
self.assertEqual(atom0.label, atom.label)
except ActionError:
self.assertEqual(len(atomType.incrementRadical), 0)
def testApplyActionLoseRadical(self):
"""
Test the GroupAtom.applyAction() method for a LOSE_RADICAL action.
"""
action = ['LOSE_RADICAL', '*1', 1]
for label, atomType in atomTypes.iteritems():
atom0 = GroupAtom(atomType=[atomType], radicalElectrons=[1], charge=[0], label='*1')
atom = atom0.copy()
try:
atom.applyAction(action)
self.assertEqual(len(atom.atomType), len(atomType.decrementRadical))
                for a in atomType.decrementRadical:
self.assertTrue(a in atom.atomType, "LOSE_RADICAL on {0} gave {1} not {2}".format(atomType, atom.atomType, atomType.decrementRadical))
self.assertEqual(atom0.radicalElectrons, [r + 1 for r in atom.radicalElectrons])
self.assertEqual(atom0.charge, atom.charge)
self.assertEqual(atom0.label, atom.label)
except ActionError:
self.assertEqual(len(atomType.decrementRadical), 0)
def testEquivalent(self):
"""
Test the GroupAtom.equivalent() method.
"""
for label1, atomType1 in atomTypes.iteritems():
for label2, atomType2 in atomTypes.iteritems():
atom1 = GroupAtom(atomType=[atomType1], radicalElectrons=[1], charge=[0], label='*1')
atom2 = GroupAtom(atomType=[atomType2], radicalElectrons=[1], charge=[0], label='*1')
if label1 == label2 or atomType2 in atomType1.generic or atomType1 in atomType2.generic:
self.assertTrue(atom1.equivalent(atom2), '{0!s} is not equivalent to {1!s}'.format(atom1, atom2))
self.assertTrue(atom2.equivalent(atom1), '{0!s} is not equivalent to {1!s}'.format(atom2, atom1))
else:
self.assertFalse(atom1.equivalent(atom2), '{0!s} is equivalent to {1!s}'.format(atom1, atom2))
self.assertFalse(atom2.equivalent(atom1), '{0!s} is equivalent to {1!s}'.format(atom2, atom1))
# Now see if charge and radical count are checked properly
for charge in range(3):
for radicals in range(2):
atom3 = GroupAtom(atomType=[atomType1], radicalElectrons=[radicals], charge=[charge], label='*1')
if radicals == 1 and charge == 0:
self.assertTrue(atom1.equivalent(atom3), '{0!s} is not equivalent to {1!s}'.format(atom1, atom3))
                            self.assertTrue(atom3.equivalent(atom1), '{0!s} is not equivalent to {1!s}'.format(atom3, atom1))
else:
self.assertFalse(atom1.equivalent(atom3), '{0!s} is equivalent to {1!s}'.format(atom1, atom3))
                            self.assertFalse(atom3.equivalent(atom1), '{0!s} is equivalent to {1!s}'.format(atom3, atom1))
def testIsSpecificCaseOf(self):
"""
Test the GroupAtom.isSpecificCaseOf() method.
"""
for label1, atomType1 in atomTypes.iteritems():
for label2, atomType2 in atomTypes.iteritems():
atom1 = GroupAtom(atomType=[atomType1], radicalElectrons=[1], charge=[0], label='*1')
atom2 = GroupAtom(atomType=[atomType2], radicalElectrons=[1], charge=[0], label='*1')
# And make more generic types of these two atoms
atom1gen = GroupAtom(atomType=[atomType1], radicalElectrons=[0, 1], charge=[0, 1], label='*1')
atom2gen = GroupAtom(atomType=[atomType2], radicalElectrons=[0, 1], charge=[0, 1], label='*1')
if label1 == label2 or atomType2 in atomType1.generic:
self.assertTrue(atom1.isSpecificCaseOf(atom2), '{0!s} is not a specific case of {1!s}'.format(atom1, atom2))
self.assertTrue(atom1.isSpecificCaseOf(atom2gen), '{0!s} is not a specific case of {1!s}'.format(atom1, atom2gen))
self.assertFalse(atom1gen.isSpecificCaseOf(atom2), '{0!s} is a specific case of {1!s}'.format(atom1gen, atom2))
else:
self.assertFalse(atom1.isSpecificCaseOf(atom2), '{0!s} is a specific case of {1!s}'.format(atom1, atom2))
self.assertFalse(atom1.isSpecificCaseOf(atom2gen), '{0!s} is a specific case of {1!s}'.format(atom1, atom2gen))
self.assertFalse(atom1gen.isSpecificCaseOf(atom2), '{0!s} is a specific case of {1!s}'.format(atom1gen, atom2))
def testCopy(self):
"""
Test the GroupAtom.copy() method.
"""
atom = self.atom.copy()
self.assertEqual(len(self.atom.atomType), len(atom.atomType))
self.assertEqual(self.atom.atomType[0].label, atom.atomType[0].label)
self.assertEqual(self.atom.radicalElectrons, atom.radicalElectrons)
self.assertEqual(self.atom.charge, atom.charge)
self.assertEqual(self.atom.label, atom.label)
def testPickle(self):
"""
Test that a GroupAtom object can be successfully pickled and
unpickled with no loss of information.
"""
import cPickle
atom = cPickle.loads(cPickle.dumps(self.atom))
self.assertEqual(len(self.atom.atomType), len(atom.atomType))
self.assertEqual(self.atom.atomType[0].label, atom.atomType[0].label)
self.assertEqual(self.atom.radicalElectrons, atom.radicalElectrons)
self.assertEqual(self.atom.charge, atom.charge)
self.assertEqual(self.atom.label, atom.label)
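# A compact sketch (not a test) of the try/except pattern used throughout the
# class above: applyAction() either maps the atom's candidate types through the
# action's transition list, or raises ActionError, in which case that
# transition list must be empty for the given atom type.
def _sketch_apply_action(atom_type, action):
    atom = GroupAtom(atomType=[atom_type], radicalElectrons=[1], charge=[0], label='*1')
    try:
        atom.applyAction(action)
        return atom.atomType
    except ActionError:
        return []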
################################################################################
class TestGroupBond(unittest.TestCase):
"""
Contains unit tests of the GroupBond class.
"""
def setUp(self):
"""
A method called before each unit test in this class.
"""
self.bond = GroupBond(None, None, order=['D'])
self.orderList = [['S'], ['D'], ['T'], ['B'], ['S','D'], ['D','S'], ['D','T'], ['S','D','T']]
def testApplyActionBreakBond(self):
"""
Test the GroupBond.applyAction() method for a BREAK_BOND action.
"""
action = ['BREAK_BOND', '*1', 'S', '*2']
for order0 in self.orderList:
bond0 = GroupBond(None, None, order=order0)
bond = bond0.copy()
try:
bond.applyAction(action)
self.fail('GroupBond.applyAction() unexpectedly processed a BREAK_BOND action.')
except ActionError:
pass
def testApplyActionFormBond(self):
"""
Test the GroupBond.applyAction() method for a FORM_BOND action.
"""
action = ['FORM_BOND', '*1', 'S', '*2']
for order0 in self.orderList:
bond0 = GroupBond(None, None, order=order0)
bond = bond0.copy()
try:
bond.applyAction(action)
self.fail('GroupBond.applyAction() unexpectedly processed a FORM_BOND action.')
except ActionError:
pass
def testApplyActionIncrementBond(self):
"""
Test the GroupBond.applyAction() method for a CHANGE_BOND action.
"""
action = ['CHANGE_BOND', '*1', 1, '*2']
for order0 in self.orderList:
bond0 = GroupBond(None, None, order=order0)
bond = bond0.copy()
try:
bond.applyAction(action)
except ActionError:
self.assertTrue('T' in order0 or 'B' in order0)
def testApplyActionDecrementBond(self):
"""
Test the GroupBond.applyAction() method for a CHANGE_BOND action.
"""
action = ['CHANGE_BOND', '*1', -1, '*2']
for order0 in self.orderList:
bond0 = GroupBond(None, None, order=order0)
bond = bond0.copy()
try:
bond.applyAction(action)
except ActionError:
self.assertTrue('S' in order0 or 'B' in order0)
def testApplyActionGainRadical(self):
"""
Test the GroupBond.applyAction() method for a GAIN_RADICAL action.
"""
action = ['GAIN_RADICAL', '*1', 1]
for order0 in self.orderList:
bond0 = GroupBond(None, None, order=order0)
bond = bond0.copy()
try:
bond.applyAction(action)
self.fail('GroupBond.applyAction() unexpectedly processed a GAIN_RADICAL action.')
except ActionError:
pass
def testApplyActionLoseRadical(self):
"""
Test the GroupBond.applyAction() method for a LOSE_RADICAL action.
"""
action = ['LOSE_RADICAL', '*1', 1]
for order0 in self.orderList:
bond0 = GroupBond(None, None, order=order0)
bond = bond0.copy()
try:
bond.applyAction(action)
self.fail('GroupBond.applyAction() unexpectedly processed a LOSE_RADICAL action.')
except ActionError:
pass
def testEquivalent(self):
"""
Test the GroupBond.equivalent() method.
"""
for order1 in self.orderList:
for order2 in self.orderList:
bond1 = GroupBond(None, None, order=order1)
bond2 = GroupBond(None, None, order=order2)
if order1 == order2 or (all([o in order2 for o in order1]) and all([o in order1 for o in order2])):
self.assertTrue(bond1.equivalent(bond2))
self.assertTrue(bond2.equivalent(bond1))
else:
self.assertFalse(bond1.equivalent(bond2))
self.assertFalse(bond2.equivalent(bond1))
def testIsSpecificCaseOf(self):
"""
Test the GroupBond.isSpecificCaseOf() method.
"""
for order1 in self.orderList:
for order2 in self.orderList:
bond1 = GroupBond(None, None, order=order1)
bond2 = GroupBond(None, None, order=order2)
if order1 == order2 or all([o in order2 for o in order1]):
self.assertTrue(bond1.isSpecificCaseOf(bond2))
else:
self.assertFalse(bond1.isSpecificCaseOf(bond2))
def testCopy(self):
"""
Test the GroupBond.copy() method.
"""
bond = self.bond.copy()
self.assertEqual(len(self.bond.order), len(bond.order))
self.assertEqual(self.bond.order, bond.order)
def testPickle(self):
"""
Test that a GroupBond object can be successfully pickled and
unpickled with no loss of information.
"""
import cPickle
bond = cPickle.loads(cPickle.dumps(self.bond))
self.assertEqual(len(self.bond.order), len(bond.order))
self.assertEqual(self.bond.order, bond.order)
################################################################################
class TestGroup(unittest.TestCase):
"""
    Contains unit tests of the Group class.
"""
def setUp(self):
self.adjlist = """
1 *2 [Cs,Cd] u0 {2,[S,D]} {3,S}
2 *1 [Os,Od] u0 {1,[S,D]}
3 R!H u0 {1,S}
"""
self.group = Group().fromAdjacencyList(self.adjlist)
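        # adjlist syntax per line: index, optional *label, [candidate atom
        # types], uN = number of unpaired electrons, then one {neighbor,order}
        # or {neighbor,[orders]} entry per bond.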
def testClearLabeledAtoms(self):
"""
Test the Group.clearLabeledAtoms() method.
"""
self.group.clearLabeledAtoms()
for atom in self.group.atoms:
self.assertEqual(atom.label, '')
def testContainsLabeledAtom(self):
"""
Test the Group.containsLabeledAtom() method.
"""
for atom in self.group.atoms:
if atom.label != '':
self.assertTrue(self.group.containsLabeledAtom(atom.label))
self.assertFalse(self.group.containsLabeledAtom('*3'))
self.assertFalse(self.group.containsLabeledAtom('*4'))
self.assertFalse(self.group.containsLabeledAtom('*5'))
self.assertFalse(self.group.containsLabeledAtom('*6'))
def testGetLabeledAtom(self):
"""
Test the Group.getLabeledAtom() method.
"""
for atom in self.group.atoms:
if atom.label != '':
self.assertEqual(atom, self.group.getLabeledAtom(atom.label))
try:
self.group.getLabeledAtom('*3')
self.fail('Unexpected successful return from Group.getLabeledAtom() with invalid atom label.')
except ValueError:
pass
def testGetLabeledAtoms(self):
"""
Test the Group.getLabeledAtoms() method.
"""
labeled = self.group.getLabeledAtoms()
for atom in self.group.atoms:
if atom.label != '':
self.assertTrue(atom.label in labeled)
self.assertTrue(atom in labeled.values())
else:
self.assertFalse(atom.label in labeled)
self.assertFalse(atom in labeled.values())
def testFromAdjacencyList(self):
"""
Test the Group.fromAdjacencyList() method.
"""
atom1, atom2, atom3 = self.group.atoms
self.assertTrue(self.group.hasBond(atom1,atom2))
self.assertTrue(self.group.hasBond(atom1,atom3))
self.assertFalse(self.group.hasBond(atom2,atom3))
bond12 = atom1.bonds[atom2]
bond13 = atom1.bonds[atom3]
self.assertTrue(atom1.label == '*2')
self.assertTrue(atom1.atomType[0].label in ['Cs','Cd'])
self.assertTrue(atom1.atomType[1].label in ['Cs','Cd'])
self.assertTrue(atom1.radicalElectrons == [0])
self.assertTrue(atom2.label == '*1')
self.assertTrue(atom2.atomType[0].label in ['Os','Od'])
self.assertTrue(atom2.atomType[1].label in ['Os','Od'])
self.assertTrue(atom2.radicalElectrons == [0])
self.assertTrue(atom3.label == '')
self.assertTrue(atom3.atomType[0].label == 'R!H')
self.assertTrue(atom3.radicalElectrons == [0])
self.assertTrue(bond12.order == ['S','D'])
self.assertTrue(bond13.order == ['S'])
def testToAdjacencyList(self):
"""
Test the Group.toAdjacencyList() method.
"""
adjlist = self.group.toAdjacencyList()
        self.assertEqual(adjlist.strip(), self.adjlist.strip(), adjlist)
def testIsIsomorphic(self):
"""
Test the Group.isIsomorphic() method.
"""
adjlist = """
1 *1 [Os,Od] u0 {3,[S,D]}
2 R!H u0 {3,S}
3 *2 [Cs,Cd] u0 {1,[S,D]} {2,S}
"""
group = Group().fromAdjacencyList(adjlist)
self.assertTrue(self.group.isIsomorphic(group))
self.assertTrue(group.isIsomorphic(self.group))
def testFindIsomorphism(self):
"""
Test the Group.findIsomorphism() method.
"""
adjlist = """
1 *1 [Os,Od] u0 {3,[S,D]}
2 R!H u0 {3,S}
3 *2 [Cs,Cd] u0 {1,[S,D]} {2,S}
"""
group = Group().fromAdjacencyList(adjlist)
result = self.group.findIsomorphism(group)
self.assertEqual(len(result), 1)
for atom1, atom2 in result[0].items():
self.assertTrue(atom1 in self.group.atoms)
self.assertTrue(atom2 in group.atoms)
self.assertTrue(atom1.equivalent(atom2))
for atom3 in atom1.bonds:
atom4 = result[0][atom3]
self.assertTrue(atom4 in atom2.bonds)
self.assertTrue(atom3.equivalent(atom4))
bond1 = atom1.bonds[atom3]
bond2 = atom2.bonds[atom4]
self.assertTrue(bond1.equivalent(bond2))
def testIsSubgraphIsomorphic(self):
"""
Test the Group.isSubgraphIsomorphic() method.
"""
adjlist = """
1 *1 [Cs,Cd] u0
"""
group = Group().fromAdjacencyList(adjlist)
self.assertTrue(self.group.isSubgraphIsomorphic(group))
self.assertFalse(group.isIsomorphic(self.group))
def testFindSubgraphIsomorphisms(self):
"""
Test the Group.findSubgraphIsomorphisms() method.
"""
adjlist = """
1 *1 [Cs,Cd] u0
"""
group = Group().fromAdjacencyList(adjlist)
result = self.group.findSubgraphIsomorphisms(group)
self.assertEqual(len(result), 1)
for atom1, atom2 in result[0].iteritems():
self.assertTrue(atom1 in self.group.atoms)
self.assertTrue(atom2 in group.atoms)
self.assertTrue(atom1.equivalent(atom2))
def testPickle(self):
"""
Test that a Group object can be successfully pickled and
unpickled with no loss of information.
"""
import cPickle
group = cPickle.loads(cPickle.dumps(self.group))
self.assertEqual(len(self.group.atoms), len(group.atoms))
for atom0, atom in zip(group.atoms, self.group.atoms):
self.assertTrue(atom0.equivalent(atom))
self.assertTrue(self.group.isIsomorphic(group))
self.assertTrue(group.isIsomorphic(self.group))
################################################################################
if __name__ == '__main__':
unittest.main( testRunner = unittest.TextTestRunner(verbosity=2) )
|
|
"""
Groundwork recipe pattern.
Provides functions to register, get and build recipes.
Recipes are used to create directories and files based on a given template and some user input.
It is mostly used to speed up setting up new python packages, groundwork applications or projects.
Based on cookiecutter: https://github.com/audreyr/cookiecutter/
"""
import os
import logging
from cookiecutter.main import cookiecutter
from groundwork.patterns.gw_base_pattern import GwBasePattern
class GwRecipesPattern(GwBasePattern):
def __init__(self, *args, **kwargs):
super(GwRecipesPattern, self).__init__(*args, **kwargs)
if not hasattr(self.app, "recipes"):
self.app.recipes = RecipesListApplication(self.app)
#: Stores an instance of :class:`~groundwork.patterns.gw_recipes_pattern.RecipesListPlugin`
self.recipes = RecipesListPlugin(self)
# register new recipe (aka template)
# get recipes
class RecipesListPlugin:
"""
Cares about the recipe management on plugin level.
    Allows registering, getting and building recipes in the context of the current plugin.
    :param plugin: plugin, which shall be used as context.
"""
def __init__(self, plugin):
self._plugin = plugin
self.__app = plugin.app
self.__log = plugin.log
# Let's register a receiver, which cares about the deactivation process of recipes for this plugin.
# We do it after the original plugin deactivation, so we can be sure that the registered function is the last
# one which cares about recipes for this plugin.
self._plugin.signals.connect(receiver="%s_recipes_deactivation" % self._plugin.name,
signal="plugin_deactivate_post",
function=self.__deactivate_recipes,
description="Deactivate recipes for %s" % self._plugin.name,
sender=self._plugin)
self.__log.debug("Plugin recipes initialised")
def __deactivate_recipes(self, plugin, *args, **kwargs):
"""
Deactivates/unregisters all recipes of the current plugin, if this plugin gets deactivated.
"""
recipes = self.get()
for recipe in recipes.keys():
self.unregister(recipe)
def register(self, name, path, description, final_words=None,
pre_hook=None, post_hook=None):
"""
Registers a new recipe in the context of the current plugin.
:param name: Name of the recipe
:param path: Absolute path of the recipe folder
:param description: A meaningful description of the recipe
        :param final_words: A string, which gets printed after the recipe was built.
        :param pre_hook: Function to call before recipe installation
        :param post_hook: Function to call after recipe installation
"""
return self.__app.recipes.register(name, path, self._plugin, description,
final_words, pre_hook, post_hook)
def unregister(self, recipe):
"""
Unregister a recipe of the current plugin.
:param recipe: Name of the recipe.
"""
return self.__app.recipes.unregister(recipe)
def get(self, name=None):
"""
Gets a list of all recipes, which are registered by the current plugin.
If a name is provided, only the requested recipe is returned or None.
        :param name: Name of the recipe
"""
return self.__app.recipes.get(name, self._plugin)
def build(self, recipe, no_input=False, extra_context=None):
"""
        Builds a recipe.
        :param recipe: Name of the recipe to build.
        :param no_input: If True, do not prompt the user and use defaults and ``extra_context`` instead.
:param extra_context: A dictionary of context that overrides default
and user configuration
"""
return self.__app.recipes.build(recipe, self._plugin, no_input, extra_context)
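# Usage sketch from a plugin's perspective (names and paths are illustrative):
#
#     class MyPlugin(GwRecipesPattern):
#         def activate(self):
#             self.recipes.register(
#                 name="package_skeleton",
#                 path=os.path.abspath("./recipes/package_skeleton"),
#                 description="Creates a minimal python package",
#                 final_words="Done. Check the generated folder.")
#             self.recipes.build("package_skeleton", no_input=True,
#                                extra_context={"project_name": "demo"})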
class RecipesListApplication:
"""
Cares about the recipe management on application level.
    Allows registering, getting and building recipes.
:param app: groundwork application instance
"""
def __init__(self, app):
self.__app = app
self.recipes = {}
self.__log = logging.getLogger(__name__)
self.__log.info("Application recipes initialised")
def register(self, name, path, plugin, description=None, final_words=None,
pre_hook=None, post_hook=None):
"""
Registers a new recipe.
"""
if name in self.recipes.keys():
            raise RecipeExistsException("Recipe %s was already registered by %s" %
                                        (name, self.recipes[name].plugin.name))
if pre_hook is not None and not callable(pre_hook):
raise IncorrectParameterTypeException('Data type for pre_hook is not correct')
if post_hook is not None and not callable(post_hook):
raise IncorrectParameterTypeException('Data type for post_hook is not correct')
self.recipes[name] = Recipe(name, path, plugin, description,
final_words, pre_hook, post_hook)
self.__log.debug("Recipe %s registered by %s" % (name, plugin.name))
return self.recipes[name]
def unregister(self, recipe):
"""
Unregisters an existing recipe, so that this recipe is no longer available.
This function is mainly used during plugin deactivation.
:param recipe: Name of the recipe
"""
if recipe not in self.recipes.keys():
            self.__log.warning("Cannot unregister recipe %s: no recipe with this name is registered" % recipe)
else:
del (self.recipes[recipe])
self.__log.debug("Recipe %s got unregistered" % recipe)
def get(self, recipe=None, plugin=None):
"""
Get one or more recipes.
:param recipe: Name of the recipe
:type recipe: str
:param plugin: Plugin object, under which the recipe was registered
:type plugin: GwBasePattern
"""
if plugin is not None:
if recipe is None:
recipes_list = {}
for key in self.recipes.keys():
if self.recipes[key].plugin == plugin:
recipes_list[key] = self.recipes[key]
return recipes_list
else:
if recipe in self.recipes.keys():
if self.recipes[recipe].plugin == plugin:
return self.recipes[recipe]
else:
return None
else:
return None
else:
if recipe is None:
return self.recipes
else:
if recipe in self.recipes.keys():
return self.recipes[recipe]
else:
return None
def build(self, recipe, plugin=None, no_input=False, extra_context=None):
"""
        Executes a recipe and creates new folders and files.
        :param recipe: Name of the recipe
        :param no_input: If True, do not prompt the user and use defaults and ``extra_context`` instead.
        :param extra_context: A dictionary of context that overrides default
        and user configuration
        :param plugin: Plugin object, to which the recipe must belong.
"""
if recipe not in self.recipes.keys():
raise RecipeMissingException("Recipe %s unknown." % recipe)
recipe_obj = self.recipes[recipe]
if plugin is not None:
if recipe_obj.plugin != plugin:
                raise RecipeWrongPluginException("The requested recipe does not belong to the given plugin. Use "
                                                 "the app object to retrieve the requested recipe: "
                                                 "my_app.recipes.get(%s)" % recipe)
recipe_obj.build(no_input, extra_context)
class Recipe:
"""
    A recipe is an existing folder, which is handled by the underlying cookiecutter library as a template folder.
:param name: Name of the recipe
:param path: Absolute path to the recipe folder
:param plugin: Plugin which registers the recipe
:param description: Meaningful description of the recipe
    :param final_words: String, which gets printed after a recipe was successfully built.
    :param pre_hook: Function to call before recipe installation
    :param post_hook: Function to call after recipe installation
"""
def __init__(self, name, path, plugin, description="", final_words="",
pre_hook=None, post_hook=None):
self.name = name
if os.path.isabs(path):
self.path = path
else:
raise IOError("Path of recipe must be absolute. Got %s" % path)
self.plugin = plugin
self.description = description
self.final_words = final_words
self.pre_hook = pre_hook
self.post_hook = post_hook
self.__log = logging.getLogger(__name__)
def build(self, output_dir=None, no_input=False, extra_context=None, **kwargs):
"""
        Builds the recipe and creates the needed folders and files.
        May ask the user for some parameter inputs.
        :param output_dir: Path where the recipe shall be built. Default is the current working directory.
        :param no_input: If True, do not prompt the user and use defaults and ``extra_context`` instead.
:param extra_context: A dictionary of context that overrides default and user configuration.
:return: location of the installed recipe
"""
if output_dir is None:
output_dir = os.getcwd()
if type(no_input) is not bool:
raise IncorrectParameterTypeException('Data type for no_input is not correct')
if no_input is True:
if type(extra_context) is not dict and extra_context is not None:
raise IncorrectParameterTypeException('Data type for extra_context is not correct')
        if no_input is False:
            extra_context = None
if self.pre_hook is not None and not self.pre_hook():
raise HooksException('Pre-hook failure')
target = cookiecutter(self.path,
output_dir=output_dir,
no_input=no_input,
extra_context=extra_context,
**kwargs)
if self.post_hook is not None and not self.post_hook():
raise HooksException('Post-hook failure')
if self.final_words is not None and len(self.final_words) > 0:
print("")
print(self.final_words)
return target
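# What build() ultimately delegates to is cookiecutter's public API; a minimal
# standalone equivalent (template path and context keys are illustrative):
#
#     from cookiecutter.main import cookiecutter
#     cookiecutter("/abs/path/to/recipe", output_dir="/tmp/target",
#                  no_input=True, extra_context={"project_name": "demo"})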
class RecipeExistsException(Exception):
    pass
class RecipeMissingException(Exception):
    pass
class RecipeWrongPluginException(Exception):
    pass
class IncorrectParameterTypeException(Exception):
    pass
class HooksException(Exception):
    pass
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.layers.convolutional."""
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.keras.legacy_tf_layers import convolutional as conv_layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class ConvTest(test.TestCase):
def testInvalidDataFormat(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 3), seed=1)
with self.assertRaisesRegex(ValueError, 'data_format'):
conv_layers.conv2d(images, 32, 3, data_format='invalid')
def testInvalidStrides(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 3), seed=1)
with self.assertRaisesRegex(ValueError, 'strides'):
conv_layers.conv2d(images, 32, 3, strides=(1, 2, 3))
with self.assertRaisesRegex(ValueError, 'strides'):
conv_layers.conv2d(images, 32, 3, strides=None)
def testInvalidKernelSize(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 3), seed=1)
with self.assertRaisesRegex(ValueError, 'kernel_size'):
conv_layers.conv2d(images, 32, (1, 2, 3))
with self.assertRaisesRegex(ValueError, 'kernel_size'):
conv_layers.conv2d(images, 32, None)
def testCreateConv2D(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 4))
layer = conv_layers.Conv2D(32, [3, 3], activation=nn_ops.relu)
output = layer.apply(images)
if not context.executing_eagerly():
self.assertEqual(output.op.name, 'conv2d/Relu')
self.assertListEqual(output.get_shape().as_list(),
[5, height - 2, width - 2, 32])
self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 4, 32])
self.assertListEqual(layer.bias.get_shape().as_list(), [32])
def testConv2DFloat16(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 4), dtype='float16')
output = conv_layers.conv2d(images, 32, [3, 3], activation=nn_ops.relu)
self.assertListEqual(output.get_shape().as_list(),
[5, height - 2, width - 2, 32])
def testCreateConv2DIntegerKernelSize(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 4))
layer = conv_layers.Conv2D(32, 3)
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(),
[5, height - 2, width - 2, 32])
self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 4, 32])
self.assertListEqual(layer.bias.get_shape().as_list(), [32])
def testCreateConv2DChannelsFirst(self):
with ops.Graph().as_default():
height, width = 7, 9
images = random_ops.random_uniform((5, 4, height, width))
layer = conv_layers.Conv2D(32, [3, 3], data_format='channels_first')
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(),
[5, 32, height - 2, width - 2])
self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 4, 32])
self.assertListEqual(layer.bias.get_shape().as_list(), [32])
def testUnknownInputChannels(self):
with ops.Graph().as_default():
images = array_ops.placeholder(dtypes.float32, (5, 7, 9, None))
layer = conv_layers.Conv2D(32, [3, 3], activation=nn_ops.relu)
with self.assertRaisesRegex(
ValueError, 'The channel dimension of the inputs '
'should be defined. Found `None`.'):
_ = layer.apply(images)
images = array_ops.placeholder(dtypes.float32, (5, None, 7, 9))
layer = conv_layers.Conv2D(32, [3, 3], data_format='channels_first')
with self.assertRaisesRegex(
ValueError, 'The channel dimension of the inputs '
'should be defined. Found `None`.'):
_ = layer.apply(images)
def testConv2DPaddingSame(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 32), seed=1)
layer = conv_layers.Conv2D(64, images.get_shape()[1:3], padding='same')
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(), [5, height, width, 64])
def testCreateConvWithStrides(self):
height, width = 6, 8
# Test strides tuple
images = random_ops.random_uniform((5, height, width, 3), seed=1)
layer = conv_layers.Conv2D(32, [3, 3], strides=(2, 2), padding='same')
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(),
[5, height / 2, width / 2, 32])
# Test strides integer
layer = conv_layers.Conv2D(32, [3, 3], strides=2, padding='same')
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(),
[5, height / 2, width / 2, 32])
# Test unequal strides
layer = conv_layers.Conv2D(32, [3, 3], strides=(2, 1), padding='same')
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(),
[5, height / 2, width, 32])
def testCreateConv1D(self):
width = 7
data = random_ops.random_uniform((5, width, 4))
layer = conv_layers.Conv1D(32, 3, activation=nn_ops.relu)
output = layer.apply(data)
if not context.executing_eagerly():
self.assertEqual(output.op.name, 'conv1d/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, width - 2, 32])
self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 4, 32])
self.assertListEqual(layer.bias.get_shape().as_list(), [32])
def testConv1DFloat16(self):
width = 7
data = random_ops.random_uniform((5, width, 4), dtype='float16')
output = conv_layers.conv1d(data, 32, 3, activation=nn_ops.relu)
self.assertListEqual(output.get_shape().as_list(), [5, width - 2, 32])
def testCreateConv1DChannelsFirst(self):
with ops.Graph().as_default():
width = 7
data = random_ops.random_uniform((5, 4, width))
layer = conv_layers.Conv1D(32, 3, data_format='channels_first')
output = layer.apply(data)
self.assertListEqual(output.get_shape().as_list(), [5, 32, width - 2])
self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 4, 32])
self.assertListEqual(layer.bias.get_shape().as_list(), [32])
def testUnknownInputChannelsConv1D(self):
with ops.Graph().as_default():
data = array_ops.placeholder(dtypes.float32, (5, 4, None))
layer = conv_layers.Conv1D(32, 3, activation=nn_ops.relu)
with self.assertRaisesRegex(
ValueError, 'The channel dimension of the inputs '
'should be defined. Found `None`.'):
_ = layer.apply(data)
data = array_ops.placeholder(dtypes.float32, (5, None, 4))
layer = conv_layers.Conv1D(32, 3, data_format='channels_first')
with self.assertRaisesRegex(
ValueError, 'The channel dimension of the inputs '
'should be defined. Found `None`.'):
_ = layer.apply(data)
def testCreateConv3D(self):
depth, height, width = 6, 7, 9
volumes = random_ops.random_uniform((5, depth, height, width, 4))
layer = conv_layers.Conv3D(32, [3, 3, 3], activation=nn_ops.relu)
output = layer.apply(volumes)
if not context.executing_eagerly():
self.assertEqual(output.op.name, 'conv3d/Relu')
self.assertListEqual(output.get_shape().as_list(),
[5, depth - 2, height - 2, width - 2, 32])
self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 3, 4, 32])
self.assertListEqual(layer.bias.get_shape().as_list(), [32])
def testUnknownInputChannelsConv3D(self):
with ops.Graph().as_default():
volumes = array_ops.placeholder(dtypes.float32, (5, 6, 7, 9, None))
layer = conv_layers.Conv3D(32, [3, 3, 3], activation=nn_ops.relu)
with self.assertRaisesRegex(
ValueError, 'The channel dimension of the inputs '
'should be defined. Found `None`.'):
_ = layer.apply(volumes)
def testConv2DKernelRegularizer(self):
with ops.Graph().as_default():
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 4))
reg = lambda x: 0.1 * math_ops.reduce_sum(x)
layer = conv_layers.Conv2D(32, [3, 3], kernel_regularizer=reg)
layer.apply(images)
loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(len(loss_keys), 1)
self.evaluate([v.initializer for v in layer.variables])
self.assertListEqual(
self.evaluate(layer.losses), self.evaluate(loss_keys))
def testConv2DBiasRegularizer(self):
with ops.Graph().as_default():
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 4))
reg = lambda x: 0.1 * math_ops.reduce_sum(x)
layer = conv_layers.Conv2D(32, [3, 3], bias_regularizer=reg)
layer.apply(images)
loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(len(loss_keys), 1)
self.evaluate([v.initializer for v in layer.variables])
self.assertListEqual(
self.evaluate(layer.losses), self.evaluate(loss_keys))
def testConv2DNoBias(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 4))
layer = conv_layers.Conv2D(
32, [3, 3], activation=nn_ops.relu, use_bias=False)
output = layer.apply(images)
if not context.executing_eagerly():
self.assertEqual(output.op.name, 'conv2d/Relu')
self.assertListEqual(output.get_shape().as_list(),
[5, height - 2, width - 2, 32])
self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 4, 32])
self.assertEqual(layer.bias, None)
def testDilatedConv2D(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 4))
layer = conv_layers.Conv2D(32, [3, 3], dilation_rate=3)
output = layer.apply(images)
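    # With dilation_rate=3 the effective kernel is 3 * (3 - 1) + 1 = 7, so a
    # 'valid' convolution over 7x9 inputs yields (7 - 7 + 1, 9 - 7 + 1) = (1, 3).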
self.assertListEqual(output.get_shape().as_list(), [5, 1, 3, 32])
self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 4, 32])
self.assertListEqual(layer.bias.get_shape().as_list(), [32])
# Test tuple dilation rate
layer = conv_layers.Conv2D(32, [3, 3], dilation_rate=(1, 3))
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(), [5, height - 2, 3, 32])
def testFunctionalConv2DReuse(self):
with ops.Graph().as_default():
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 3), seed=1)
conv_layers.conv2d(images, 32, [3, 3], name='conv1')
self.assertEqual(len(variables.trainable_variables()), 2)
conv_layers.conv2d(images, 32, [3, 3], name='conv1', reuse=True)
self.assertEqual(len(variables.trainable_variables()), 2)
def testFunctionalConv2DReuseFromScope(self):
with ops.Graph().as_default():
with variable_scope.variable_scope('scope'):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 3), seed=1)
conv_layers.conv2d(images, 32, [3, 3], name='conv1')
self.assertEqual(len(variables.trainable_variables()), 2)
with variable_scope.variable_scope('scope', reuse=True):
conv_layers.conv2d(images, 32, [3, 3], name='conv1')
self.assertEqual(len(variables.trainable_variables()), 2)
def testFunctionalConv2DInitializerFromScope(self):
with ops.Graph().as_default(), self.cached_session():
with variable_scope.variable_scope(
'scope', initializer=init_ops.ones_initializer()):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 3), seed=1)
conv_layers.conv2d(images, 32, [3, 3], name='conv1')
weights = variables.trainable_variables()
# Check the names of weights in order.
self.assertTrue('kernel' in weights[0].name)
self.assertTrue('bias' in weights[1].name)
self.evaluate(variables.global_variables_initializer())
weights = self.evaluate(weights)
# Check that the kernel weights got initialized to ones (from scope)
self.assertAllClose(weights[0], np.ones((3, 3, 3, 32)))
# Check that the bias still got initialized to zeros.
self.assertAllClose(weights[1], np.zeros((32)))
def testFunctionalConv2DNoReuse(self):
with ops.Graph().as_default():
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 3), seed=1)
conv_layers.conv2d(images, 32, [3, 3])
self.assertEqual(len(variables.trainable_variables()), 2)
conv_layers.conv2d(images, 32, [3, 3])
self.assertEqual(len(variables.trainable_variables()), 4)
def testConstraints(self):
# Conv1D
k_constraint = lambda x: x / math_ops.reduce_sum(x)
b_constraint = lambda x: x / math_ops.reduce_max(x)
conv1d = conv_layers.Conv1D(2, 3,
kernel_constraint=k_constraint,
bias_constraint=b_constraint)
inputs = random_ops.random_uniform((5, 3, 5), seed=1)
conv1d(inputs)
self.assertEqual(conv1d.kernel_constraint, k_constraint)
self.assertEqual(conv1d.bias_constraint, b_constraint)
# Conv2D
k_constraint = lambda x: x / math_ops.reduce_sum(x)
b_constraint = lambda x: x / math_ops.reduce_max(x)
conv2d = conv_layers.Conv2D(2, 3,
kernel_constraint=k_constraint,
bias_constraint=b_constraint)
inputs = random_ops.random_uniform((5, 3, 3, 5), seed=1)
conv2d(inputs)
self.assertEqual(conv2d.kernel_constraint, k_constraint)
self.assertEqual(conv2d.bias_constraint, b_constraint)
# Conv3D
k_constraint = lambda x: x / math_ops.reduce_sum(x)
b_constraint = lambda x: x / math_ops.reduce_max(x)
conv3d = conv_layers.Conv3D(2, 3,
kernel_constraint=k_constraint,
bias_constraint=b_constraint)
inputs = random_ops.random_uniform((5, 3, 3, 3, 5), seed=1)
conv3d(inputs)
self.assertEqual(conv3d.kernel_constraint, k_constraint)
self.assertEqual(conv3d.bias_constraint, b_constraint)
def testConv3DChannelsFirst(self):
# Test case for GitHub issue 15655
with ops.Graph().as_default():
images = array_ops.placeholder(
dtype=dtypes.float32, shape=[None, 1, 32, 32, 32])
conv_layers.conv3d(images, 32, 9, data_format='channels_first')
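# A small reference helper (not used by the tests) for the spatial sizes the
# assertions above expect: 'valid' yields ceil((size - k_eff + 1) / stride) and
# 'same' yields ceil(size / stride), where a dilated filter's effective kernel
# is k_eff = dilation * (kernel - 1) + 1.
def _expected_conv_output_size(size, kernel, stride=1, padding='valid', dilation=1):
    effective_kernel = dilation * (kernel - 1) + 1
    if padding == 'same':
        return -(-size // stride)  # ceiling division
    return -(-(size - effective_kernel + 1) // stride)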
class SeparableConv1DTest(test.TestCase):
def testInvalidDataFormat(self):
length = 9
data = random_ops.random_uniform((5, length, 3), seed=1)
with self.assertRaisesRegex(ValueError, 'data_format'):
conv_layers.separable_conv1d(data, 32, 3, data_format='invalid')
def testInvalidStrides(self):
length = 9
data = random_ops.random_uniform((5, length, 3), seed=1)
with self.assertRaisesRegex(ValueError, 'strides'):
conv_layers.separable_conv1d(data, 32, 3, strides=(1, 2))
with self.assertRaisesRegex(ValueError, 'strides'):
conv_layers.separable_conv1d(data, 32, 3, strides=None)
def testInvalidKernelSize(self):
length = 9
data = random_ops.random_uniform((5, length, 3), seed=1)
with self.assertRaisesRegex(ValueError, 'kernel_size'):
conv_layers.separable_conv1d(data, 32, (1, 2))
with self.assertRaisesRegex(ValueError, 'kernel_size'):
conv_layers.separable_conv1d(data, 32, None)
def testCreateSeparableConv1D(self):
length = 9
data = random_ops.random_uniform((5, length, 4))
layer = conv_layers.SeparableConv1D(32, 3, activation=nn_ops.relu)
output = layer.apply(data)
if not context.executing_eagerly():
self.assertEqual(output.op.name, 'separable_conv1d/Relu')
self.assertEqual(output.get_shape().as_list(), [5, length - 2, 32])
self.assertEqual(layer.depthwise_kernel.get_shape().as_list(), [3, 4, 1])
self.assertEqual(layer.pointwise_kernel.get_shape().as_list(), [1, 4, 32])
self.assertEqual(layer.bias.get_shape().as_list(), [32])
def testCreateSeparableConv1DDepthMultiplier(self):
length = 9
data = random_ops.random_uniform((5, length, 4))
layer = conv_layers.SeparableConv1D(32, 3, depth_multiplier=2)
output = layer.apply(data)
self.assertEqual(output.get_shape().as_list(), [5, length - 2, 32])
self.assertEqual(layer.depthwise_kernel.get_shape().as_list(), [3, 4, 2])
self.assertEqual(layer.pointwise_kernel.get_shape().as_list(), [1, 8, 32])
self.assertEqual(layer.bias.get_shape().as_list(), [32])
def testCreateSeparableConv1DChannelsFirst(self):
with ops.Graph().as_default():
length = 9
data = random_ops.random_uniform((5, 4, length))
layer = conv_layers.SeparableConv1D(32, 3, data_format='channels_first')
output = layer.apply(data)
self.assertEqual(output.get_shape().as_list(), [5, 32, length - 2])
self.assertEqual(layer.depthwise_kernel.get_shape().as_list(), [3, 4, 1])
self.assertEqual(layer.pointwise_kernel.get_shape().as_list(), [1, 4, 32])
self.assertEqual(layer.bias.get_shape().as_list(), [32])
def testSeparableConv1DPaddingSame(self):
length = 9
data = random_ops.random_uniform((5, length, 32), seed=1)
layer = conv_layers.SeparableConv1D(
64, length, padding='same')
output = layer.apply(data)
self.assertEqual(output.get_shape().as_list(), [5, length, 64])
def testCreateSeparableConv1DWithStrides(self):
length = 10
data = random_ops.random_uniform((5, length, 3), seed=1)
layer = conv_layers.SeparableConv1D(32, 3, strides=2, padding='same')
output = layer.apply(data)
self.assertEqual(output.get_shape().as_list(), [5, length // 2, 32])
def testCreateSeparableConv1DWithStridesChannelsFirst(self):
with ops.Graph().as_default():
data_format = 'channels_first'
length = 10
data = random_ops.random_uniform((5, 3, length), seed=1)
layer = conv_layers.SeparableConv1D(
32, 3, strides=2, padding='same', data_format=data_format)
output = layer.apply(data)
self.assertEqual(output.get_shape().as_list(), [5, 32, length // 2])
def testFunctionalConv1DReuse(self):
with ops.Graph().as_default():
length = 10
data = random_ops.random_uniform((5, length, 3), seed=1)
conv_layers.separable_conv1d(data, 32, 3, name='sepconv1')
self.assertEqual(len(variables.trainable_variables()), 3)
conv_layers.separable_conv1d(data, 32, 3, name='sepconv1', reuse=True)
self.assertEqual(len(variables.trainable_variables()), 3)
def testFunctionalConv1DReuseFromScope(self):
with ops.Graph().as_default():
with variable_scope.variable_scope('scope'):
length = 10
data = random_ops.random_uniform((5, length, 3), seed=1)
conv_layers.separable_conv1d(data, 32, 3, name='sepconv1')
self.assertEqual(len(variables.trainable_variables()), 3)
with variable_scope.variable_scope('scope', reuse=True):
conv_layers.separable_conv1d(data, 32, 3, name='sepconv1')
self.assertEqual(len(variables.trainable_variables()), 3)
def testFunctionalConv1DNoReuse(self):
with ops.Graph().as_default():
length = 10
data = random_ops.random_uniform((5, length, 3), seed=1)
conv_layers.separable_conv1d(data, 32, 3)
self.assertEqual(len(variables.trainable_variables()), 3)
conv_layers.separable_conv1d(data, 32, 3)
self.assertEqual(len(variables.trainable_variables()), 6)
def testSeparableConv1DDepthwiseRegularizer(self):
with ops.Graph().as_default():
length = 9
data = random_ops.random_uniform((5, length, 4))
reg = lambda x: 0.1 * math_ops.reduce_sum(x)
layer = conv_layers.SeparableConv1D(32, 3, depthwise_regularizer=reg)
layer.apply(data)
loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(len(loss_keys), 1)
self.evaluate([v.initializer for v in layer.variables])
self.assertListEqual(
self.evaluate(layer.losses), self.evaluate(loss_keys))
def testSeparableConv1DPointwiseRegularizer(self):
with ops.Graph().as_default():
length = 9
data = random_ops.random_uniform((5, length, 4))
reg = lambda x: 0.1 * math_ops.reduce_sum(x)
layer = conv_layers.SeparableConv1D(32, 3, pointwise_regularizer=reg)
layer.apply(data)
loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(len(loss_keys), 1)
self.evaluate([v.initializer for v in layer.variables])
self.assertListEqual(
self.evaluate(layer.losses), self.evaluate(loss_keys))
def testSeparableConv1DBiasRegularizer(self):
with ops.Graph().as_default():
length = 9
data = random_ops.random_uniform((5, length, 4))
reg = lambda x: 0.1 * math_ops.reduce_sum(x)
layer = conv_layers.SeparableConv1D(32, 3, bias_regularizer=reg)
layer.apply(data)
loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(len(loss_keys), 1)
self.evaluate([v.initializer for v in layer.variables])
self.assertListEqual(
self.evaluate(layer.losses), self.evaluate(loss_keys))
def testSeparableConv1DNoBias(self):
with ops.Graph().as_default():
length = 9
data = random_ops.random_uniform((5, length, 4))
layer = conv_layers.SeparableConv1D(
32, 3, activation=nn_ops.relu, use_bias=False)
output = layer.apply(data)
self.assertEqual(output.op.name, 'separable_conv1d/Relu')
self.assertEqual(layer.bias, None)
def testConstraints(self):
d_constraint = lambda x: x / math_ops.reduce_sum(x)
p_constraint = lambda x: x / math_ops.reduce_sum(x)
b_constraint = lambda x: x / math_ops.reduce_max(x)
layer = conv_layers.SeparableConv1D(2, 3,
depthwise_constraint=d_constraint,
pointwise_constraint=p_constraint,
bias_constraint=b_constraint)
inputs = random_ops.random_uniform((5, 3, 5), seed=1)
layer(inputs)
self.assertEqual(layer.depthwise_constraint, d_constraint)
self.assertEqual(layer.pointwise_constraint, p_constraint)
self.assertEqual(layer.bias_constraint, b_constraint)
class SeparableConv2DTest(test.TestCase):
def testInvalidDataFormat(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 3), seed=1)
with self.assertRaisesRegex(ValueError, 'data_format'):
conv_layers.separable_conv2d(images, 32, 3, data_format='invalid')
def testInvalidStrides(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 3), seed=1)
with self.assertRaisesRegex(ValueError, 'strides'):
conv_layers.separable_conv2d(images, 32, 3, strides=(1, 2, 3))
with self.assertRaisesRegex(ValueError, 'strides'):
conv_layers.separable_conv2d(images, 32, 3, strides=None)
def testInvalidKernelSize(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 3), seed=1)
with self.assertRaisesRegex(ValueError, 'kernel_size'):
conv_layers.separable_conv2d(images, 32, (1, 2, 3))
with self.assertRaisesRegex(ValueError, 'kernel_size'):
conv_layers.separable_conv2d(images, 32, None)
def testCreateSeparableConv2D(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 4))
layer = conv_layers.SeparableConv2D(32, [3, 3], activation=nn_ops.relu)
output = layer.apply(images)
if not context.executing_eagerly():
self.assertEqual(output.op.name, 'separable_conv2d/Relu')
self.assertListEqual(output.get_shape().as_list(),
[5, height - 2, width - 2, 32])
self.assertListEqual(layer.depthwise_kernel.get_shape().as_list(),
[3, 3, 4, 1])
self.assertListEqual(layer.pointwise_kernel.get_shape().as_list(),
[1, 1, 4, 32])
self.assertListEqual(layer.bias.get_shape().as_list(), [32])
def testCreateSeparableConv2DDepthMultiplier(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 4))
layer = conv_layers.SeparableConv2D(32, [3, 3], depth_multiplier=2)
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(),
[5, height - 2, width - 2, 32])
self.assertListEqual(layer.depthwise_kernel.get_shape().as_list(),
[3, 3, 4, 2])
self.assertListEqual(layer.pointwise_kernel.get_shape().as_list(),
[1, 1, 8, 32])
self.assertListEqual(layer.bias.get_shape().as_list(), [32])
def testCreateSeparableConv2DIntegerKernelSize(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 4))
layer = conv_layers.SeparableConv2D(32, 3)
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(),
[5, height - 2, width - 2, 32])
self.assertListEqual(layer.depthwise_kernel.get_shape().as_list(),
[3, 3, 4, 1])
self.assertListEqual(layer.pointwise_kernel.get_shape().as_list(),
[1, 1, 4, 32])
self.assertListEqual(layer.bias.get_shape().as_list(), [32])
def testCreateSeparableConv2DChannelsFirst(self):
with ops.Graph().as_default():
height, width = 7, 9
images = random_ops.random_uniform((5, 4, height, width))
layer = conv_layers.SeparableConv2D(
32, [3, 3], data_format='channels_first')
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(),
[5, 32, height - 2, width - 2])
self.assertListEqual(layer.depthwise_kernel.get_shape().as_list(),
[3, 3, 4, 1])
self.assertListEqual(layer.pointwise_kernel.get_shape().as_list(),
[1, 1, 4, 32])
self.assertListEqual(layer.bias.get_shape().as_list(), [32])
def testSeparableConv2DPaddingSame(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 32), seed=1)
layer = conv_layers.SeparableConv2D(
64, images.get_shape()[1:3], padding='same')
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(), [5, height, width, 64])
def testCreateSeparableConvWithStrides(self):
with ops.Graph().as_default():
height, width = 6, 8
# Test strides tuple
images = random_ops.random_uniform((5, height, width, 3), seed=1)
layer = conv_layers.SeparableConv2D(
32, [3, 3], strides=(2, 2), padding='same')
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(),
                           [5, height // 2, width // 2, 32])
# Test strides integer
layer = conv_layers.SeparableConv2D(32, [3, 3], strides=2, padding='same')
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(),
                           [5, height // 2, width // 2, 32])
# Test unequal strides
layer = conv_layers.SeparableConv2D(
32, [3, 3], strides=(2, 1), padding='same')
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(),
                           [5, height // 2, width, 32])
def testCreateSeparableConvWithStridesChannelsFirst(self):
with ops.Graph().as_default():
data_format = 'channels_first'
height, width = 6, 8
# Test strides tuple
images = random_ops.random_uniform((5, 3, height, width), seed=1)
layer = conv_layers.SeparableConv2D(
32, [3, 3], strides=(2, 2), padding='same', data_format=data_format)
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(),
                           [5, 32, height // 2, width // 2])
# Test strides integer
layer = conv_layers.SeparableConv2D(32, [3, 3], strides=2, padding='same',
data_format=data_format)
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(),
                           [5, 32, height // 2, width // 2])
# Test unequal strides
layer = conv_layers.SeparableConv2D(
32, [3, 3], strides=(2, 1), padding='same', data_format=data_format)
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(),
                           [5, 32, height // 2, width])
def testFunctionalConv2DReuse(self):
with ops.Graph().as_default():
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 3), seed=1)
conv_layers.separable_conv2d(images, 32, [3, 3], name='sepconv1')
self.assertEqual(len(variables.trainable_variables()), 3)
conv_layers.separable_conv2d(
images, 32, [3, 3], name='sepconv1', reuse=True)
self.assertEqual(len(variables.trainable_variables()), 3)
def testFunctionalConv2DReuseFromScope(self):
with ops.Graph().as_default():
with variable_scope.variable_scope('scope'):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 3), seed=1)
conv_layers.separable_conv2d(images, 32, [3, 3], name='sepconv1')
self.assertEqual(len(variables.trainable_variables()), 3)
with variable_scope.variable_scope('scope', reuse=True):
conv_layers.separable_conv2d(images, 32, [3, 3], name='sepconv1')
self.assertEqual(len(variables.trainable_variables()), 3)
def testFunctionalConv2DInitializerFromScope(self):
with ops.Graph().as_default(), self.cached_session():
with variable_scope.variable_scope(
'scope', initializer=init_ops.ones_initializer()):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 3), seed=1)
conv_layers.separable_conv2d(images, 32, [3, 3], name='sepconv1')
weights = variables.trainable_variables()
# Check the names of weights in order.
self.assertTrue('depthwise_kernel' in weights[0].name)
self.assertTrue('pointwise_kernel' in weights[1].name)
self.assertTrue('bias' in weights[2].name)
self.evaluate(variables.global_variables_initializer())
weights = self.evaluate(weights)
# Check that the kernel weights got initialized to ones (from scope)
self.assertAllClose(weights[0], np.ones((3, 3, 3, 1)))
self.assertAllClose(weights[1], np.ones((1, 1, 3, 32)))
# Check that the bias still got initialized to zeros.
self.assertAllClose(weights[2], np.zeros((32)))
def testFunctionalConv2DNoReuse(self):
with ops.Graph().as_default():
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 3), seed=1)
conv_layers.separable_conv2d(images, 32, [3, 3])
self.assertEqual(len(variables.trainable_variables()), 3)
conv_layers.separable_conv2d(images, 32, [3, 3])
self.assertEqual(len(variables.trainable_variables()), 6)
def testSeparableConv2DDepthwiseRegularizer(self):
with ops.Graph().as_default():
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 4))
reg = lambda x: 0.1 * math_ops.reduce_sum(x)
layer = conv_layers.SeparableConv2D(32, [3, 3], depthwise_regularizer=reg)
layer.apply(images)
loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(len(loss_keys), 1)
self.evaluate([v.initializer for v in layer.variables])
self.assertListEqual(
self.evaluate(layer.losses), self.evaluate(loss_keys))
def testSeparableConv2DPointwiseRegularizer(self):
with ops.Graph().as_default():
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 4))
reg = lambda x: 0.1 * math_ops.reduce_sum(x)
layer = conv_layers.SeparableConv2D(32, [3, 3], pointwise_regularizer=reg)
layer.apply(images)
loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(len(loss_keys), 1)
self.evaluate([v.initializer for v in layer.variables])
self.assertListEqual(
self.evaluate(layer.losses), self.evaluate(loss_keys))
def testSeparableConv2DBiasRegularizer(self):
with ops.Graph().as_default():
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 4))
reg = lambda x: 0.1 * math_ops.reduce_sum(x)
layer = conv_layers.SeparableConv2D(32, [3, 3], bias_regularizer=reg)
layer.apply(images)
loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(len(loss_keys), 1)
self.evaluate([v.initializer for v in layer.variables])
self.assertListEqual(
self.evaluate(layer.losses), self.evaluate(loss_keys))
def testSeparableConv2DNoBias(self):
with ops.Graph().as_default():
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 4))
layer = conv_layers.SeparableConv2D(
32, [3, 3], activation=nn_ops.relu, use_bias=False)
output = layer.apply(images)
self.assertEqual(output.op.name, 'separable_conv2d/Relu')
self.assertListEqual(output.get_shape().as_list(),
[5, height - 2, width - 2, 32])
self.assertListEqual(layer.depthwise_kernel.get_shape().as_list(),
[3, 3, 4, 1])
self.assertListEqual(layer.pointwise_kernel.get_shape().as_list(),
[1, 1, 4, 32])
self.assertEqual(layer.bias, None)
def testConstraints(self):
d_constraint = lambda x: x / math_ops.reduce_sum(x)
p_constraint = lambda x: x / math_ops.reduce_sum(x)
b_constraint = lambda x: x / math_ops.reduce_max(x)
layer = conv_layers.SeparableConv2D(2, 3,
depthwise_constraint=d_constraint,
pointwise_constraint=p_constraint,
bias_constraint=b_constraint)
inputs = random_ops.random_uniform((5, 3, 3, 5), seed=1)
layer(inputs)
self.assertEqual(layer.depthwise_constraint, d_constraint)
self.assertEqual(layer.pointwise_constraint, p_constraint)
self.assertEqual(layer.bias_constraint, b_constraint)
class Conv2DTransposeTest(test.TestCase):
def testInvalidDataFormat(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 3), seed=1)
with self.assertRaisesRegex(ValueError, 'data_format'):
conv_layers.conv2d_transpose(images, 32, 3, data_format='invalid')
def testInvalidStrides(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 3), seed=1)
with self.assertRaisesRegex(ValueError, 'strides'):
conv_layers.conv2d_transpose(images, 32, 3, strides=(1, 2, 3))
with self.assertRaisesRegex(ValueError, 'strides'):
conv_layers.conv2d_transpose(images, 32, 3, strides=None)
def testInvalidKernelSize(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 3), seed=1)
with self.assertRaisesRegex(ValueError, 'kernel_size'):
conv_layers.conv2d_transpose(images, 32, (1, 2, 3))
with self.assertRaisesRegex(ValueError, 'kernel_size'):
conv_layers.conv2d_transpose(images, 32, None)
def testCreateConv2DTranspose(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 4))
layer = conv_layers.Conv2DTranspose(32, [3, 3], activation=nn_ops.relu)
output = layer.apply(images)
if not context.executing_eagerly():
self.assertEqual(output.op.name, 'conv2d_transpose/Relu')
self.assertListEqual(output.get_shape().as_list(),
[5, height + 2, width + 2, 32])
self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 32, 4])
self.assertListEqual(layer.bias.get_shape().as_list(), [32])
def testConv2DTransposeFloat16(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 4), dtype='float16')
output = conv_layers.conv2d_transpose(images, 32, [3, 3],
activation=nn_ops.relu)
self.assertListEqual(output.get_shape().as_list(),
[5, height + 2, width + 2, 32])
def testCreateConv2DTransposeIntegerKernelSize(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 4))
layer = conv_layers.Conv2DTranspose(32, 3)
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(),
[5, height + 2, width + 2, 32])
self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 32, 4])
self.assertListEqual(layer.bias.get_shape().as_list(), [32])
def testCreateConv2DTransposeChannelsFirst(self):
height, width = 7, 9
images = random_ops.random_uniform((5, 4, height, width))
layer = conv_layers.Conv2DTranspose(
32, [3, 3], data_format='channels_first')
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(),
[5, 32, height + 2, width + 2])
self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 32, 4])
self.assertListEqual(layer.bias.get_shape().as_list(), [32])
def testConv2DTransposePaddingSame(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 32), seed=1)
layer = conv_layers.Conv2DTranspose(
64, images.get_shape()[1:3], padding='same')
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(), [5, height, width, 64])
def testCreateConv2DTransposeWithStrides(self):
height, width = 6, 8
# Test strides tuple
images = random_ops.random_uniform((5, height, width, 3), seed=1)
layer = conv_layers.Conv2DTranspose(
32, [3, 3], strides=(2, 2), padding='same')
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(),
[5, height * 2, width * 2, 32])
# Test strides integer
layer = conv_layers.Conv2DTranspose(32, [3, 3], strides=2, padding='same')
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(),
[5, height * 2, width * 2, 32])
# Test unequal strides
layer = conv_layers.Conv2DTranspose(
32, [3, 3], strides=(2, 1), padding='same')
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(),
[5, height * 2, width, 32])
def testConv2DTransposeKernelRegularizer(self):
with ops.Graph().as_default():
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 4))
reg = lambda x: 0.1 * math_ops.reduce_sum(x)
layer = conv_layers.Conv2DTranspose(32, [3, 3], kernel_regularizer=reg)
layer.apply(images)
loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(len(loss_keys), 1)
self.evaluate([v.initializer for v in layer.variables])
self.assertListEqual(
self.evaluate(layer.losses), self.evaluate(loss_keys))
def testConv2DTransposeBiasRegularizer(self):
with ops.Graph().as_default():
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 4))
reg = lambda x: 0.1 * math_ops.reduce_sum(x)
layer = conv_layers.Conv2DTranspose(32, [3, 3], bias_regularizer=reg)
layer.apply(images)
loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(len(loss_keys), 1)
self.evaluate([v.initializer for v in layer.variables])
self.assertListEqual(
self.evaluate(layer.losses), self.evaluate(loss_keys))
def testConv2DTransposeNoBias(self):
with ops.Graph().as_default():
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 4))
layer = conv_layers.Conv2DTranspose(
32, [3, 3], activation=nn_ops.relu, use_bias=False)
output = layer.apply(images)
self.assertEqual(output.op.name, 'conv2d_transpose/Relu')
self.assertListEqual(output.get_shape().as_list(),
[5, height + 2, width + 2, 32])
self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 32, 4])
self.assertEqual(layer.bias, None)
def testFunctionalConv2DTransposeReuse(self):
with ops.Graph().as_default():
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 3), seed=1)
conv_layers.conv2d_transpose(images, 32, [3, 3], name='deconv1')
self.assertEqual(len(variables.trainable_variables()), 2)
conv_layers.conv2d_transpose(
images, 32, [3, 3], name='deconv1', reuse=True)
self.assertEqual(len(variables.trainable_variables()), 2)
def testFunctionalConv2DTransposeReuseFromScope(self):
with ops.Graph().as_default():
with variable_scope.variable_scope('scope'):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 3), seed=1)
conv_layers.conv2d_transpose(images, 32, [3, 3], name='deconv1')
self.assertEqual(len(variables.trainable_variables()), 2)
with variable_scope.variable_scope('scope', reuse=True):
conv_layers.conv2d_transpose(images, 32, [3, 3], name='deconv1')
self.assertEqual(len(variables.trainable_variables()), 2)
def testFunctionalConv2DTransposeInitializerFromScope(self):
with ops.Graph().as_default(), self.cached_session():
with variable_scope.variable_scope(
'scope', initializer=init_ops.ones_initializer()):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 3), seed=1)
conv_layers.conv2d_transpose(images, 32, [3, 3], name='deconv1')
weights = variables.trainable_variables()
# Check the names of weights in order.
self.assertTrue('kernel' in weights[0].name)
self.assertTrue('bias' in weights[1].name)
self.evaluate(variables.global_variables_initializer())
weights = self.evaluate(weights)
# Check that the kernel weights got initialized to ones (from scope)
self.assertAllClose(weights[0], np.ones((3, 3, 32, 3)))
# Check that the bias still got initialized to zeros.
self.assertAllClose(weights[1], np.zeros((32)))
def testFunctionalConv2DTransposeNoReuse(self):
with ops.Graph().as_default():
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 3), seed=1)
conv_layers.conv2d_transpose(images, 32, [3, 3])
self.assertEqual(len(variables.trainable_variables()), 2)
conv_layers.conv2d_transpose(images, 32, [3, 3])
self.assertEqual(len(variables.trainable_variables()), 4)
def testConstraints(self):
k_constraint = lambda x: x / math_ops.reduce_sum(x)
b_constraint = lambda x: x / math_ops.reduce_max(x)
layer = conv_layers.Conv2DTranspose(2, 3,
kernel_constraint=k_constraint,
bias_constraint=b_constraint)
inputs = random_ops.random_uniform((5, 3, 3, 5), seed=1)
layer(inputs)
self.assertEqual(layer.kernel_constraint, k_constraint)
self.assertEqual(layer.bias_constraint, b_constraint)
class Conv3DTransposeTest(test.TestCase):
def testInvalidDataFormat(self):
depth, height, width = 5, 7, 9
volumes = random_ops.random_uniform((5, depth, height, width, 32), seed=1)
with self.assertRaisesRegex(ValueError, 'data_format'):
conv_layers.conv3d_transpose(volumes, 4, 3, data_format='invalid')
def testInvalidStrides(self):
depth, height, width = 5, 7, 9
volumes = random_ops.random_uniform((5, depth, height, width, 32), seed=1)
with self.assertRaisesRegex(ValueError, 'strides'):
conv_layers.conv3d_transpose(volumes, 4, 3, strides=(1, 2))
with self.assertRaisesRegex(ValueError, 'strides'):
conv_layers.conv3d_transpose(volumes, 4, 3, strides=None)
def testInvalidKernelSize(self):
depth, height, width = 5, 7, 9
volumes = random_ops.random_uniform((5, depth, height, width, 32), seed=1)
with self.assertRaisesRegex(ValueError, 'kernel_size'):
conv_layers.conv3d_transpose(volumes, 4, (1, 2))
with self.assertRaisesRegex(ValueError, 'kernel_size'):
conv_layers.conv3d_transpose(volumes, 4, None)
def testCreateConv3DTranspose(self):
depth, height, width = 5, 7, 9
volumes = random_ops.random_uniform((5, depth, height, width, 32))
layer = conv_layers.Conv3DTranspose(4, [3, 3, 3], activation=nn_ops.relu)
output = layer.apply(volumes)
if not context.executing_eagerly():
self.assertEqual(output.op.name, 'conv3d_transpose/Relu')
self.assertListEqual(output.get_shape().as_list(),
[5, depth + 2, height + 2, width + 2, 4])
self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 3, 4, 32])
self.assertListEqual(layer.bias.get_shape().as_list(), [4])
def testCreateConv3DTransposeIntegerKernelSize(self):
depth, height, width = 5, 7, 9
volumes = random_ops.random_uniform((5, depth, height, width, 32))
layer = conv_layers.Conv3DTranspose(4, 3)
output = layer.apply(volumes)
self.assertListEqual(output.get_shape().as_list(),
[5, depth + 2, height + 2, width + 2, 4])
self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 3, 4, 32])
self.assertListEqual(layer.bias.get_shape().as_list(), [4])
def testCreateConv3DTransposeChannelsFirst(self):
with ops.Graph().as_default():
depth, height, width = 5, 7, 9
volumes = random_ops.random_uniform((5, 32, depth, height, width))
layer = conv_layers.Conv3DTranspose(
4, [3, 3, 3], data_format='channels_first')
output = layer.apply(volumes)
self.assertListEqual(output.get_shape().as_list(),
[5, 4, depth + 2, height + 2, width + 2])
self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 3, 4, 32])
self.assertListEqual(layer.bias.get_shape().as_list(), [4])
def testConv3DTransposePaddingSame(self):
depth, height, width = 5, 7, 9
volumes = random_ops.random_uniform((5, depth, height, width, 64), seed=1)
layer = conv_layers.Conv3DTranspose(
32, volumes.get_shape()[1:4], padding='same')
output = layer.apply(volumes)
self.assertListEqual(output.get_shape().as_list(),
[5, depth, height, width, 32])
def testCreateConv3DTransposeWithStrides(self):
depth, height, width = 4, 6, 8
# Test strides tuple.
volumes = random_ops.random_uniform((5, depth, height, width, 32), seed=1)
layer = conv_layers.Conv3DTranspose(
4, [3, 3, 3], strides=(2, 2, 2), padding='same')
output = layer.apply(volumes)
self.assertListEqual(output.get_shape().as_list(),
[5, depth * 2, height * 2, width * 2, 4])
# Test strides integer.
layer = conv_layers.Conv3DTranspose(4, [3, 3, 3], strides=2, padding='same')
output = layer.apply(volumes)
self.assertListEqual(output.get_shape().as_list(),
[5, depth * 2, height * 2, width * 2, 4])
# Test unequal strides.
layer = conv_layers.Conv3DTranspose(
4, [3, 3, 3], strides=(2, 1, 1), padding='same')
output = layer.apply(volumes)
self.assertListEqual(output.get_shape().as_list(),
[5, depth * 2, height, width, 4])
def testConv3DTransposeKernelRegularizer(self):
with ops.Graph().as_default():
depth, height, width = 5, 7, 9
volumes = random_ops.random_uniform((5, depth, height, width, 32))
reg = lambda x: 0.1 * math_ops.reduce_sum(x)
layer = conv_layers.Conv3DTranspose(4, [3, 3, 3], kernel_regularizer=reg)
layer.apply(volumes)
loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(len(loss_keys), 1)
self.evaluate([v.initializer for v in layer.variables])
self.assertListEqual(
self.evaluate(layer.losses), self.evaluate(loss_keys))
def testConv3DTransposeBiasRegularizer(self):
with ops.Graph().as_default():
depth, height, width = 5, 7, 9
volumes = random_ops.random_uniform((5, depth, height, width, 32))
reg = lambda x: 0.1 * math_ops.reduce_sum(x)
layer = conv_layers.Conv3DTranspose(4, [3, 3, 3], bias_regularizer=reg)
layer.apply(volumes)
loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(len(loss_keys), 1)
self.evaluate([v.initializer for v in layer.variables])
self.assertListEqual(
self.evaluate(layer.losses), self.evaluate(loss_keys))
def testConv3DTransposeNoBias(self):
with ops.Graph().as_default():
depth, height, width = 5, 7, 9
volumes = random_ops.random_uniform((5, depth, height, width, 32))
layer = conv_layers.Conv3DTranspose(
4, [3, 3, 3], activation=nn_ops.relu, use_bias=False)
output = layer.apply(volumes)
self.assertEqual(output.op.name, 'conv3d_transpose/Relu')
self.assertListEqual(output.get_shape().as_list(),
[5, depth + 2, height + 2, width + 2, 4])
self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 3, 4, 32])
self.assertEqual(layer.bias, None)
def testFunctionalConv3DTransposeReuse(self):
with ops.Graph().as_default():
depth, height, width = 5, 7, 9
volumes = random_ops.random_uniform((5, depth, height, width, 32), seed=1)
conv_layers.conv3d_transpose(volumes, 4, [3, 3, 3], name='deconv1')
self.assertEqual(len(variables.trainable_variables()), 2)
conv_layers.conv3d_transpose(
volumes, 4, [3, 3, 3], name='deconv1', reuse=True)
self.assertEqual(len(variables.trainable_variables()), 2)
def testFunctionalConv3DTransposeReuseFromScope(self):
with ops.Graph().as_default():
with variable_scope.variable_scope('scope'):
depth, height, width = 5, 7, 9
volumes = random_ops.random_uniform(
(5, depth, height, width, 32), seed=1)
conv_layers.conv3d_transpose(volumes, 4, [3, 3, 3], name='deconv1')
self.assertEqual(len(variables.trainable_variables()), 2)
with variable_scope.variable_scope('scope', reuse=True):
conv_layers.conv3d_transpose(volumes, 4, [3, 3, 3], name='deconv1')
self.assertEqual(len(variables.trainable_variables()), 2)
def testFunctionalConv3DTransposeInitializerFromScope(self):
with ops.Graph().as_default(), self.cached_session():
with variable_scope.variable_scope(
'scope', initializer=init_ops.ones_initializer()):
depth, height, width = 5, 7, 9
volumes = random_ops.random_uniform(
(5, depth, height, width, 32), seed=1)
conv_layers.conv3d_transpose(volumes, 4, [3, 3, 3], name='deconv1')
weights = variables.trainable_variables()
# Check the names of weights in order.
self.assertTrue('kernel' in weights[0].name)
self.assertTrue('bias' in weights[1].name)
self.evaluate(variables.global_variables_initializer())
weights = self.evaluate(weights)
# Check that the kernel weights got initialized to ones (from scope)
self.assertAllClose(weights[0], np.ones((3, 3, 3, 4, 32)))
# Check that the bias still got initialized to zeros.
self.assertAllClose(weights[1], np.zeros((4)))
def testFunctionalConv3DTransposeNoReuse(self):
with ops.Graph().as_default():
depth, height, width = 5, 7, 9
volumes = random_ops.random_uniform((5, depth, height, width, 32), seed=1)
conv_layers.conv3d_transpose(volumes, 4, [3, 3, 3])
self.assertEqual(len(variables.trainable_variables()), 2)
conv_layers.conv3d_transpose(volumes, 4, [3, 3, 3])
self.assertEqual(len(variables.trainable_variables()), 4)
def testConstraints(self):
k_constraint = lambda x: x / math_ops.reduce_sum(x)
b_constraint = lambda x: x / math_ops.reduce_max(x)
layer = conv_layers.Conv3DTranspose(2, 3,
kernel_constraint=k_constraint,
bias_constraint=b_constraint)
inputs = random_ops.random_uniform((5, 3, 3, 3, 5), seed=1)
layer(inputs)
self.assertEqual(layer.kernel_constraint, k_constraint)
self.assertEqual(layer.bias_constraint, b_constraint)
if __name__ == '__main__':
test.main()
|
|
#!/usr/bin/env python
# Copyright 2010 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Methods for checking EcmaScript files for indentation issues."""
__author__ = ('robbyw@google.com (Robert Walker)')
import gflags as flags
from closure_linter import ecmametadatapass
from closure_linter import errors
from closure_linter import javascripttokens
from closure_linter import tokenutil
from closure_linter.common import error
from closure_linter.common import position
flags.DEFINE_boolean('debug_indentation', False,
'Whether to print debugging information for indentation.')
# Shorthand
Context = ecmametadatapass.EcmaContext
Error = error.Error
Position = position.Position
Type = javascripttokens.JavaScriptTokenType
# The general approach:
#
# 1. Build a stack of tokens that can affect indentation.
# For each token, we determine if it is a block or continuation token.
# Some tokens need to be temporarily overwritten in case they are removed
# before the end of the line.
# Much of the work here is determining which tokens to keep on the stack
# at each point. Operators, for example, should be removed once their
# expression or line is gone, while parentheses must stay until the matching
# end parentheses is found.
#
# 2. Given that stack, determine the allowable indentations.
# Due to flexible indentation rules in JavaScript, there may be many
# allowable indentations for each stack. We follow the general
# "no false positives" approach of GJsLint and build the most permissive
# set possible.
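#
# As an illustrative sketch (hypothetical input), given:
#
#   foo(bar,
#       baz);
#
# step 1 pushes a TokenInfo for the '(' onto the stack, and step 2 derives
# the allowable indentations for the second line from that stack: the 4-space
# continuation indent and the hard stop lined up one column past the '('
# (which here happen to coincide at column 4).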
class TokenInfo(object):
"""Stores information about a token.
Attributes:
token: The token
is_block: Whether the token represents a block indentation.
is_transient: Whether the token should be automatically removed without
finding a matching end token.
overridden_by: TokenInfo for a token that overrides the indentation that
this token would require.
is_permanent_override: Whether the override on this token should persist
even after the overriding token is removed from the stack. For example:
x([
1],
2);
needs this to be set so the last line is not required to be a continuation
indent.
line_number: The effective line number of this token. Will either be the
actual line number or the one before it in the case of a mis-wrapped
operator.
"""
def __init__(self, token, is_block=False):
"""Initializes a TokenInfo object.
Args:
token: The token
is_block: Whether the token represents a block indentation.
"""
self.token = token
self.overridden_by = None
self.is_permanent_override = False
self.is_block = is_block
self.is_transient = not is_block and token.type not in (
Type.START_PAREN, Type.START_PARAMETERS)
self.line_number = token.line_number
def __repr__(self):
result = '\n %s' % self.token
if self.overridden_by:
result = '%s OVERRIDDEN [by "%s"]' % (
result, self.overridden_by.token.string)
result += ' {is_block: %s, is_transient: %s}' % (
self.is_block, self.is_transient)
return result
class IndentationRules(object):
"""EmcaScript indentation rules.
Can be used to find common indentation errors in JavaScript, ActionScript and
  other Ecma-like scripting languages.
"""
def __init__(self):
"""Initializes the IndentationRules checker."""
self._stack = []
# Map from line number to number of characters it is off in indentation.
self._start_index_offset = {}
def Finalize(self):
if self._stack:
old_stack = self._stack
self._stack = []
raise Exception('INTERNAL ERROR: indentation stack is not empty: %r' %
old_stack)
def CheckToken(self, token, state):
"""Checks a token for indentation errors.
Args:
token: The current token under consideration
state: Additional information about the current tree state
Returns:
An error array [error code, error string, error token] if the token is
improperly indented, or None if indentation is correct.
"""
token_type = token.type
indentation_errors = []
stack = self._stack
is_first = self._IsFirstNonWhitespaceTokenInLine(token)
# Add tokens that could decrease indentation before checking.
if token_type == Type.END_PAREN:
self._PopTo(Type.START_PAREN)
elif token_type == Type.END_PARAMETERS:
self._PopTo(Type.START_PARAMETERS)
elif token_type == Type.END_BRACKET:
self._PopTo(Type.START_BRACKET)
elif token_type == Type.END_BLOCK:
start_token = self._PopTo(Type.START_BLOCK)
# Check for required goog.scope comment.
if start_token:
goog_scope = tokenutil.GoogScopeOrNoneFromStartBlock(start_token.token)
if goog_scope is not None:
if not token.line.endswith('; // goog.scope\n'):
if (token.line.find('//') > -1 and
token.line.find('goog.scope') >
token.line.find('//')):
indentation_errors.append([
errors.MALFORMED_END_OF_SCOPE_COMMENT,
('Malformed end of goog.scope comment. Please use the '
'exact following syntax to close the scope:\n'
'}); // goog.scope'),
token,
Position(token.start_index, token.length)])
else:
indentation_errors.append([
errors.MISSING_END_OF_SCOPE_COMMENT,
('Missing comment for end of goog.scope which opened at line '
'%d. End the scope with:\n'
'}); // goog.scope' %
(start_token.line_number)),
token,
Position(token.start_index, token.length)])
elif token_type == Type.KEYWORD and token.string in ('case', 'default'):
self._Add(self._PopTo(Type.START_BLOCK))
elif is_first and token.string == '.':
# This token should have been on the previous line, so treat it as if it
# was there.
info = TokenInfo(token)
info.line_number = token.line_number - 1
self._Add(info)
elif token_type == Type.SEMICOLON:
self._PopTransient()
not_binary_operator = (token_type != Type.OPERATOR or
token.metadata.IsUnaryOperator())
not_dot = token.string != '.'
if is_first and not_binary_operator and not_dot and token.type not in (
Type.COMMENT, Type.DOC_PREFIX, Type.STRING_TEXT):
if flags.FLAGS.debug_indentation:
print 'Line #%d: stack %r' % (token.line_number, stack)
# Ignore lines that start in JsDoc since we don't check them properly yet.
# TODO(robbyw): Support checking JsDoc indentation.
# Ignore lines that start as multi-line strings since indentation is N/A.
# Ignore lines that start with operators since we report that already.
# Ignore lines with tabs since we report that already.
expected = self._GetAllowableIndentations()
actual = self._GetActualIndentation(token)
# Special case comments describing else, case, and default. Allow them
# to outdent to the parent block.
if token_type in Type.COMMENT_TYPES:
next_code = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES)
if next_code and next_code.type == Type.END_BLOCK:
next_code = tokenutil.SearchExcept(next_code, Type.NON_CODE_TYPES)
if next_code and next_code.string in ('else', 'case', 'default'):
# TODO(robbyw): This almost certainly introduces false negatives.
expected |= self._AddToEach(expected, -2)
if actual >= 0 and actual not in expected:
expected = sorted(expected)
indentation_errors.append([
errors.WRONG_INDENTATION,
'Wrong indentation: expected any of {%s} but got %d' % (
', '.join(
['%d' % x for x in expected]), actual),
token,
Position(actual, expected[0])])
self._start_index_offset[token.line_number] = expected[0] - actual
# Add tokens that could increase indentation.
if token_type == Type.START_BRACKET:
self._Add(TokenInfo(
token=token,
is_block=token.metadata.context.type == Context.ARRAY_LITERAL))
elif token_type == Type.START_BLOCK or token.metadata.is_implied_block:
self._Add(TokenInfo(token=token, is_block=True))
elif token_type in (Type.START_PAREN, Type.START_PARAMETERS):
self._Add(TokenInfo(token=token, is_block=False))
elif token_type == Type.KEYWORD and token.string == 'return':
self._Add(TokenInfo(token))
elif not token.IsLastInLine() and (
token.IsAssignment() or token.IsOperator('?')):
self._Add(TokenInfo(token=token))
# Handle implied block closes.
if token.metadata.is_implied_block_close:
self._PopToImpliedBlock()
# Add some tokens only if they appear at the end of the line.
is_last = self._IsLastCodeInLine(token)
if is_last:
if token_type == Type.OPERATOR:
if token.string == ':':
if stack and stack[-1].token.string == '?':
# When a ternary : is on a different line than its '?', it doesn't
# add indentation.
if token.line_number == stack[-1].token.line_number:
self._Add(TokenInfo(token))
elif token.metadata.context.type == Context.CASE_BLOCK:
            # Pop transient tokens from, say, line continuations, e.g.,
# case x.
# y:
# Want to pop the transient 4 space continuation indent.
self._PopTransient()
# Starting the body of the case statement, which is a type of
# block.
self._Add(TokenInfo(token=token, is_block=True))
elif token.metadata.context.type == Context.LITERAL_ELEMENT:
# When in an object literal, acts as operator indicating line
# continuations.
self._Add(TokenInfo(token))
else:
# ':' might also be a statement label, no effect on indentation in
# this case.
pass
elif token.string != ',':
self._Add(TokenInfo(token))
else:
# The token is a comma.
if token.metadata.context.type == Context.VAR:
self._Add(TokenInfo(token))
elif token.metadata.context.type != Context.PARAMETERS:
self._PopTransient()
elif (token.string.endswith('.')
and token_type in (Type.IDENTIFIER, Type.NORMAL)):
self._Add(TokenInfo(token))
elif token_type == Type.PARAMETERS and token.string.endswith(','):
# Parameter lists.
self._Add(TokenInfo(token))
elif token.IsKeyword('var'):
self._Add(TokenInfo(token))
elif token.metadata.is_implied_semicolon:
self._PopTransient()
elif token.IsAssignment():
self._Add(TokenInfo(token))
return indentation_errors
def _AddToEach(self, original, amount):
"""Returns a new set with the given amount added to each element.
Args:
original: The original set of numbers
amount: The amount to add to each element
Returns:
      A new set with the amount added to each element of the original set.
"""
return set([x + amount for x in original])
_HARD_STOP_TYPES = (Type.START_PAREN, Type.START_PARAMETERS,
Type.START_BRACKET)
_HARD_STOP_STRINGS = ('return', '?')
def _IsHardStop(self, token):
"""Determines if the given token can have a hard stop after it.
Args:
token: token to examine
Returns:
Whether the token can have a hard stop after it.
Hard stops are indentations defined by the position of another token as in
indentation lined up with return, (, [, and ?.
"""
return (token.type in self._HARD_STOP_TYPES or
token.string in self._HARD_STOP_STRINGS or
token.IsAssignment())
def _GetAllowableIndentations(self):
"""Computes the set of allowable indentations.
Returns:
The set of allowable indentations, given the current stack.
"""
expected = set([0])
hard_stops = set([])
# Whether the tokens are still in the same continuation, meaning additional
# indentation is optional. As an example:
# x = 5 +
# 6 +
# 7;
# The second '+' does not add any required indentation.
in_same_continuation = False
for token_info in self._stack:
token = token_info.token
# Handle normal additive indentation tokens.
if not token_info.overridden_by and token.string != 'return':
if token_info.is_block:
expected = self._AddToEach(expected, 2)
hard_stops = self._AddToEach(hard_stops, 2)
in_same_continuation = False
elif in_same_continuation:
expected |= self._AddToEach(expected, 4)
hard_stops |= self._AddToEach(hard_stops, 4)
else:
expected = self._AddToEach(expected, 4)
hard_stops |= self._AddToEach(hard_stops, 4)
in_same_continuation = True
# Handle hard stops after (, [, return, =, and ?
if self._IsHardStop(token):
override_is_hard_stop = (token_info.overridden_by and
self._IsHardStop(
token_info.overridden_by.token))
if not override_is_hard_stop:
start_index = token.start_index
if token.line_number in self._start_index_offset:
start_index += self._start_index_offset[token.line_number]
if (token.type in (Type.START_PAREN, Type.START_PARAMETERS) and
not token_info.overridden_by):
hard_stops.add(start_index + 1)
elif token.string == 'return' and not token_info.overridden_by:
hard_stops.add(start_index + 7)
elif token.type == Type.START_BRACKET:
hard_stops.add(start_index + 1)
elif token.IsAssignment():
hard_stops.add(start_index + len(token.string) + 1)
elif token.IsOperator('?') and not token_info.overridden_by:
hard_stops.add(start_index + 2)
return (expected | hard_stops) or set([0])
def _GetActualIndentation(self, token):
"""Gets the actual indentation of the line containing the given token.
Args:
token: Any token on the line.
Returns:
The actual indentation of the line containing the given token. Returns
-1 if this line should be ignored due to the presence of tabs.
"""
# Move to the first token in the line
token = tokenutil.GetFirstTokenInSameLine(token)
# If it is whitespace, it is the indentation.
if token.type == Type.WHITESPACE:
if token.string.find('\t') >= 0:
return -1
else:
return len(token.string)
elif token.type == Type.PARAMETERS:
return len(token.string) - len(token.string.lstrip())
else:
return 0
def _IsFirstNonWhitespaceTokenInLine(self, token):
"""Determines if the given token is the first non-space token on its line.
Args:
token: The token.
Returns:
True if the token is the first non-whitespace token on its line.
"""
if token.type in (Type.WHITESPACE, Type.BLANK_LINE):
return False
if token.IsFirstInLine():
return True
return (token.previous and token.previous.IsFirstInLine() and
token.previous.type == Type.WHITESPACE)
def _IsLastCodeInLine(self, token):
"""Determines if the given token is the last code token on its line.
Args:
token: The token.
Returns:
True if the token is the last code token on its line.
"""
if token.type in Type.NON_CODE_TYPES:
return False
start_token = token
while True:
token = token.next
if not token or token.line_number != start_token.line_number:
return True
if token.type not in Type.NON_CODE_TYPES:
return False
def _Add(self, token_info):
"""Adds the given token info to the stack.
Args:
token_info: The token information to add.
"""
if self._stack and self._stack[-1].token == token_info.token:
# Don't add the same token twice.
return
if token_info.is_block or token_info.token.type == Type.START_PAREN:
token_info.overridden_by = (
tokenutil.GoogScopeOrNoneFromStartBlock(token_info.token))
index = 1
while index <= len(self._stack):
stack_info = self._stack[-index]
stack_token = stack_info.token
if stack_info.line_number == token_info.line_number:
# In general, tokens only override each other when they are on
# the same line.
stack_info.overridden_by = token_info
if (token_info.token.type == Type.START_BLOCK and
(stack_token.IsAssignment() or
stack_token.type in (Type.IDENTIFIER, Type.START_PAREN))):
# Multi-line blocks have lasting overrides, as in:
# callFn({
# a: 10
# },
# 30);
# b/11450054. If a string is not closed properly then close_block
# could be null.
close_block = token_info.token.metadata.context.end_token
stack_info.is_permanent_override = close_block and (
close_block.line_number != token_info.token.line_number)
elif (token_info.token.type == Type.START_BLOCK and
token_info.token.metadata.context.type == Context.BLOCK and
(stack_token.IsAssignment() or
stack_token.type == Type.IDENTIFIER)):
# When starting a function block, the override can transcend lines.
# For example
# long.long.name = function(
# a) {
# In this case the { and the = are on different lines. But the
# override should still apply.
stack_info.overridden_by = token_info
stack_info.is_permanent_override = True
else:
break
index += 1
self._stack.append(token_info)
def _Pop(self):
"""Pops the top token from the stack.
Returns:
The popped token info.
"""
token_info = self._stack.pop()
if token_info.token.type not in (Type.START_BLOCK, Type.START_BRACKET):
# Remove any temporary overrides.
self._RemoveOverrides(token_info)
else:
# For braces and brackets, which can be object and array literals, remove
# overrides when the literal is closed on the same line.
token_check = token_info.token
same_type = token_check.type
goal_type = None
if token_info.token.type == Type.START_BRACKET:
goal_type = Type.END_BRACKET
else:
goal_type = Type.END_BLOCK
line_number = token_info.token.line_number
count = 0
while token_check and token_check.line_number == line_number:
if token_check.type == goal_type:
count -= 1
if not count:
self._RemoveOverrides(token_info)
break
if token_check.type == same_type:
count += 1
token_check = token_check.next
return token_info
def _PopToImpliedBlock(self):
"""Pops the stack until an implied block token is found."""
while not self._Pop().token.metadata.is_implied_block:
pass
def _PopTo(self, stop_type):
"""Pops the stack until a token of the given type is popped.
Args:
stop_type: The type of token to pop to.
Returns:
The token info of the given type that was popped.
"""
last = None
while True:
last = self._Pop()
if last.token.type == stop_type:
break
return last
def _RemoveOverrides(self, token_info):
"""Marks any token that was overridden by this token as active again.
Args:
token_info: The token that is being removed from the stack.
"""
for stack_token in self._stack:
if (stack_token.overridden_by == token_info and
not stack_token.is_permanent_override):
stack_token.overridden_by = None
def _PopTransient(self):
"""Pops all transient tokens - i.e. not blocks, literals, or parens."""
while self._stack and self._stack[-1].is_transient:
self._Pop()
|
|
"""This module contains async actions used to interact with Google Pub/Sub.
Note - this code should be structured so that it could be factored out into
a separate package at some point in the future.
Key references
- Google Cloud Pub/Sub RESTful API @ https://cloud.google.com/pubsub/docs/reference/rest/
- note use of urllib.quote_plus() to encode topic and subscription names
  as per guidance @ https://cloud.google.com/pubsub/docs/overview#names
"""
import base64
import httplib
import json
import logging
import urllib
import tornado.httpclient
import tor_async_google
_logger = logging.getLogger(__name__)
class AsyncGeneratePubSubAccessToken(tor_async_google.AsyncGenerateAccessToken):
"""Async'ly generate an access token scoped to pub/sub operations
from a service account's credentials file.
"""
def __init__(self, credentials_filename, async_state=None):
tor_async_google.AsyncGenerateAccessToken.__init__(
self,
credentials_filename,
'https://www.googleapis.com/auth/pubsub',
async_state)
class HTTPRequest(tor_async_google.HTTPRequest):
"""All async HTTP Requests for the Google Cloud Pubsub service
should be instances of this class. See ```tor_async_google.HTTPRequest```
for the pitch on the design approach/intent.
"""
def __init__(self, *args, **kwargs):
args = tuple(['pubsub'] + list(args))
tor_async_google.HTTPRequest.__init__(self, *args, **kwargs)
class AsyncAction(tor_async_google.AsyncAction):
"""An abstract base class for all pub/sub async actions."""
def create_log_msg_for_google_pubsub_http_client_response(self, response):
return self.create_log_msg_for_http_client_response(response, 'Google Pub/Sub')
class AsyncGetTopic(AsyncAction):
"""Async'ly get a topic. Since there's really nothing to get
for a topic this is really just a "does the topic exist" type
of operation.
References
- RESTful API @ https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.topics/list
"""
# GFD = Get Failure Detail
GFD_OK = 0x0000
GFD_ERROR = 0x0080
GFD_ERROR_GETTING_TOPIC = GFD_ERROR | 0x0001
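    # A detail value denotes failure iff its GFD_ERROR bit is set; the
    # callback's is_ok is derived as
    # ``not bool(get_failure_detail & GFD_ERROR)``.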
def __init__(self, access_token, topic, async_state=None):
AsyncAction.__init__(self, async_state)
self.access_token = access_token
self.topic = topic
self.get_failure_detail = None
self._callback = None
def get(self, callback):
assert self._callback is None
self._callback = callback
request = HTTPRequest(
self.access_token.project_id,
self.access_token,
'/topics/%s' % urllib.quote_plus(self.topic),
method='GET')
http_client = tornado.httpclient.AsyncHTTPClient()
http_client.fetch(
request,
callback=self._on_http_client_fetch_done)
def _on_http_client_fetch_done(self, response):
_logger.info(self.create_log_msg_for_google_pubsub_http_client_response(response))
if response.code == httplib.OK:
_logger.info("Found topic '%s'", self.topic)
self._call_callback(type(self).GFD_OK, True)
return
if response.code == httplib.NOT_FOUND:
_logger.info("Could not find topic '%s'", self.topic)
self._call_callback(type(self).GFD_OK, False)
return
        # :TODO: transient failure or longer term?
        # :TODO: transient failures dealt with by an exponential backoff
        # retry loop with a fixed end time
self.log_error_with_json_response_body("Error getting topic '%s'" % self.topic, response)
self._call_callback(type(self).GFD_ERROR_GETTING_TOPIC)
def _call_callback(self, get_failure_detail, is_found=None):
assert self._callback is not None
assert self.get_failure_detail is None
self.get_failure_detail = get_failure_detail
is_ok = not bool(self.get_failure_detail & type(self).GFD_ERROR)
self._callback(is_ok, is_found if is_ok else None, self)
self._callback = None
class AsyncCreateTopic(AsyncAction):
"""Async'ly create a topic.
References
- RESTful API @ https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.topics/create
"""
# CFD = Create Failure Detail
CFD_OK = 0x0000
CFD_ERROR = 0x0080
CFD_ERROR_CREATING_TOPIC = CFD_ERROR | 0x0001
def __init__(self, access_token, topic, async_state=None):
AsyncAction.__init__(self, async_state)
self.access_token = access_token
self.topic = topic
self.create_failure_detail = None
self._callback = None
def create(self, callback):
assert self._callback is None
self._callback = callback
_logger.info("Creating topic '%s'", self.topic)
        body = {}
request = HTTPRequest(
self.access_token.project_id,
self.access_token,
'/topics/%s' % urllib.quote_plus(self.topic),
method='PUT',
body=json.dumps(body))
http_client = tornado.httpclient.AsyncHTTPClient()
http_client.fetch(
request,
callback=self._on_http_client_fetch_done)
def _on_http_client_fetch_done(self, response):
_logger.info(self.create_log_msg_for_google_pubsub_http_client_response(response))
if response.code != httplib.OK:
self.log_error_with_json_response_body("Error creating topic '%s'" % self.topic, response)
self._call_callback(type(self).CFD_ERROR_CREATING_TOPIC)
            # :TODO: transient failure or longer term?
            # :TODO: transient failures dealt with by an exponential backoff
            # retry loop with a fixed end time
return
_logger.info("Successfully created topic '%s'", self.topic)
self._call_callback(type(self).CFD_OK)
def _call_callback(self, create_failure_detail):
assert self._callback is not None
assert self.create_failure_detail is None
self.create_failure_detail = create_failure_detail
is_ok = not bool(self.create_failure_detail & type(self).CFD_ERROR)
self._callback(is_ok, self)
self._callback = None
class AsyncDeleteTopic(AsyncAction):
"""Async'ly delete a topic.
References
- RESTful API @ https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.topics/delete
"""
# DFD = Delete Failure Detail
DFD_OK = 0x0000
DFD_ERROR = 0x0080
DFD_ERROR_DELETING_TOPIC = DFD_ERROR | 0x0001
def __init__(self, access_token, topic, async_state=None):
AsyncAction.__init__(self, async_state)
self.access_token = access_token
self.topic = topic
self.delete_failure_detail = None
self._callback = None
def delete(self, callback):
assert self._callback is None
self._callback = callback
_logger.info("Deleting topic '%s'", self.topic)
request = HTTPRequest(
self.access_token.project_id,
self.access_token,
'/topics/%s' % urllib.quote_plus(self.topic),
method='DELETE')
http_client = tornado.httpclient.AsyncHTTPClient()
http_client.fetch(
request,
callback=self._on_http_client_fetch_done)
def _on_http_client_fetch_done(self, response):
_logger.info(self.create_log_msg_for_google_pubsub_http_client_response(response))
if response.code != httplib.OK:
self.log_error_with_json_response_body("Error deleting topic '%s'" % self.topic, response)
self._call_callback(type(self).DFD_ERROR_DELETING_TOPIC)
            # :TODO: transient failure or longer term?
            # :TODO: transient failures dealt with by an exponential backoff
            # retry loop with a fixed end time
return
_logger.info("Successfully deleted topic '%s'", self.topic)
self._call_callback(type(self).DFD_OK)
def _call_callback(self, delete_failure_detail):
assert self._callback is not None
assert self.delete_failure_detail is None
self.delete_failure_detail = delete_failure_detail
is_ok = not bool(self.delete_failure_detail & type(self).DFD_ERROR)
self._callback(is_ok, self)
self._callback = None
class AsyncGetSubscription(AsyncAction):
"""Async'ly get a subscription.
    Note: for now, until more detail is needed, this class simply reports
    whether a subscription was found. For reference, the JSON document below
    shows what the API makes available
{
"topic": "projects/cloudfeaster/topics/gaming_spiders.mindgames.MindGamesSpider",
"ackDeadlineSeconds": 45,
"pushConfig": {},
"name": "projects/cloudfeaster/subscriptions/gaming_spiders.mindgames.MindGamesSpider",
"messageRetentionDuration": "604800s"
}
References
- RESTful API @ https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions/get
"""
# GFD = Get Failure Detail
GFD_OK = 0x0000
GFD_ERROR = 0x0080
GFD_ERROR_GETTING_SUBSCRIPTION = GFD_ERROR | 0x0001
def __init__(self, access_token, subscription, async_state=None):
AsyncAction.__init__(self, async_state)
self.access_token = access_token
self.subscription = subscription
self.get_failure_detail = None
self._callback = None
def get(self, callback):
assert self._callback is None
self._callback = callback
request = HTTPRequest(
self.access_token.project_id,
self.access_token,
'/subscriptions/%s' % urllib.quote_plus(self.subscription),
method='GET')
http_client = tornado.httpclient.AsyncHTTPClient()
http_client.fetch(
request,
callback=self._on_http_client_fetch_done)
def _on_http_client_fetch_done(self, response):
_logger.info(self.create_log_msg_for_google_pubsub_http_client_response(response))
if response.code == httplib.OK:
_logger.info("Found subscription '%s'", self.subscription)
self._call_callback(type(self).GFD_OK, True)
return
if response.code == httplib.NOT_FOUND:
_logger.info("Could not find subscription '%s'", self.subscription)
self._call_callback(type(self).GFD_OK, False)
return
self.log_error_with_json_response_body("Error getting subscription '%s'" % self.subscription, response)
self._call_callback(type(self).GFD_ERROR_GETTING_SUBSCRIPTION)
        # :TODO: is this a temporary failure or a longer-term one?
        # :TODO: temporary failures should be handled by an exponential backoff retry loop with a fixed overall deadline
def _call_callback(self, get_failure_detail, is_found=None):
assert self._callback is not None
assert self.get_failure_detail is None
self.get_failure_detail = get_failure_detail
is_ok = not bool(self.get_failure_detail & type(self).GFD_ERROR)
self._callback(is_ok, is_found if is_ok else None, self)
self._callback = None
class AsyncCreatePushSubscription(AsyncAction):
"""Async'ly create a push subscription.
References
- RESTful API @ https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions/create
"""
# CFD = Create Failure Detail
CFD_OK = 0x0000
CFD_ERROR = 0x0080
CFD_ERROR_CREATING_SUBSCRIPTION = CFD_ERROR | 0x0001
def __init__(self, access_token, topic, subscription, ack_deadline_in_seconds, push_endpoint, async_state=None):
AsyncAction.__init__(self, async_state)
self.access_token = access_token
self.topic = topic
self.subscription = subscription
self.ack_deadline_in_seconds = ack_deadline_in_seconds
self.push_endpoint = push_endpoint
self.create_failure_detail = None
self._callback = None
def create(self, callback):
assert self._callback is None
self._callback = callback
_logger.info("Creating subscription '%s' for topic '%s'", self.subscription, self.topic)
body = {
'topic': 'projects/%s/topics/%s' % (self.access_token.project_id, urllib.quote_plus(self.topic)),
'ackDeadlineSeconds': self.ack_deadline_in_seconds,
'pushConfig': {
'pushEndpoint': self.push_endpoint,
# should probably use 'attributes': { 'x-goog-version': 'v1' }
},
}
request = HTTPRequest(
self.access_token.project_id,
self.access_token,
'/subscriptions/%s' % urllib.quote_plus(self.subscription),
method='PUT',
body=json.dumps(body))
http_client = tornado.httpclient.AsyncHTTPClient()
http_client.fetch(
request,
callback=self._on_http_client_fetch_done)
def _on_http_client_fetch_done(self, response):
_logger.info(self.create_log_msg_for_google_pubsub_http_client_response(response))
if response.code != httplib.OK:
self.log_error_with_json_response_body(
"Error creating subscription '%s' for topic '%s'" % (self.subscription, self.topic),
response)
self._call_callback(type(self).CFD_ERROR_CREATING_SUBSCRIPTION)
            # :TODO: is this a temporary failure or a longer-term one?
            # :TODO: temporary failures should be handled by an exponential backoff retry loop with a fixed overall deadline
return
_logger.info("Successfully created subscription '%s' for topic '%s'", self.subscription, self.topic)
self._call_callback(type(self).CFD_OK)
def _call_callback(self, create_failure_detail):
assert self._callback is not None
assert self.create_failure_detail is None
self.create_failure_detail = create_failure_detail
is_ok = not bool(self.create_failure_detail & type(self).CFD_ERROR)
self._callback(is_ok, self)
self._callback = None
class AsyncDeleteSubscription(AsyncAction):
"""Async'ly delete a subscription.
References
- RESTful API @ https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions/delete
"""
# DFD = Delete Failure Detail
DFD_OK = 0x0000
DFD_ERROR = 0x0080
DFD_ERROR_DELETING_SUBSCRIPTION = DFD_ERROR | 0x0001
def __init__(self, access_token, subscription, async_state=None):
AsyncAction.__init__(self, async_state)
self.access_token = access_token
self.subscription = subscription
self.delete_failure_detail = None
self._callback = None
def delete(self, callback):
assert self._callback is None
self._callback = callback
_logger.info("Deleting subscription '%s'", self.subscription)
request = HTTPRequest(
self.access_token.project_id,
self.access_token,
'/subscriptions/%s' % urllib.quote_plus(self.subscription),
method='DELETE')
http_client = tornado.httpclient.AsyncHTTPClient()
http_client.fetch(
request,
callback=self._on_http_client_fetch_done)
def _on_http_client_fetch_done(self, response):
_logger.info(self.create_log_msg_for_google_pubsub_http_client_response(response))
if response.code != httplib.OK:
self.log_error_with_json_response_body("Error deleting subscription '%s'" % self.subscription, response)
self._call_callback(type(self).DFD_ERROR_DELETING_SUBSCRIPTION)
            # :TODO: is this a temporary failure or a longer-term one?
            # :TODO: temporary failures should be handled by an exponential backoff retry loop with a fixed overall deadline
return
_logger.info("Successfully deleted subscription '%s'", self.subscription)
self._call_callback(type(self).DFD_OK)
def _call_callback(self, delete_failure_detail):
assert self._callback is not None
assert self.delete_failure_detail is None
self.delete_failure_detail = delete_failure_detail
is_ok = not bool(self.delete_failure_detail & type(self).DFD_ERROR)
self._callback(is_ok, self)
self._callback = None
class AsyncPublishMessage(AsyncAction):
"""Async'ly publish a message to an existing topic.
References
- RESTful API @ https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.topics/publish
"""
# PFD = Publish Failure Detail
PFD_OK = 0x0000
PFD_ERROR = 0x0080
PFD_ERROR_PUBLISHING_MESSAGE = PFD_ERROR | 0x0001
def __init__(self,
access_token,
topic,
data,
async_state=None):
AsyncAction.__init__(self, async_state)
self.access_token = access_token
self.topic = topic
self.data = data
self.publish_failure_detail = None
self._callback = None
def publish(self, callback):
assert self._callback is None
self._callback = callback
body = {
'messages': [
{
'data': base64.b64encode(self.data),
},
],
}
request = HTTPRequest(
self.access_token.project_id,
self.access_token,
'/topics/%s:publish' % urllib.quote_plus(self.topic),
method='POST',
body=json.dumps(body))
http_client = tornado.httpclient.AsyncHTTPClient()
http_client.fetch(
request,
callback=self._on_http_client_fetch_done)
def _on_http_client_fetch_done(self, response):
_logger.info(self.create_log_msg_for_google_pubsub_http_client_response(response))
if response.code != httplib.OK:
self.log_error_with_json_response_body('Error publishing message', response)
self._call_callback(type(self).PFD_ERROR_PUBLISHING_MESSAGE)
            # :TODO: is this a temporary failure or a longer-term one?
            # :TODO: temporary failures should be handled by an exponential backoff retry loop with a fixed overall deadline
return
_logger.info('Successfully published message')
self._call_callback(type(self).PFD_OK)
def _call_callback(self, publish_failure_detail):
assert self._callback is not None
assert self.publish_failure_detail is None
self.publish_failure_detail = publish_failure_detail
is_ok = not bool(self.publish_failure_detail & type(self).PFD_ERROR)
self._callback(is_ok, self)
self._callback = None
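# A minimal usage sketch (added, not part of the original module) showing the
# callback convention shared by the Async* actions above. It assumes a running
# Tornado IOLoop and an access-token object exposing ``project_id``, as
# required by the HTTPRequest calls above.
def _example_publish(access_token, topic, data):
    def on_publish_done(is_ok, publisher):
        # publisher.publish_failure_detail carries the PFD_* code on failure
        if not is_ok:
            _logger.error('publish failed: 0x%04x', publisher.publish_failure_detail)
    AsyncPublishMessage(access_token, topic, data).publish(on_publish_done)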
|
|
import sys
from decimal import Decimal
import numpy as np
from numpy.testing import *
from numpy.testing.utils import WarningManager
import warnings
class TestEinSum(TestCase):
def test_einsum_errors(self):
# Need enough arguments
assert_raises(ValueError, np.einsum)
assert_raises(ValueError, np.einsum, "")
# subscripts must be a string
assert_raises(TypeError, np.einsum, 0, 0)
# out parameter must be an array
assert_raises(TypeError, np.einsum, "", 0, out='test')
# order parameter must be a valid order
assert_raises(TypeError, np.einsum, "", 0, order='W')
# casting parameter must be a valid casting
assert_raises(ValueError, np.einsum, "", 0, casting='blah')
# dtype parameter must be a valid dtype
assert_raises(TypeError, np.einsum, "", 0, dtype='bad_data_type')
# other keyword arguments are rejected
assert_raises(TypeError, np.einsum, "", 0, bad_arg=0)
# number of operands must match count in subscripts string
assert_raises(ValueError, np.einsum, "", 0, 0)
assert_raises(ValueError, np.einsum, ",", 0, [0], [0])
assert_raises(ValueError, np.einsum, ",", [0])
# can't have more subscripts than dimensions in the operand
assert_raises(ValueError, np.einsum, "i", 0)
assert_raises(ValueError, np.einsum, "ij", [0,0])
assert_raises(ValueError, np.einsum, "...i", 0)
assert_raises(ValueError, np.einsum, "i...j", [0,0])
assert_raises(ValueError, np.einsum, "i...", 0)
assert_raises(ValueError, np.einsum, "ij...", [0,0])
# invalid ellipsis
assert_raises(ValueError, np.einsum, "i..", [0,0])
assert_raises(ValueError, np.einsum, ".i...", [0,0])
assert_raises(ValueError, np.einsum, "j->..j", [0,0])
assert_raises(ValueError, np.einsum, "j->.j...", [0,0])
# invalid subscript character
assert_raises(ValueError, np.einsum, "i%...", [0,0])
assert_raises(ValueError, np.einsum, "...j$", [0,0])
assert_raises(ValueError, np.einsum, "i->&", [0,0])
# output subscripts must appear in input
assert_raises(ValueError, np.einsum, "i->ij", [0,0])
# output subscripts may only be specified once
assert_raises(ValueError, np.einsum, "ij->jij", [[0,0],[0,0]])
        # dimensions must match when being collapsed
assert_raises(ValueError, np.einsum, "ii", np.arange(6).reshape(2,3))
assert_raises(ValueError, np.einsum, "ii->i", np.arange(6).reshape(2,3))
# broadcasting to new dimensions must be enabled explicitly
assert_raises(ValueError, np.einsum, "i", np.arange(6).reshape(2,3))
assert_raises(ValueError, np.einsum, "i->i", [[0,1],[0,1]],
out=np.arange(4).reshape(2,2))
def test_einsum_views(self):
# pass-through
a = np.arange(6).reshape(2,3)
b = np.einsum("...", a)
assert_(b.base is a)
b = np.einsum(a, [Ellipsis])
assert_(b.base is a)
b = np.einsum("ij", a)
assert_(b.base is a)
assert_equal(b, a)
b = np.einsum(a, [0,1])
assert_(b.base is a)
assert_equal(b, a)
# transpose
a = np.arange(6).reshape(2,3)
b = np.einsum("ji", a)
assert_(b.base is a)
assert_equal(b, a.T)
b = np.einsum(a, [1,0])
assert_(b.base is a)
assert_equal(b, a.T)
# diagonal
a = np.arange(9).reshape(3,3)
b = np.einsum("ii->i", a)
assert_(b.base is a)
assert_equal(b, [a[i,i] for i in range(3)])
b = np.einsum(a, [0,0], [0])
assert_(b.base is a)
assert_equal(b, [a[i,i] for i in range(3)])
# diagonal with various ways of broadcasting an additional dimension
a = np.arange(27).reshape(3,3,3)
b = np.einsum("...ii->...i", a)
assert_(b.base is a)
assert_equal(b, [[x[i,i] for i in range(3)] for x in a])
b = np.einsum(a, [Ellipsis,0,0], [Ellipsis,0])
assert_(b.base is a)
assert_equal(b, [[x[i,i] for i in range(3)] for x in a])
b = np.einsum("ii...->...i", a)
assert_(b.base is a)
assert_equal(b, [[x[i,i] for i in range(3)]
for x in a.transpose(2,0,1)])
b = np.einsum(a, [0,0,Ellipsis], [Ellipsis,0])
assert_(b.base is a)
assert_equal(b, [[x[i,i] for i in range(3)]
for x in a.transpose(2,0,1)])
b = np.einsum("...ii->i...", a)
assert_(b.base is a)
assert_equal(b, [a[:,i,i] for i in range(3)])
b = np.einsum(a, [Ellipsis,0,0], [0,Ellipsis])
assert_(b.base is a)
assert_equal(b, [a[:,i,i] for i in range(3)])
b = np.einsum("jii->ij", a)
assert_(b.base is a)
assert_equal(b, [a[:,i,i] for i in range(3)])
b = np.einsum(a, [1,0,0], [0,1])
assert_(b.base is a)
assert_equal(b, [a[:,i,i] for i in range(3)])
b = np.einsum("ii...->i...", a)
assert_(b.base is a)
assert_equal(b, [a.transpose(2,0,1)[:,i,i] for i in range(3)])
b = np.einsum(a, [0,0,Ellipsis], [0,Ellipsis])
assert_(b.base is a)
assert_equal(b, [a.transpose(2,0,1)[:,i,i] for i in range(3)])
b = np.einsum("i...i->i...", a)
assert_(b.base is a)
assert_equal(b, [a.transpose(1,0,2)[:,i,i] for i in range(3)])
b = np.einsum(a, [0,Ellipsis,0], [0,Ellipsis])
assert_(b.base is a)
assert_equal(b, [a.transpose(1,0,2)[:,i,i] for i in range(3)])
b = np.einsum("i...i->...i", a)
assert_(b.base is a)
assert_equal(b, [[x[i,i] for i in range(3)]
for x in a.transpose(1,0,2)])
b = np.einsum(a, [0,Ellipsis,0], [Ellipsis,0])
assert_(b.base is a)
assert_equal(b, [[x[i,i] for i in range(3)]
for x in a.transpose(1,0,2)])
# triple diagonal
a = np.arange(27).reshape(3,3,3)
b = np.einsum("iii->i", a)
assert_(b.base is a)
assert_equal(b, [a[i,i,i] for i in range(3)])
b = np.einsum(a, [0,0,0], [0])
assert_(b.base is a)
assert_equal(b, [a[i,i,i] for i in range(3)])
# swap axes
a = np.arange(24).reshape(2,3,4)
b = np.einsum("ijk->jik", a)
assert_(b.base is a)
assert_equal(b, a.swapaxes(0,1))
b = np.einsum(a, [0,1,2], [1,0,2])
assert_(b.base is a)
assert_equal(b, a.swapaxes(0,1))
def check_einsum_sums(self, dtype):
# Check various sums. Does many sizes to exercise unrolled loops.
# sum(a, axis=-1)
for n in range(1,17):
a = np.arange(n, dtype=dtype)
assert_equal(np.einsum("i->", a), np.sum(a, axis=-1).astype(dtype))
assert_equal(np.einsum(a, [0], []),
np.sum(a, axis=-1).astype(dtype))
for n in range(1,17):
a = np.arange(2*3*n, dtype=dtype).reshape(2,3,n)
assert_equal(np.einsum("...i->...", a),
np.sum(a, axis=-1).astype(dtype))
assert_equal(np.einsum(a, [Ellipsis,0], [Ellipsis]),
np.sum(a, axis=-1).astype(dtype))
# sum(a, axis=0)
for n in range(1,17):
a = np.arange(2*n, dtype=dtype).reshape(2,n)
assert_equal(np.einsum("i...->...", a),
np.sum(a, axis=0).astype(dtype))
assert_equal(np.einsum(a, [0,Ellipsis], [Ellipsis]),
np.sum(a, axis=0).astype(dtype))
for n in range(1,17):
a = np.arange(2*3*n, dtype=dtype).reshape(2,3,n)
assert_equal(np.einsum("i...->...", a),
np.sum(a, axis=0).astype(dtype))
assert_equal(np.einsum(a, [0,Ellipsis], [Ellipsis]),
np.sum(a, axis=0).astype(dtype))
# trace(a)
for n in range(1,17):
a = np.arange(n*n, dtype=dtype).reshape(n,n)
assert_equal(np.einsum("ii", a), np.trace(a).astype(dtype))
assert_equal(np.einsum(a, [0,0]), np.trace(a).astype(dtype))
# multiply(a, b)
for n in range(1,17):
a = np.arange(3*n, dtype=dtype).reshape(3,n)
b = np.arange(2*3*n, dtype=dtype).reshape(2,3,n)
assert_equal(np.einsum("..., ...", a, b), np.multiply(a, b))
assert_equal(np.einsum(a, [Ellipsis], b, [Ellipsis]),
np.multiply(a, b))
# inner(a,b)
for n in range(1,17):
a = np.arange(2*3*n, dtype=dtype).reshape(2,3,n)
b = np.arange(n, dtype=dtype)
assert_equal(np.einsum("...i, ...i", a, b), np.inner(a, b))
assert_equal(np.einsum(a, [Ellipsis,0], b, [Ellipsis,0]),
np.inner(a, b))
for n in range(1,11):
a = np.arange(n*3*2, dtype=dtype).reshape(n,3,2)
b = np.arange(n, dtype=dtype)
assert_equal(np.einsum("i..., i...", a, b), np.inner(a.T, b.T).T)
assert_equal(np.einsum(a, [0,Ellipsis], b, [0,Ellipsis]),
np.inner(a.T, b.T).T)
# outer(a,b)
for n in range(1,17):
a = np.arange(3, dtype=dtype)+1
b = np.arange(n, dtype=dtype)+1
assert_equal(np.einsum("i,j", a, b), np.outer(a, b))
assert_equal(np.einsum(a, [0], b, [1]), np.outer(a, b))
# Suppress the complex warnings for the 'as f8' tests
ctx = WarningManager()
ctx.__enter__()
try:
warnings.simplefilter('ignore', np.ComplexWarning)
# matvec(a,b) / a.dot(b) where a is matrix, b is vector
for n in range(1,17):
a = np.arange(4*n, dtype=dtype).reshape(4,n)
b = np.arange(n, dtype=dtype)
assert_equal(np.einsum("ij, j", a, b), np.dot(a, b))
assert_equal(np.einsum(a, [0,1], b, [1]), np.dot(a, b))
c = np.arange(4, dtype=dtype)
np.einsum("ij,j", a, b, out=c,
dtype='f8', casting='unsafe')
assert_equal(c,
np.dot(a.astype('f8'),
b.astype('f8')).astype(dtype))
c[...] = 0
np.einsum(a, [0,1], b, [1], out=c,
dtype='f8', casting='unsafe')
assert_equal(c,
np.dot(a.astype('f8'),
b.astype('f8')).astype(dtype))
for n in range(1,17):
a = np.arange(4*n, dtype=dtype).reshape(4,n)
b = np.arange(n, dtype=dtype)
assert_equal(np.einsum("ji,j", a.T, b.T), np.dot(b.T, a.T))
assert_equal(np.einsum(a.T, [1,0], b.T, [1]), np.dot(b.T, a.T))
c = np.arange(4, dtype=dtype)
np.einsum("ji,j", a.T, b.T, out=c, dtype='f8', casting='unsafe')
assert_equal(c,
np.dot(b.T.astype('f8'),
a.T.astype('f8')).astype(dtype))
c[...] = 0
np.einsum(a.T, [1,0], b.T, [1], out=c,
dtype='f8', casting='unsafe')
assert_equal(c,
np.dot(b.T.astype('f8'),
a.T.astype('f8')).astype(dtype))
# matmat(a,b) / a.dot(b) where a is matrix, b is matrix
for n in range(1,17):
if n < 8 or dtype != 'f2':
a = np.arange(4*n, dtype=dtype).reshape(4,n)
b = np.arange(n*6, dtype=dtype).reshape(n,6)
assert_equal(np.einsum("ij,jk", a, b), np.dot(a, b))
assert_equal(np.einsum(a, [0,1], b, [1,2]), np.dot(a, b))
for n in range(1,17):
a = np.arange(4*n, dtype=dtype).reshape(4,n)
b = np.arange(n*6, dtype=dtype).reshape(n,6)
c = np.arange(24, dtype=dtype).reshape(4,6)
np.einsum("ij,jk", a, b, out=c, dtype='f8', casting='unsafe')
assert_equal(c,
np.dot(a.astype('f8'),
b.astype('f8')).astype(dtype))
c[...] = 0
np.einsum(a, [0,1], b, [1,2], out=c,
dtype='f8', casting='unsafe')
assert_equal(c,
np.dot(a.astype('f8'),
b.astype('f8')).astype(dtype))
# matrix triple product (note this is not currently an efficient
# way to multiply 3 matrices)
a = np.arange(12, dtype=dtype).reshape(3,4)
b = np.arange(20, dtype=dtype).reshape(4,5)
c = np.arange(30, dtype=dtype).reshape(5,6)
if dtype != 'f2':
assert_equal(np.einsum("ij,jk,kl", a, b, c),
a.dot(b).dot(c))
assert_equal(np.einsum(a, [0,1], b, [1,2], c, [2,3]),
a.dot(b).dot(c))
d = np.arange(18, dtype=dtype).reshape(3,6)
np.einsum("ij,jk,kl", a, b, c, out=d,
dtype='f8', casting='unsafe')
assert_equal(d, a.astype('f8').dot(b.astype('f8')
).dot(c.astype('f8')).astype(dtype))
d[...] = 0
np.einsum(a, [0,1], b, [1,2], c, [2,3], out=d,
dtype='f8', casting='unsafe')
assert_equal(d, a.astype('f8').dot(b.astype('f8')
).dot(c.astype('f8')).astype(dtype))
# tensordot(a, b)
if np.dtype(dtype) != np.dtype('f2'):
a = np.arange(60, dtype=dtype).reshape(3,4,5)
b = np.arange(24, dtype=dtype).reshape(4,3,2)
assert_equal(np.einsum("ijk, jil -> kl", a, b),
np.tensordot(a,b, axes=([1,0],[0,1])))
assert_equal(np.einsum(a, [0,1,2], b, [1,0,3], [2,3]),
np.tensordot(a,b, axes=([1,0],[0,1])))
c = np.arange(10, dtype=dtype).reshape(5,2)
np.einsum("ijk,jil->kl", a, b, out=c,
dtype='f8', casting='unsafe')
assert_equal(c, np.tensordot(a.astype('f8'), b.astype('f8'),
axes=([1,0],[0,1])).astype(dtype))
c[...] = 0
np.einsum(a, [0,1,2], b, [1,0,3], [2,3], out=c,
dtype='f8', casting='unsafe')
assert_equal(c, np.tensordot(a.astype('f8'), b.astype('f8'),
axes=([1,0],[0,1])).astype(dtype))
finally:
ctx.__exit__()
# logical_and(logical_and(a!=0, b!=0), c!=0)
a = np.array([1, 3, -2, 0, 12, 13, 0, 1], dtype=dtype)
b = np.array([0, 3.5, 0., -2, 0, 1, 3, 12], dtype=dtype)
c = np.array([True,True,False,True,True,False,True,True])
assert_equal(np.einsum("i,i,i->i", a, b, c,
dtype='?', casting='unsafe'),
np.logical_and(np.logical_and(a!=0, b!=0), c!=0))
assert_equal(np.einsum(a, [0], b, [0], c, [0], [0],
dtype='?', casting='unsafe'),
np.logical_and(np.logical_and(a!=0, b!=0), c!=0))
a = np.arange(9, dtype=dtype)
assert_equal(np.einsum(",i->", 3, a), 3*np.sum(a))
assert_equal(np.einsum(3, [], a, [0], []), 3*np.sum(a))
assert_equal(np.einsum("i,->", a, 3), 3*np.sum(a))
assert_equal(np.einsum(a, [0], 3, [], []), 3*np.sum(a))
# Various stride0, contiguous, and SSE aligned variants
for n in range(1,25):
a = np.arange(n, dtype=dtype)
if np.dtype(dtype).itemsize > 1:
assert_equal(np.einsum("...,...",a,a), np.multiply(a,a))
assert_equal(np.einsum("i,i", a, a), np.dot(a,a))
assert_equal(np.einsum("i,->i", a, 2), 2*a)
assert_equal(np.einsum(",i->i", 2, a), 2*a)
assert_equal(np.einsum("i,->", a, 2), 2*np.sum(a))
assert_equal(np.einsum(",i->", 2, a), 2*np.sum(a))
assert_equal(np.einsum("...,...",a[1:],a[:-1]),
np.multiply(a[1:],a[:-1]))
assert_equal(np.einsum("i,i", a[1:], a[:-1]),
np.dot(a[1:],a[:-1]))
assert_equal(np.einsum("i,->i", a[1:], 2), 2*a[1:])
assert_equal(np.einsum(",i->i", 2, a[1:]), 2*a[1:])
assert_equal(np.einsum("i,->", a[1:], 2), 2*np.sum(a[1:]))
assert_equal(np.einsum(",i->", 2, a[1:]), 2*np.sum(a[1:]))
# An object array, summed as the data type
a = np.arange(9, dtype=object)
b = np.einsum("i->", a, dtype=dtype, casting='unsafe')
assert_equal(b, np.sum(a))
assert_equal(b.dtype, np.dtype(dtype))
b = np.einsum(a, [0], [], dtype=dtype, casting='unsafe')
assert_equal(b, np.sum(a))
assert_equal(b.dtype, np.dtype(dtype))
# A case which was failing (ticket #1885)
p = np.arange(2) + 1
q = np.arange(4).reshape(2,2) + 3
r = np.arange(4).reshape(2,2) + 7
assert_equal(np.einsum('z,mz,zm->', p, q, r), 253)
    def test_einsum_sums_int8(self):
        self.check_einsum_sums('i1')
    def test_einsum_sums_uint8(self):
        self.check_einsum_sums('u1')
    def test_einsum_sums_int16(self):
        self.check_einsum_sums('i2')
    def test_einsum_sums_uint16(self):
        self.check_einsum_sums('u2')
    def test_einsum_sums_int32(self):
        self.check_einsum_sums('i4')
    def test_einsum_sums_uint32(self):
        self.check_einsum_sums('u4')
    def test_einsum_sums_int64(self):
        self.check_einsum_sums('i8')
    def test_einsum_sums_uint64(self):
        self.check_einsum_sums('u8')
    def test_einsum_sums_float16(self):
        self.check_einsum_sums('f2')
    def test_einsum_sums_float32(self):
        self.check_einsum_sums('f4')
    def test_einsum_sums_float64(self):
        self.check_einsum_sums('f8')
    def test_einsum_sums_longdouble(self):
        self.check_einsum_sums(np.longdouble)
    def test_einsum_sums_cfloat64(self):
        self.check_einsum_sums('c8')
    def test_einsum_sums_cfloat128(self):
        self.check_einsum_sums('c16')
    def test_einsum_sums_clongdouble(self):
        self.check_einsum_sums(np.clongdouble)
def test_einsum_misc(self):
# This call used to crash because of a bug in
# PyArray_FillWithZero
a = np.ones((1,2))
b = np.ones((2,2,1))
assert_equal(np.einsum('ij...,j...->i...',a,b), [[[2],[2]]])
# The iterator had an issue with buffering this reduction
a = np.ones((5, 12, 4, 2, 3), np.int64)
b = np.ones((5, 12, 11), np.int64)
assert_equal(np.einsum('ijklm,ijn,ijn->',a,b,b),
np.einsum('ijklm,ijn->',a,b))
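# Small illustrative helper (added, not part of the original suite): for pure
# relabelings and diagonal extractions np.einsum returns a view of its
# operand rather than a copy, which is what test_einsum_views verifies above.
def _einsum_view_demo():
    a = np.arange(6).reshape(2, 3)
    t = np.einsum('ji', a)            # transpose, returned as a view
    assert t.base is a
    m = np.arange(9).reshape(3, 3)
    d = np.einsum('ii->i', m)         # main diagonal, returned as a view
    assert d.base is m
    return t, d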
if __name__ == "__main__":
run_module_suite()
|
|
#! /usr/bin/env python3
# encoding: utf-8
#
# (C) 2015 Chris Liechti <cliechti@gmx.net>
#
# SPDX-License-Identifier: BSD-3-Clause
"""\
WiPy helper tool to access file via FTP.
"""
import configparser # https://docs.python.org/3/library/configparser.html
import ftplib # https://docs.python.org/3/library/ftplib.html
import glob
import io
import logging
import os
import shutil
import sys
import posixpath
import datetime
INI_TEMPLATE = """\
[FTP]
server = 192.168.1.1
user = micro
pass = python
"""
class WiPySimulator(object):
def __init__(self, root_directory):
self.root = os.path.abspath(root_directory)
self.log = logging.getLogger('FTP')
self.log.debug('WiPy FTP Simulator in {}'.format(self.root))
        # create flash/ and flash/lib/ if missing (also covers the case where
        # flash/ exists but lib/ does not)
        os.makedirs(os.path.join(self.root, 'flash', 'lib'), exist_ok=True)
def __enter__(self):
return self
def __exit__(self, *args, **kwargs):
pass
def ls(self, path=None):
"""List files, meant for interactive use"""
if path is None:
path = '/'
        # map the absolute path into the simulator root; a bare os.path.join
        # would discard self.root for absolute paths such as '/'
        print(os.listdir(os.path.join(self.root, os.path.relpath(path, '/'))))
def walk(self, root):
yield from os.walk(os.path.join(self.root, posixpath.relpath(root, '/')))
def makedirs(self, dirname):
"""Recursively create directories, if not yet existing"""
self.log.info('makedirs {}'.format(dirname))
os.makedirs(os.path.join(self.root, os.path.relpath(dirname, '/')), exist_ok=True)
def put(self, filename, fileobj):
"""send binary file"""
self.log.info('put {}'.format(filename))
with open(os.path.join(self.root, os.path.relpath(filename, '/')), 'wb') as dst:
shutil.copyfileobj(fileobj, dst)
def get(self, filename, fileobj):
"""receive binary file"""
self.log.info('get {}'.format(filename))
with open(os.path.join(self.root, os.path.relpath(filename, '/')), 'rb') as src:
shutil.copyfileobj(src, fileobj)
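# Note: WiPySimulator above and WiPyFTP below implement the same informal
# interface (ls/walk/makedirs/put/get), so WiPyActions can drive either one
# transparently depending on the --simulate option.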
class WiPyFTP(object):
def __init__(self, read_ini='wipy-ftp.ini'):
self.ftp = None
self.log = logging.getLogger('FTP')
self.config = configparser.RawConfigParser()
self.config.read_string(INI_TEMPLATE)
if read_ini is not None:
if os.path.exists(read_ini):
self.config.read(read_ini)
else:
logging.warning('"{}" not found, using defaults'.format(read_ini))
self.log.debug('WiPy IP: {}'.format(self.config['FTP']['server']))
self.log.debug('FTP user: {}'.format(self.config['FTP']['user']))
self.log.debug('FTP pass: {}'.format(self.config['FTP']['pass']))
def __enter__(self):
self.log.debug('Connecting...')
self.ftp = ftplib.FTP(self.config['FTP']['server'])
self.ftp.login(self.config['FTP']['user'], self.config['FTP']['pass'])
self.log.debug('Connection OK')
return self
def __exit__(self, *args, **kwargs):
self.log.debug('Disconnecting...')
self.ftp.quit()
def ls(self, path=None):
"""List files, meant for interactive use"""
if path is None:
path = '/'
try:
self.ftp.cwd(path)
self.log.debug('ls {}'.format(self.ftp.pwd()))
            # retrlines already prints each line via its default callback;
            # printing its return value would also echo the final status line
            self.ftp.retrlines('LIST')
except ftplib.error_perm as e:
self.log.error('invalid path: {} ({})'.format(path, e))
except ftplib.all_errors as e:
self.log.error('FTP error: {}'.format(e))
def walk(self, root):
"""recursively list files on target"""
self.log.debug('walk {}'.format(root))
try:
self.ftp.cwd(root)
lines = []
self.ftp.retrlines('LIST', lines.append)
items = [(x.startswith('d'), x[49:]) for x in lines]
dirs = [name for is_dir, name in items if is_dir]
files = [name for is_dir, name in items if not is_dir]
yield root, dirs, files
for r in dirs:
if root == '/':
yield from self.walk('/{}'.format(r))
else:
yield from self.walk('{}/{}'.format(root, r))
except ftplib.error_perm as e:
            self.log.error('invalid path: {} ({})'.format(root, e))
except ftplib.all_errors as e:
self.log.error('FTP error: {}'.format(e))
def makedirs(self, dirname):
"""Recursively create directories, if not yet existing"""
self.log.info('makedirs {}'.format(dirname))
try:
self.ftp.cwd('/')
except ftplib.error_perm as e:
self.log.error('invalid path: {} ({})'.format(dirname, e))
for directory in dirname.split('/')[1:]:
try:
self.log.debug('cwd to {}'.format(directory))
self.ftp.cwd(directory)
except ftplib.error_perm as e:
self.log.info('creating directory: {} ({})'.format(dirname, e))
try:
self.ftp.mkd(directory)
self.ftp.cwd(directory)
except ftplib.error_perm as e:
self.log.error('error while creating directory: {} ({})'.format(dirname, e))
except ftplib.all_errors as e:
self.log.error('FTP error: {}'.format(e))
def put(self, filename, fileobj):
"""send binary file"""
try:
self.log.info('put {}'.format(filename))
self.ftp.storbinary("STOR " + filename, fileobj, 1024)
except ftplib.error_perm as e:
self.log.error('invalid path: {} ({})'.format(filename, e))
except ftplib.all_errors as e:
self.log.error('FTP error: {}'.format(e))
def get(self, filename, fileobj):
"""receive binary file"""
try:
self.log.info('get {}'.format(filename))
self.ftp.retrbinary("RETR " + filename, fileobj.write, 1024)
except ftplib.error_perm as e:
self.log.error('invalid path: {} ({})'.format(filename, e))
except ftplib.all_errors as e:
self.log.error('FTP error: {}'.format(e))
WLANCONFIG_TEMPLATE = """\
ssid = {ssid!r}
password = {password!r}
"""
ULOG_CONFIG_TEMPLATE = """\
import ulog
ulog.add_remote({ip!r}, {port})
"""
class WiPyActions():
def __init__(self, target):
self.target = target
def __enter__(self):
self.target.__enter__()
return self
def __exit__(self, *args, **kwargs):
        self.target.__exit__(*args, **kwargs)
def ls(self, path=None):
""" lists directory entry """
self.target.ls(path)
def put(self, filename, fileobj):
self.target.put(filename, fileobj)
def get(self, filename, fileobj):
self.target.get(filename, fileobj)
def install_lib(self):
"""recursively copy /flash/lib"""
base_path = 'device/flash/lib'
for root, dirs, files in os.walk(base_path):
if '__pycache__' in dirs:
dirs.remove('__pycache__')
self.target.makedirs('/{}'.format(os.path.relpath(root, 'device')))
for filename in files:
remote_name = os.path.relpath(os.path.join(root, filename), 'device')
with open(os.path.join(root, filename), 'rb') as src:
remote_name = remote_name.replace('\\', '/')
self.target.put('/{}'.format(remote_name), src)
def install_top(self):
"""copy *.py in /flash"""
for filename in glob.glob('device/flash/*.py'):
with open(filename, 'rb') as src:
self.target.put('/flash/{}'.format(os.path.basename(filename)), src)
def config_wlan(self):
ssid = input('Enter SSID: ')
password = input('Enter passphrase: ')
self.target.put('/flash/wlanconfig.py',
io.BytesIO(WLANCONFIG_TEMPLATE.format(ssid=ssid, password=password).encode('utf-8')))
def config_ulog(self):
ip = input('Enter IP: ')
port = input('UDP port [514]: ')
if not port:
port = '514'
self.target.put('/flash/ulogconfig.py',
io.BytesIO(ULOG_CONFIG_TEMPLATE.format(ip=ip, port=int(port)).encode('utf-8')))
def backup(self):
"""Download all data from /flash"""
backup_dir = 'backup_{:%Y-%m-%d_%H_%M_%S}'.format(datetime.datetime.now())
logging.info('backing up /flash into {}'.format(backup_dir))
for root, dirs, files in self.target.walk('/flash'):
local_root = os.path.join(backup_dir, posixpath.relpath(root, '/'))
if not os.path.exists(local_root):
os.makedirs(local_root)
for name in files:
with open(os.path.join(local_root, name), 'wb') as dst:
self.target.get(posixpath.join(root, name), dst)
def main():
import argparse
parser = argparse.ArgumentParser(
description='WiPy copy tool',
epilog="""\
For configuration, a file called ``wipy-ftp.ini`` should be present. Run
"%(prog)s write-ini" to create one. Adapt as needed when connected via
router.
""")
parser.add_argument('action', type=lambda s: s.lower(), help='Action to execute, try "help"')
parser.add_argument('path', nargs='?', help='pathname used for some actions')
parser.add_argument('destination', nargs='?', help='target used for some actions')
parser.add_argument('-v', '--verbose', action='store_true', help='show more diagnostic messages')
parser.add_argument('--defaults', action='store_true', help='do not read ini file, use default settings')
parser.add_argument('--ini', help='alternate name for settings file (default: %(default)s)', default='wipy-ftp.ini')
parser.add_argument('--simulate', metavar='DIR', help='do not access WiPy, put files in given directory instead')
# parser.add_argument('--noexp', action='store_true', help='skip steps involving the expansion board and SD storage')
args = parser.parse_args()
#~ print(args)
logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO)
if args.action == 'write-ini':
with open(args.ini, 'w') as f:
f.write(INI_TEMPLATE)
logging.info('"{}" written'.format(args.ini))
sys.exit(0)
if args.simulate:
logging.info('using simulator')
target = WiPySimulator(args.simulate)
else:
logging.info('using ftp')
target = WiPyFTP(None if args.defaults else args.ini)
if args.action == 'cp':
with WiPyActions(target) as wipy:
with open(args.path,'rb') as src:
wipy.put(args.destination, src)
elif args.action == 'cat':
with WiPyActions(target) as wipy:
wipy.get(args.path, sys.stdout.buffer)
elif args.action == 'ls':
with WiPyActions(target) as wipy:
wipy.ls(args.path)
elif args.action == 'sync-lib':
with WiPyActions(target) as wipy:
wipy.install_lib()
elif args.action == 'sync-top':
with WiPyActions(target) as wipy:
wipy.install_top()
elif args.action == 'install':
with WiPyActions(target) as wipy:
wipy.backup()
wipy.install_top()
wipy.install_lib()
if input('Connect to an access point? [Y/n]: ').upper() in ('', 'Y'):
wipy.config_wlan()
elif args.action == 'config-wlan':
with WiPyActions(target) as wipy:
print('Configure the WiPy to connect to an access point')
wipy.config_wlan()
elif args.action == 'config-ulog':
with WiPyActions(target) as wipy:
print('Configure the WiPy to send ulog (syslog compatible) messages to following IP address')
wipy.config_ulog()
elif args.action == 'fwupgrade':
with WiPyActions(target) as wipy:
print('upload /flash/sys/mcuimg.bin')
            with open('mcuimg.bin', 'rb') as src:
                wipy.put('/flash/sys/mcuimg.bin', src)
print('press reset button on WiPy to complete upgrade')
elif args.action == 'backup':
with WiPyActions(target) as wipy:
wipy.backup()
elif args.action == 'interact':
# local REPL loop with established FTP connection for development
with WiPyActions(target) as wipy:
import code
try:
import rlcompleter
import readline
except ImportError as e:
logging.warning('readline support failed: {}'.format(e))
else:
readline.set_completer(rlcompleter.Completer(locals()).complete)
readline.parse_and_bind("tab: complete")
code.interact(local=locals())
else:
sys.stdout.write("""\
ACTIONS are:
- "write-ini" create ``wipy-ftp.ini`` with default settings
- "install" copy boot.py, main.py and /lib from the PC to the WiPy
- "sync-lib" copies only /lib
- "sync-top" copies only boot.py, main.py
- "config-wlan" ask for SSID/Password and write wlanconfig.py on WiPy
- "ls" with optional remote path argument: list files
- "cp" with local source and remote destination: uploads binary file
- "cat" with remote filename: show file contents
- "backup" download everything in /flash
- "fwupgrade" write mcuimg.bin file to WiPy for firmware upgrade
- "help" this text
""")
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if __name__ == '__main__':
main()
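# Example session (added for illustration; assumes this file is saved as
# wipy-ftp.py -- actions and flags as defined in main() above):
#   python3 wipy-ftp.py write-ini                  # create wipy-ftp.ini
#   python3 wipy-ftp.py ls /flash                  # list files on the WiPy
#   python3 wipy-ftp.py --simulate ./sim install   # dry run into ./sim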
|
|
# Copyright (c) 2013 NEC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import threading
import time
from unittest import mock
from oslo_config import fixture as config
from oslo_serialization import jsonutils
from oslotest import base as test_base
import requests
import webob.dec
import webob.exc
from oslo_middleware import healthcheck
from oslo_middleware.healthcheck import __main__
class HealthcheckMainTests(test_base.BaseTestCase):
def test_startup_response(self):
server = __main__.create_server(0)
th = threading.Thread(target=server.serve_forever)
th.start()
self.addCleanup(server.shutdown)
while True:
try:
# Connecting on 0.0.0.0 is not allowed on windows
# The operating system will return WSAEADDRNOTAVAIL which
# in turn will throw a requests.ConnectionError
r = requests.get("http://127.0.0.1:%s" % (
server.server_address[1]))
except requests.ConnectionError:
# Server hasn't started up yet, try again in a few.
time.sleep(1)
else:
self.assertEqual(200, r.status_code)
break
class HealthcheckTests(test_base.BaseTestCase):
def setUp(self):
super(HealthcheckTests, self).setUp()
self.useFixture(config.Config())
@staticmethod
@webob.dec.wsgify
def application(req):
return 'Hello, World!!!'
def _do_test_request(self, conf={}, path='/healthcheck',
accept='text/plain', method='GET',
server_port=80):
self.app = healthcheck.Healthcheck(self.application, conf)
req = webob.Request.blank(path, accept=accept, method=method)
req.server_port = server_port
res = req.get_response(self.app)
return res
def _do_test(self, conf={}, path='/healthcheck',
expected_code=webob.exc.HTTPOk.code,
expected_body=b'', accept='text/plain',
method='GET', server_port=80):
res = self._do_test_request(conf=conf, path=path,
accept=accept, method=method,
server_port=server_port)
self.assertEqual(expected_code, res.status_int)
self.assertEqual(expected_body, res.body)
def test_default_path_match(self):
self._do_test()
def test_default_path_not_match(self):
self._do_test(path='/toto', expected_body=b'Hello, World!!!')
def test_configured_path_match(self):
conf = {'path': '/hidden_healthcheck'}
self._do_test(conf, path='/hidden_healthcheck')
def test_configured_path_not_match(self):
conf = {'path': '/hidden_healthcheck'}
self._do_test(conf, path='/toto', expected_body=b'Hello, World!!!')
@mock.patch('oslo_middleware.healthcheck.disable_by_file.LOG')
def test_disablefile_unconfigured(self, fake_log):
fake_warn = fake_log.warning
conf = {'backends': 'disable_by_file'}
self._do_test(conf, expected_body=b'OK')
self.assertIn('disable_by_file', self.app._backends.names())
fake_warn.assert_called_once_with(
'DisableByFile healthcheck middleware '
'enabled without disable_by_file_path '
'set'
)
def test_disablefile_enabled(self):
conf = {'backends': 'disable_by_file',
'disable_by_file_path': '/foobar'}
self._do_test(conf, expected_body=b'OK')
self.assertIn('disable_by_file', self.app._backends.names())
def test_disablefile_enabled_head(self):
conf = {'backends': 'disable_by_file',
'disable_by_file_path': '/foobar'}
self._do_test(conf, expected_body=b'', method='HEAD',
expected_code=webob.exc.HTTPNoContent.code)
def test_disablefile_enabled_html_detailed(self):
conf = {'backends': 'disable_by_file',
'disable_by_file_path': '/foobar', 'detailed': True}
res = self._do_test_request(conf, accept="text/html")
self.assertIn(b'Result of 1 checks:', res.body)
self.assertIn(b'<TD>OK</TD>', res.body)
self.assertEqual(webob.exc.HTTPOk.code, res.status_int)
def test_disablefile_disabled(self):
filename = self.create_tempfiles([('test', 'foobar')])[0]
conf = {'backends': 'disable_by_file',
'disable_by_file_path': filename}
self._do_test(conf,
expected_code=webob.exc.HTTPServiceUnavailable.code,
expected_body=b'DISABLED BY FILE')
self.assertIn('disable_by_file', self.app._backends.names())
def test_disablefile_disabled_head(self):
filename = self.create_tempfiles([('test', 'foobar')])[0]
conf = {'backends': 'disable_by_file',
'disable_by_file_path': filename}
self._do_test(conf,
expected_code=webob.exc.HTTPServiceUnavailable.code,
expected_body=b'', method='HEAD')
self.assertIn('disable_by_file', self.app._backends.names())
def test_disablefile_disabled_html_detailed(self):
filename = self.create_tempfiles([('test', 'foobar')])[0]
conf = {'backends': 'disable_by_file',
'disable_by_file_path': filename, 'detailed': True}
res = self._do_test_request(conf, accept="text/html")
self.assertIn(b'<TD>DISABLED BY FILE</TD>', res.body)
self.assertEqual(webob.exc.HTTPServiceUnavailable.code,
res.status_int)
def test_two_backends(self):
filename = self.create_tempfiles([('test', 'foobar')])[0]
conf = {'backends': 'disable_by_file,disable_by_file',
'disable_by_file_path': filename}
self._do_test(conf,
expected_code=webob.exc.HTTPServiceUnavailable.code,
expected_body=b'DISABLED BY FILE\nDISABLED BY FILE')
self.assertIn('disable_by_file', self.app._backends.names())
def test_disable_by_port_file(self):
filename = self.create_tempfiles([('test', 'foobar')])[0]
conf = {'backends': 'disable_by_files_ports',
'disable_by_file_paths': "80:%s" % filename}
self._do_test(conf,
expected_code=webob.exc.HTTPServiceUnavailable.code,
expected_body=b'DISABLED BY FILE')
self.assertIn('disable_by_files_ports', self.app._backends.names())
def test_no_disable_by_port_file(self):
filename = self.create_tempfiles([('test', 'foobar')])[0]
conf = {'backends': 'disable_by_files_ports',
'disable_by_file_paths': "8000:%s" % filename}
self._do_test(conf,
expected_code=webob.exc.HTTPOk.code,
expected_body=b'OK')
self.assertIn('disable_by_files_ports', self.app._backends.names())
def test_disable_by_port_many_files(self):
filename = self.create_tempfiles([('test', 'foobar')])[0]
filename2 = self.create_tempfiles([('test2', 'foobar2')])[0]
conf = {'backends': 'disable_by_files_ports',
'disable_by_file_paths': "80:%s,81:%s" % (filename, filename2)}
self._do_test(conf,
expected_code=webob.exc.HTTPServiceUnavailable.code,
expected_body=b'DISABLED BY FILE')
self._do_test(conf,
expected_code=webob.exc.HTTPServiceUnavailable.code,
expected_body=b'DISABLED BY FILE',
server_port=81)
self.assertIn('disable_by_files_ports', self.app._backends.names())
def test_json_response(self):
expected_body = jsonutils.dumps({'detailed': False, 'reasons': []},
indent=4,
sort_keys=True).encode('utf-8')
self._do_test(expected_body=expected_body,
accept='application/json')
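# Configuration sketch (added, not part of the original tests): the
# middleware exercised above is typically enabled through a PasteDeploy
# pipeline, along these lines (paths and backend choice are examples):
#
#   [filter:healthcheck]
#   paste.filter_factory = oslo_middleware:Healthcheck.factory
#   path = /healthcheck
#   backends = disable_by_file
#   disable_by_file_path = /etc/myservice/healthcheck_disable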
|
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
from __future__ import division
import numpy as np
from ...util import transforms
from ...geometry import Rect
from ._util import arg_to_vec4, as_vec4
from .base_transform import BaseTransform
class NullTransform(BaseTransform):
""" Transform having no effect on coordinates (identity transform).
"""
glsl_map = "vec4 null_transform_map(vec4 pos) {return pos;}"
glsl_imap = "vec4 null_transform_imap(vec4 pos) {return pos;}"
Linear = True
Orthogonal = True
NonScaling = True
Isometric = True
@arg_to_vec4
def map(self, coords):
"""Map coordinates
Parameters
----------
coords : array-like
Coordinates to map.
"""
return coords
def imap(self, coords):
"""Inverse map coordinates
Parameters
----------
coords : array-like
Coordinates to inverse map.
"""
return coords
def __mul__(self, tr):
return tr
def __rmul__(self, tr):
return tr
class STTransform(BaseTransform):
""" Transform performing only scale and translate, in that order.
Parameters
----------
scale : array-like
Scale factors for X, Y, Z axes.
translate : array-like
        Translation amounts for X, Y, Z axes.
"""
glsl_map = """
vec4 st_transform_map(vec4 pos) {
return vec4(pos.xyz * $scale.xyz + $translate.xyz * pos.w, pos.w);
}
"""
glsl_imap = """
vec4 st_transform_imap(vec4 pos) {
return vec4((pos.xyz - $translate.xyz * pos.w) / $scale.xyz,
pos.w);
}
"""
Linear = True
Orthogonal = True
NonScaling = False
Isometric = False
def __init__(self, scale=None, translate=None):
super(STTransform, self).__init__()
self._update_map = True
self._update_imap = True
self._scale = np.ones(4, dtype=np.float32)
self._translate = np.zeros(4, dtype=np.float32)
s = ((1.0, 1.0, 1.0, 1.0) if scale is None else
as_vec4(scale, default=(1, 1, 1, 1)))
t = ((0.0, 0.0, 0.0, 0.0) if translate is None else
as_vec4(translate, default=(0, 0, 0, 0)))
self._set_st(s, t)
@arg_to_vec4
def map(self, coords):
"""Map coordinates
Parameters
----------
coords : array-like
Coordinates to map.
Returns
-------
coords : ndarray
Coordinates.
"""
m = np.empty(coords.shape)
m[:, :3] = (coords[:, :3] * self.scale[np.newaxis, :3] +
coords[:, 3:] * self.translate[np.newaxis, :3])
m[:, 3] = coords[:, 3]
return m
@arg_to_vec4
def imap(self, coords):
"""Invert map coordinates
Parameters
----------
coords : array-like
Coordinates to inverse map.
Returns
-------
coords : ndarray
Coordinates.
"""
m = np.empty(coords.shape)
m[:, :3] = ((coords[:, :3] -
coords[:, 3:] * self.translate[np.newaxis, :3]) /
self.scale[np.newaxis, :3])
m[:, 3] = coords[:, 3]
return m
def shader_map(self):
if self._update_map:
self._shader_map['scale'] = self.scale
self._shader_map['translate'] = self.translate
self._update_map = False
return self._shader_map
def shader_imap(self):
if self._update_imap:
self._shader_imap['scale'] = self.scale
self._shader_imap['translate'] = self.translate
self._update_imap = False
return self._shader_imap
@property
def scale(self):
return self._scale.copy()
@scale.setter
def scale(self, s):
s = as_vec4(s, default=(1, 1, 1, 1))
self._set_st(scale=s)
@property
def translate(self):
return self._translate.copy()
@translate.setter
def translate(self, t):
t = as_vec4(t, default=(0, 0, 0, 0))
self._set_st(translate=t)
def _set_st(self, scale=None, translate=None):
update = False
if scale is not None and not np.all(scale == self._scale):
self._scale[:] = scale
update = True
if translate is not None and not np.all(translate == self._translate):
self._translate[:] = translate
update = True
if update:
self._update_map = True
self._update_imap = True
self.update() # inform listeners there has been a change
def move(self, move):
"""Change the translation of this transform by the amount given.
Parameters
----------
move : array-like
The values to be added to the current translation of the transform.
"""
move = as_vec4(move, default=(0, 0, 0, 0))
self.translate = self.translate + move
def zoom(self, zoom, center=(0, 0, 0), mapped=True):
"""Update the transform such that its scale factor is changed, but
the specified center point is left unchanged.
Parameters
----------
zoom : array-like
Values to multiply the transform's current scale
factors.
center : array-like
The center point around which the scaling will take place.
mapped : bool
Whether *center* is expressed in mapped coordinates (True) or
unmapped coordinates (False).
"""
zoom = as_vec4(zoom, default=(1, 1, 1, 1))
center = as_vec4(center, default=(0, 0, 0, 0))
scale = self.scale * zoom
if mapped:
trans = center - (center - self.translate) * zoom
else:
trans = self.scale * (1 - zoom) * center + self.translate
self._set_st(scale=scale, translate=trans)
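    # Derivation: with map(x) = s*x + t, the unmapped position of a mapped
    # center c is (c - t)/s; requiring s*zoom*(c - t)/s + t' == c gives
    # t' = c - (c - t)*zoom (mapped case), while keeping an unmapped point
    # x0 fixed gives t' = s*(1 - zoom)*x0 + t.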
def as_affine(self):
m = AffineTransform()
m.scale(self.scale)
m.translate(self.translate)
return m
@classmethod
def from_mapping(cls, x0, x1):
""" Create an STTransform from the given mapping
See `set_mapping` for details.
Parameters
----------
x0 : array-like
Start.
x1 : array-like
End.
Returns
-------
t : instance of STTransform
The transform.
"""
t = cls()
t.set_mapping(x0, x1)
return t
def set_mapping(self, x0, x1):
"""Configure this transform such that it maps points x0 => x1
Parameters
----------
x0 : array-like, shape (2, 2) or (2, 3)
Start location.
x1 : array-like, shape (2, 2) or (2, 3)
End location.
Examples
--------
For example, if we wish to map the corners of a rectangle::
>>> p1 = [[0, 0], [200, 300]]
onto a unit cube::
>>> p2 = [[-1, -1], [1, 1]]
then we can generate the transform as follows::
>>> tr = STTransform()
>>> tr.set_mapping(p1, p2)
>>> assert tr.map(p1)[:,:2] == p2 # test
"""
# if args are Rect, convert to array first
if isinstance(x0, Rect):
x0 = x0._transform_in()[:3]
if isinstance(x1, Rect):
x1 = x1._transform_in()[:3]
x0 = np.asarray(x0)
x1 = np.asarray(x1)
if (x0.ndim != 2 or x0.shape[0] != 2 or x1.ndim != 2 or
x1.shape[0] != 2):
raise TypeError("set_mapping requires array inputs of shape "
"(2, N).")
        # avoid division by zero: axes with zero extent keep unit scale
        denom = x0[1] - x0[0]
        mask = denom == 0
        denom[mask] = 1.0
        s = (x1[1] - x1[0]) / denom
        s[mask] = 1.0
t = x1[0] - s * x0[0]
s = as_vec4(s, default=(1, 1, 1, 1))
t = as_vec4(t, default=(0, 0, 0, 0))
self._set_st(scale=s, translate=t)
def __mul__(self, tr):
if isinstance(tr, STTransform):
s = self.scale * tr.scale
t = self.translate + (tr.translate * self.scale)
return STTransform(scale=s, translate=t)
elif isinstance(tr, AffineTransform):
return self.as_affine() * tr
else:
return super(STTransform, self).__mul__(tr)
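    # Composition: ``self * tr`` applies ``tr`` first, so
    # self(tr(x)) = s1*(s2*x + t2) + t1 = (s1*s2)*x + (s1*t2 + t1),
    # matching the scale and translate combined above.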
def __rmul__(self, tr):
if isinstance(tr, AffineTransform):
return tr * self.as_affine()
return super(STTransform, self).__rmul__(tr)
def __repr__(self):
return ("<STTransform scale=%s translate=%s>"
% (self.scale, self.translate))
class AffineTransform(BaseTransform):
"""Affine transformation class
Parameters
----------
matrix : array-like | None
4x4 array to use for the transform.
"""
glsl_map = """
vec4 affine_transform_map(vec4 pos) {
return $matrix * pos;
}
"""
glsl_imap = """
vec4 affine_transform_imap(vec4 pos) {
return $inv_matrix * pos;
}
"""
Linear = True
Orthogonal = False
NonScaling = False
Isometric = False
def __init__(self, matrix=None):
super(AffineTransform, self).__init__()
if matrix is not None:
self.matrix = matrix
else:
self.reset()
@arg_to_vec4
def map(self, coords):
"""Map coordinates
Parameters
----------
coords : array-like
Coordinates to map.
Returns
-------
coords : ndarray
Coordinates.
"""
# looks backwards, but both matrices are transposed.
return np.dot(coords, self.matrix)
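    # Coordinates are treated as row vectors, so ``coords @ matrix`` equals
    # the usual column-vector product with the transposed matrix; storing it
    # transposed presumably also lets the same array feed the GLSL
    # ``$matrix`` uniform directly.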
@arg_to_vec4
def imap(self, coords):
"""Inverse map coordinates
Parameters
----------
coords : array-like
Coordinates to inverse map.
Returns
-------
coords : ndarray
Coordinates.
"""
return np.dot(coords, self.inv_matrix)
def shader_map(self):
fn = super(AffineTransform, self).shader_map()
fn['matrix'] = self.matrix # uniform mat4
return fn
def shader_imap(self):
fn = super(AffineTransform, self).shader_imap()
fn['inv_matrix'] = self.inv_matrix # uniform mat4
return fn
@property
def matrix(self):
return self._matrix
@matrix.setter
def matrix(self, m):
self._matrix = m
self._inv_matrix = None
self.shader_map()
self.shader_imap()
self.update()
@property
def inv_matrix(self):
if self._inv_matrix is None:
self._inv_matrix = np.linalg.inv(self.matrix)
return self._inv_matrix
@arg_to_vec4
def translate(self, pos):
"""
Translate the matrix
The translation is applied *after* the transformations already present
in the matrix.
Parameters
----------
        pos : array-like
Position to translate by.
"""
self.matrix = np.dot(self.matrix, transforms.translate(pos[0, :3]))
def scale(self, scale, center=None):
"""
Scale the matrix about a given origin.
The scaling is applied *after* the transformations already present
in the matrix.
Parameters
----------
scale : array-like
Scale factors along x, y and z axes.
center : array-like or None
The x, y and z coordinates to scale around. If None,
(0, 0, 0) will be used.
"""
scale = transforms.scale(as_vec4(scale, default=(1, 1, 1, 1))[0, :3])
if center is not None:
center = as_vec4(center)[0, :3]
scale = np.dot(np.dot(transforms.translate(-center), scale),
transforms.translate(center))
self.matrix = np.dot(self.matrix, scale)
def rotate(self, angle, axis):
"""
Rotate the matrix by some angle about a given axis.
The rotation is applied *after* the transformations already present
in the matrix.
Parameters
----------
angle : float
The angle of rotation, in degrees.
axis : array-like
The x, y and z coordinates of the axis vector to rotate around.
"""
self.matrix = np.dot(self.matrix, transforms.rotate(angle, axis))
def set_mapping(self, points1, points2):
""" Set to a 3D transformation matrix that maps points1 onto points2.
Parameters
----------
points1 : array-like, shape (4, 3)
Four starting 3D coordinates.
points2 : array-like, shape (4, 3)
Four ending 3D coordinates.
"""
# note: need to transpose because util.functions uses opposite
# of standard linear algebra order.
self.matrix = transforms.affine_map(points1, points2).T
def set_ortho(self, l, r, b, t, n, f):
"""Set ortho transform
Parameters
----------
l : float
Left.
r : float
Right.
b : float
Bottom.
t : float
Top.
n : float
Near.
f : float
Far.
"""
self.matrix = transforms.ortho(l, r, b, t, n, f)
def reset(self):
self.matrix = np.eye(4)
def __mul__(self, tr):
        if (isinstance(tr, AffineTransform) and not
                any(tr.matrix[:3, 3] != 0)):
            # direct matrix combination is only valid when the perspective
            # column is unused; otherwise defer to the other transform
            return AffineTransform(matrix=np.dot(tr.matrix, self.matrix))
        else:
            return tr.__rmul__(self)
def __repr__(self):
s = "%s(matrix=[" % self.__class__.__name__
indent = " "*len(s)
s += str(list(self.matrix[0])) + ",\n"
s += indent + str(list(self.matrix[1])) + ",\n"
s += indent + str(list(self.matrix[2])) + ",\n"
s += indent + str(list(self.matrix[3])) + "] at 0x%x)" % id(self)
return s
#class SRTTransform(BaseTransform):
# """ Transform performing scale, rotate, and translate, in that order.
#
# This transformation allows objects to be placed arbitrarily in a scene
# much the same way AffineTransform does. However, an incorrect order of
# operations in AffineTransform may result in shearing the object (if scale
# is applied after rotate) or in unpredictable translation (if scale/rotate
# is applied after translation). SRTTransform avoids these problems by
# enforcing the correct order of operations.
# """
# # TODO
class PerspectiveTransform(AffineTransform):
"""
Matrix transform that also implements perspective division.
Parameters
----------
matrix : array-like | None
4x4 array to use for the transform.
"""
def set_perspective(self, fov, aspect, near, far):
"""Set the perspective
Parameters
----------
fov : float
Field of view.
aspect : float
Aspect ratio.
near : float
Near location.
far : float
Far location.
"""
self.matrix = transforms.perspective(fov, aspect, near, far)
def set_frustum(self, l, r, b, t, n, f):
"""Set the frustum
Parameters
----------
l : float
Left.
r : float
Right.
b : float
Bottom.
t : float
Top.
n : float
Near.
f : float
Far.
"""
self.matrix = transforms.frustum(l, r, b, t, n, f)
|
|
# The Nexus software is licensed under the BSD 2-Clause license.
#
# You should have received a copy of this license with the software.
# If you did not, you can find one at the following link.
#
# http://opensource.org/licenses/bsd-license.php
from core.plugins import ProtocolPlugin
from core.decorators import *
from core.constants import *
from reqs.twisted.internet import reactor
maxundos = 3000
class RecthirdPlugin(ProtocolPlugin):
commands = {
"undo": "commandUndo",
"redo": "commandRedo",
}
hooks = {
"blockchange": "blockChanged",
"newworld": "newWorld",
}
def gotClient(self):
self.client.var_undolist = []
self.client.var_redolist = []
def blockChanged(self, x, y, z, block, selected_block, fromloc):
"Hook trigger for block changes."
world = self.client.world
originalblock = world.blockstore.raw_blocks[world.blockstore.get_offset(x, y, z)]
block = chr(block)
if len(self.client.var_undolist) < maxundos:
self.client.var_undolist.insert(0,((x,y,z),block,originalblock))
else:
del self.client.var_undolist[-1]
self.client.var_undolist.insert(0,((x,y,z),block,originalblock))
def newWorld(self, world):
"Hook to reset undolist in new worlds."
self.client.var_undolist = []
@build_list
def commandUndo(self, parts, fromloc, overriderank):
"/undo numchanges [username] - Guest\nUndoes yours or other people's changes (If Mod+)"
world = self.client.world
if len(parts) == 3:
if not self.client.isModPlus():
self.client.sendServerMessage("You are not a Mod+")
return
try:
username = parts[2].lower()
user = self.client.factory.usernames[username]
except:
self.client.sendServerMessage("%s is not online." % parts[2])
return
var_sublist = user.var_undolist[:]
undolistlength = len(user.var_undolist)
if parts[1] == "all":
def generate_changes():
try:
user = self.client.factory.usernames[username]
for index in range(undolistlength):
originalblock = user.var_undolist[index][2]
block = user.var_undolist[index][1]
i,j,k = user.var_undolist[index][0]
if not self.client.AllowedToBuild(i,j,k) and not overriderank:
self.client.sendServerMessage("You do not have permission to build here.")
return
del var_sublist[var_sublist.index(((i,j,k),block,originalblock))]
user.var_redolist.insert(0,((i,j,k),originalblock,block))
try:
world[i, j, k] = originalblock
except AssertionError:
self.client.sendServerMessage("Out of bounds undo error.")
return
user.queueTask(TASK_BLOCKSET, (i, j, k, originalblock), world=world)
user.sendBlock(i, j, k, originalblock)
yield
user.var_undolist = var_sublist
except:
self.client.sendSplitServerMessage("The user seems to have logged off before the undo could finish.")
return
else:
try:
num = int(parts[1])
                except ValueError:
self.client.sendServerMessage("The numchanges must be a number or 'all'.")
return
if num > undolistlength:
self.client.sendServerMessage("They have not made that many changes.")
return
def generate_changes():
try:
for index in range(num):
originalblock = user.var_undolist[index][2]
block = user.var_undolist[index][1]
i,j,k = user.var_undolist[index][0]
if not self.client.AllowedToBuild(i,j,k) and not overriderank:
self.client.sendServerMessage("You do not have permission to build here.")
return
del var_sublist[var_sublist.index(((i,j,k),block,originalblock))]
user.var_redolist.insert(0,((i,j,k),originalblock,block))
try:
world[i, j, k] = originalblock
except AssertionError:
self.client.sendServerMessage("Out of bounds undo error.")
return
user.queueTask(TASK_BLOCKSET, (i, j, k, originalblock), world=world)
user.sendBlock(i, j, k, originalblock)
yield
user.var_undolist = var_sublist
except:
self.client.sendSplitServerMessage("The user seems to have logged off before the undo could finish.")
return
else:
self.client.sublist = self.client.var_undolist[:]
undolistlength = len(self.client.var_undolist)
if len(parts) == 1:
self.client.sendSplitServerMessage("Please specify a number of changes to undo or 'all' (and if you are Mod+ you can specify a username)")
return
else:
if parts[1] == "all":
def generate_changes():
for index in range(undolistlength):
originalblock = self.client.var_undolist[index][2]
block = self.client.var_undolist[index][1]
i,j,k = self.client.var_undolist[index][0]
if not self.client.AllowedToBuild(i,j,k) and not overriderank:
self.client.sendServerMessage("You no longer have permission to build here.")
return
del self.client.sublist[self.client.sublist.index(((i,j,k),block,originalblock))]
self.client.var_redolist.insert(0,((i,j,k),originalblock,block))
try:
world[i, j, k] = originalblock
except AssertionError:
self.client.sendServerMessage("Out of bounds undo error.")
return
self.client.queueTask(TASK_BLOCKSET, (i, j, k, originalblock), world=world)
self.client.sendBlock(i, j, k, originalblock)
yield
self.client.var_undolist = self.client.sublist
else:
try:
num = int(parts[1])
except:
self.client.sendServerMessage("The numchanges must be a number or 'all'.")
return
if num > undolistlength:
self.client.sendServerMessage("You have not made that many changes.")
return
def generate_changes():
for index in range(num):
originalblock = self.client.var_undolist[index][2]
block = self.client.var_undolist[index][1]
i,j,k = self.client.var_undolist[index][0]
if not self.client.AllowedToBuild(i,j,k) and not overriderank:
self.client.sendServerMessage("You no longer have permission to build here.")
return
del self.client.sublist[self.client.sublist.index(((i,j,k),block,originalblock))]
self.client.var_redolist.insert(0,((i,j,k),originalblock,block))
try:
world[i, j, k] = originalblock
except AssertionError:
self.client.sendServerMessage("Out of bounds undo error.")
return
self.client.queueTask(TASK_BLOCKSET, (i, j, k, originalblock), world=world)
self.client.sendBlock(i, j, k, originalblock)
yield
self.client.var_undolist = self.client.sublist
# Now, set up a loop delayed by the reactor
block_iter = iter(generate_changes())
def do_step():
# Do 10 blocks
try:
for x in range(10): # 10 blocks per batch; with the 0.01 s delay below, roughly 1000 blocks a second
block_iter.next()
reactor.callLater(0.01, do_step) # This is how long (in seconds) it waits to run another 10 blocks
except StopIteration:
if fromloc == "user":
self.client.sendServerMessage("Your undo just completed.")
do_step()
@build_list
def commandRedo(self, parts, fromloc, overriderank):
"/redo numchanges [username] - Guest\nRedoes yours or other people's changes (If Mod+)"
world = self.client.world
if len(parts) == 3:
if not self.client.isModPlus():
self.client.sendServerMessage("You are not a Mod+")
return
try:
username = parts[2].lower()
user = self.client.factory.usernames[username]
except:
self.client.sendServerMessage("%s is not online." % parts[2])
return
var_sublist = user.var_redolist[:]
redolistlength = len(user.var_redolist)
if parts[1] == "all":
def generate_changes():
try:
user = self.client.factory.usernames[username]
for index in range(redolistlength):
originalblock = user.var_redolist[index][2]
block = user.var_redolist[index][1]
i,j,k = user.var_redolist[index][0]
if not self.client.AllowedToBuild(i,j,k) and not overriderank:
self.client.sendServerMessage("You do not have permission to build here.")
return
del var_sublist[var_sublist.index(((i,j,k),block,originalblock))]
user.var_undolist.insert(0,((i,j,k),originalblock,block))
try:
world[i, j, k] = originalblock
except AssertionError:
self.client.sendServerMessage("Out of bounds redo error.")
return
user.queueTask(TASK_BLOCKSET, (i, j, k, originalblock), world=world)
user.sendBlock(i, j, k, originalblock)
yield
user.var_redolist = var_sublist
except:
self.client.sendSplitServerMessage("The user seems to have logged off before the redo could finish.")
return
else:
try:
num = int(parts[1])
except:
self.client.sendServerMessage("The numchanges must be a number or 'all'.")
return
if num > redolistlength:
self.client.sendServerMessage("They have not made that many undos.")
return
def generate_changes():
try:
for index in range(num):
originalblock = user.var_redolist[index][2]
block = user.var_redolist[index][1]
i,j,k = user.var_redolist[index][0]
if not self.client.AllowedToBuild(i,j,k) and not overriderank:
self.client.sendServerMessage("You do not have permission to build here.")
return
del var_sublist[var_sublist.index(((i,j,k),block,originalblock))]
user.var_undolist.insert(0,((i,j,k),originalblock,block))
try:
world[i, j, k] = originalblock
except AssertionError:
self.client.sendServerMessage("Out of bounds redo error.")
return
user.queueTask(TASK_BLOCKSET, (i, j, k, originalblock), world=world)
user.sendBlock(i, j, k, originalblock)
yield
user.var_redolist = var_sublist
except:
self.client.sendSplitServerMessage("The user seems to have logged off before the redo could finish.")
return
else:
self.client.sublist = self.client.var_redolist[:]
redolistlength = len(self.client.var_redolist)
if len(parts) == 1:
self.client.sendSplitServerMessage("Please specify a number of changes to redo or 'all' (and if you are Mod+ you can specify a username)")
return
else:
if parts[1] == "all":
def generate_changes():
for index in range(redolistlength):
originalblock = self.client.var_redolist[index][2]
block = self.client.var_redolist[index][1]
i,j,k = self.client.var_redolist[index][0]
if not self.client.AllowedToBuild(i,j,k) and not overriderank:
self.client.sendServerMessage("You no longer have permission to build here.")
return
del self.client.sublist[self.client.sublist.index(((i,j,k),block,originalblock))]
self.client.var_undolist.insert(0,((i,j,k),originalblock,block))
try:
world[i, j, k] = originalblock
except AssertionError:
self.client.sendServerMessage("Out of bounds redo error.")
return
self.client.queueTask(TASK_BLOCKSET, (i, j, k, originalblock), world=world)
self.client.sendBlock(i, j, k, originalblock)
yield
self.client.var_redolist = self.client.sublist
else:
try:
num = int(parts[1])
except:
self.client.sendServerMessage("The numchanges must be a number or 'all'.")
return
if num > redolistlength:
self.client.sendServerMessage("You have not made that many undos.")
return
def generate_changes():
for index in range(num):
originalblock = self.client.var_redolist[index][2]
block = self.client.var_redolist[index][1]
i,j,k = self.client.var_redolist[index][0]
if not self.client.AllowedToBuild(i,j,k) and not overriderank:
self.client.sendServerMessage("You no longer have permission to build here.")
return
del self.client.sublist[self.client.sublist.index(((i,j,k),block,originalblock))]
self.client.var_undolist.insert(0,((i,j,k),originalblock,block))
try:
world[i, j, k] = originalblock
except AssertionError:
self.client.sendServerMessage("Out of bounds redo error.")
return
self.client.queueTask(TASK_BLOCKSET, (i, j, k, originalblock), world=world)
self.client.sendBlock(i, j, k, originalblock)
yield
self.client.var_redolist = self.client.sublist
# Now, set up a loop delayed by the reactor
block_iter = iter(generate_changes())
def do_step():
# Do 10 blocks
try:
for x in range(10): # 10 blocks per batch; with the 0.01 s delay below, roughly 1000 blocks a second
block_iter.next()
reactor.callLater(0.01, do_step) # This is how long (in seconds) it waits to run another 10 blocks
except StopIteration:
if fromloc == "user":
self.client.sendServerMessage("Your redo just completed.")
do_step()
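# Editor's note: the do_step/callLater pattern above is a generic way to chew
# through a long job without blocking the Twisted reactor. A minimal sketch of
# the same idea, with hypothetical names, assuming any iterable of work items:
def _chunked_work_sketch(items, work, chunk=10, delay=0.01):
    "Apply work() to items, `chunk` at a time per reactor tick (sketch only)."
    it = iter(items)
    def step():
        try:
            for _ in range(chunk):
                work(it.next())  # Python 2 iterator protocol, as above
            reactor.callLater(delay, step)
        except StopIteration:
            pass  # all items processed
    step()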
|
|
import copy_reg
import imp
import os
import re
import sys
import types
from importlib import import_module
from os.path import abspath, join, exists
from seisflows.tools import msg
from seisflows.tools.err import ParameterError
from seisflows.tools import unix
from seisflows.tools.tools import loadjson, loadobj, loadpy, savejson, saveobj
from seisflows.tools.tools import module_exists, package_exists
# SeisFlows consists of interacting 'system', 'preprocess', 'solver',
# 'postprocess', 'optimize', and 'workflow' objects. Each corresponds
# simultaneously to a module in the SeisFlows source code, a class that is
# instantiated and made accessible via sys.modules, and a parameter in a
# global dictionary. Once in memory, these objects can be thought of as
# comprising the complete 'state' of a SeisFlows session.
# The following list is one of the few hardwired aspects of the whole
# SeisFlows package. Any changes may result in circular imports or other
# problems.
names = []
names += ['system']
names += ['preprocess']
names += ['solver']
names += ['postprocess']
names += ['optimize']
names += ['workflow']
def config():
""" Instantiates SeisFlows objects and makes them globally accessible by
registering them in sys.modules
"""
# parameters and paths must already be loaded
# (normally this is done by sfsubmit)
assert 'seisflows_parameters' in sys.modules
assert 'seisflows_paths' in sys.modules
# check if objects already exist on disk
if exists(_output()):
print msg.WarningOverwrite
sys.exit()
# instantiate and register objects
for name in names:
sys.modules['seisflows_'+name] = custom_import(name)()
# error checking
for name in names:
sys.modules['seisflows_'+name].check()
if not hasattr(sys.modules['seisflows_parameters'], 'workflow'.upper()):
print msg.MissingParameter_Worfklow
sys.exit(-1)
if not hasattr(sys.modules['seisflows_parameters'], 'system'.upper()):
print msg.MissingParameter_System
sys.exit(-1)
def save():
""" Exports session to disk
"""
unix.mkdir(_output())
for name in ['parameters', 'paths']:
fullfile = join(_output(), 'seisflows_'+name+'.json')
savejson(fullfile, sys.modules['seisflows_'+name].__dict__)
for name in names:
fullfile = join(_output(), 'seisflows_'+name+'.p')
saveobj(fullfile, sys.modules['seisflows_'+name])
def load(path):
""" Imports session from disk
"""
for name in ['parameters', 'paths']:
fullfile = join(_full(path), 'seisflows_'+name+'.json')
sys.modules['seisflows_'+name] = Dict(loadjson(fullfile))
for name in names:
fullfile = join(_full(path), 'seisflows_'+name+'.p')
sys.modules['seisflows_'+name] = loadobj(fullfile)
class Dict(object):
""" Dictionary-like object for holding parameters or paths
"""
def __iter__(self):
return iter(sorted(self.__dict__.keys()))
def __getattr__(self, key):
return self.__dict__[key]
def __getitem__(self, key):
return self.__dict__[key]
def __setattr__(self, key, val):
if key in self.__dict__:
raise TypeError("Once defined, parameters cannot be changed.")
self.__dict__[key] = val
def __delattr__(self, key):
if key in self.__dict__:
raise TypeError("Once defined, parameters cannot be deleted.")
raise KeyError
def update(self, newdict):
super(Dict, self).__setattr__('__dict__', newdict)
def __init__(self, newdict):
self.update(newdict)
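# Editor's sketch (hypothetical parameter name): Dict exposes its keys both as
# read-only attributes and as items; reassigning a defined key raises TypeError.
def _dict_usage_sketch():
    p = Dict({'NITER': 10})
    assert p.NITER == 10 and p['NITER'] == 10
    try:
        p.NITER = 20
    except TypeError:
        return True  # parameters are frozen once defined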
class Null(object):
""" Always and reliably does nothing
"""
def __init__(self, *args, **kwargs):
pass
def __call__(self, *args, **kwargs):
return self
def __nonzero__(self):
return False
def __getattr__(self, key):
return self
def __setattr__(self, key, val):
return self
def __delattr__(self, key):
return self
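# Editor's sketch: Null is a null-object; any attribute access, call, or
# assignment yields Null again, and it is always falsy.
def _null_usage_sketch():
    n = Null()
    assert not n
    assert n.anything.at.all() is n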
def custom_import(*args):
""" Imports SeisFlows module and extracts class of same name. For example,
custom_import('workflow', 'inversion')
imports 'seisflows.workflow.inversion' and, from this module, extracts
class 'inversion'.
"""
# parse input arguments
if len(args) == 0:
raise Exception(msg.ImportError1)
if args[0] not in names:
raise Exception(msg.ImportError2)
if len(args) == 1:
args += (_try(args[0]),)
if not args[1]:
return Null
# generate package list
packages = ['seisflows']
# does module exist?
_exists = False
for package in packages:
full_dotted_name = package+'.'+args[0]+'.'+args[1]
if module_exists(full_dotted_name):
_exists = True
break
if not _exists:
raise Exception(msg.ImportError3 %
(args[0], args[1], args[0].upper()))
# import module
module = import_module(full_dotted_name)
# extract class
if hasattr(module, args[1]):
return getattr(module, args[1])
else:
raise Exception(msg.ImportError4 %
(args[0], args[1], args[1]))
def tilde_expand(mydict):
""" Expands tilde character in path strings
"""
for key,val in mydict.items():
if type(val) not in [str, unicode]:
raise Exception
if val[0:2] == '~/':
mydict[key] = os.getenv('HOME') +'/'+ val[2:]
return mydict
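# Editor's sketch (hypothetical path, assuming $HOME is set): a leading '~/'
# is replaced with the user's home directory.
def _tilde_expand_sketch():
    return tilde_expand({'output': '~/scratch/output'})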
# utility functions
def _par(key):
return sys.modules['seisflows_parameters'][key.upper()]
def _path(key):
return sys.modules['seisflows_paths'][key.upper()]
def _try(key):
try:
return _par(key)
except KeyError:
return None
def _output():
try:
return _full(_path('output'))
except:
return _full(join('.', 'output'))
def _full(path):
try:
return join(abspath(path), '')
except:
raise IOError
# The following code changes how instance methods are handled by pickle.
# Placing it here, in this module, ensures that the pickle changes will be in
# effect for all SeisFlows workflows.
# For relevant discussion, see the Stack Overflow thread "Can't pickle
# <type 'instancemethod'> when using python's multiprocessing Pool.map()".
def _pickle_method(method):
func_name = method.im_func.__name__
obj = method.im_self
cls = method.im_class
return _unpickle_method, (func_name, obj, cls)
def _unpickle_method(func_name, obj, cls):
for cls in cls.mro():
try:
func = cls.__dict__[func_name]
except KeyError:
pass
else:
break
return func.__get__(obj, cls)
copy_reg.pickle(types.MethodType, _pickle_method, _unpickle_method)
|
|
# -*- coding: utf-8 -*-
"""
celery.worker.autoreload
~~~~~~~~~~~~~~~~~~~~~~~~
This module implements automatic module reloading
"""
from __future__ import absolute_import
import hashlib
import os
import select
import sys
import time
from collections import defaultdict
from threading import Event
from kombu.utils import eventio
from kombu.utils.encoding import ensure_bytes
from celery import bootsteps
from celery.five import items
from celery.platforms import ignore_errno
from celery.utils.imports import module_file
from celery.utils.log import get_logger
from celery.utils.threads import bgThread
from .components import Pool
try: # pragma: no cover
import pyinotify
_ProcessEvent = pyinotify.ProcessEvent
except ImportError: # pragma: no cover
pyinotify = None # noqa
_ProcessEvent = object # noqa
__all__ = [
'WorkerComponent', 'Autoreloader', 'Monitor', 'BaseMonitor',
'StatMonitor', 'KQueueMonitor', 'InotifyMonitor', 'file_hash',
]
logger = get_logger(__name__)
class WorkerComponent(bootsteps.StartStopStep):
label = 'Autoreloader'
conditional = True
requires = (Pool, )
def __init__(self, w, autoreload=None, **kwargs):
self.enabled = w.autoreload = autoreload
w.autoreloader = None
def create(self, w):
w.autoreloader = self.instantiate(w.autoreloader_cls, w)
return w.autoreloader if not w.use_eventloop else None
def register_with_event_loop(self, w, hub):
w.autoreloader.register_with_event_loop(hub)
hub.on_close.add(w.autoreloader.on_event_loop_close)
def file_hash(filename, algorithm='md5'):
hobj = hashlib.new(algorithm)
with open(filename, 'rb') as f:
for chunk in iter(lambda: f.read(2 ** 20), ''):
hobj.update(ensure_bytes(chunk))
return hobj.digest()
class BaseMonitor(object):
def __init__(self, files,
on_change=None, shutdown_event=None, interval=0.5):
self.files = files
self.interval = interval
self._on_change = on_change
self.modify_times = defaultdict(int)
self.shutdown_event = shutdown_event or Event()
def start(self):
raise NotImplementedError('Subclass responsibility')
def stop(self):
pass
def on_change(self, modified):
if self._on_change:
return self._on_change(modified)
def on_event_loop_close(self, hub):
pass
class StatMonitor(BaseMonitor):
"""File change monitor based on the ``stat`` system call."""
def _mtimes(self):
return ((f, self._mtime(f)) for f in self.files)
def _maybe_modified(self, f, mt):
return mt is not None and self.modify_times[f] != mt
def register_with_event_loop(self, hub):
hub.call_repeatedly(2.0, self.find_changes)
def find_changes(self):
maybe_modified = self._maybe_modified
modified = dict((f, mt) for f, mt in self._mtimes()
if maybe_modified(f, mt))
if modified:
self.on_change(modified)
self.modify_times.update(modified)
def start(self):
while not self.shutdown_event.is_set():
self.find_changes()
time.sleep(self.interval)
@staticmethod
def _mtime(path):
try:
return os.stat(path).st_mtime
except Exception:
pass
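# Editor's sketch (hypothetical file list): construct a StatMonitor that polls
# the given files once a second and logs whatever changed.
def _stat_monitor_sketch(files):
    def on_change(modified):
        logger.info('modified: %r', list(modified))
    return StatMonitor(files, on_change=on_change, interval=1.0)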
class KQueueMonitor(BaseMonitor):
"""File change monitor based on BSD kernel event notifications"""
def __init__(self, *args, **kwargs):
super(KQueueMonitor, self).__init__(*args, **kwargs)
self.filemap = dict((f, None) for f in self.files)
self.fdmap = {}
def register_with_event_loop(self, hub):
if eventio.kqueue is not None:
self._kq = eventio._kqueue()
self.add_events(self._kq)
self._kq.on_file_change = self.handle_event
hub.add_reader(self._kq._kqueue, self._kq.poll, 0)
def on_event_loop_close(self, hub):
self.close(self._kq)
def add_events(self, poller):
for f in self.filemap:
self.filemap[f] = fd = os.open(f, os.O_RDONLY)
self.fdmap[fd] = f
poller.watch_file(fd)
def handle_event(self, events):
self.on_change([self.fdmap[e.ident] for e in events])
def start(self):
self.poller = eventio.poll()
self.add_events(self.poller)
self.poller.on_file_change = self.handle_event
while not self.shutdown_event.is_set():
self.poller.poll(1)
def close(self, poller):
for f, fd in items(self.filemap):
if fd is not None:
poller.unregister(fd)
with ignore_errno('EBADF'): # pragma: no cover
os.close(fd)
self.filemap.clear()
self.fdmap.clear()
def stop(self):
self.close(self.poller)
self.poller.close()
class InotifyMonitor(_ProcessEvent):
"""File change monitor based on Linux kernel `inotify` subsystem"""
def __init__(self, modules, on_change=None, **kwargs):
assert pyinotify
self._modules = modules
self._on_change = on_change
self._wm = None
self._notifier = None
def register_with_event_loop(self, hub):
self.create_notifier()
hub.add_reader(self._wm.get_fd(), self.on_readable)
def on_event_loop_close(self, hub):
pass
def on_readable(self):
self._notifier.read_events()
self._notifier.process_events()
def create_notifier(self):
self._wm = pyinotify.WatchManager()
self._notifier = pyinotify.Notifier(self._wm, self)
add_watch = self._wm.add_watch
flags = pyinotify.IN_MODIFY | pyinotify.IN_ATTRIB
for m in self._modules:
add_watch(m, flags)
def start(self):
try:
self.create_notifier()
self._notifier.loop()
finally:
if self._wm:
self._wm.close()
# Notifier.close is called at the end of Notifier.loop
self._wm = self._notifier = None
def stop(self):
pass
def process_(self, event):
self.on_change([event.path])
process_IN_ATTRIB = process_IN_MODIFY = process_
def on_change(self, modified):
if self._on_change:
return self._on_change(modified)
def default_implementation():
if hasattr(select, 'kqueue') and eventio.kqueue is not None:
return 'kqueue'
elif sys.platform.startswith('linux') and pyinotify:
return 'inotify'
else:
return 'stat'
implementations = {'kqueue': KQueueMonitor,
'inotify': InotifyMonitor,
'stat': StatMonitor}
Monitor = implementations[
os.environ.get('CELERYD_FSNOTIFY') or default_implementation()]
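# Editor's sketch: mirror the module-level Monitor selection above, optionally
# forcing an implementation name such as 'stat' (the portable fallback).
def _select_monitor_sketch(force=None):
    return implementations[force or default_implementation()]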
class Autoreloader(bgThread):
"""Tracks changes in modules and fires reload commands"""
Monitor = Monitor
def __init__(self, controller, modules=None, monitor_cls=None, **options):
super(Autoreloader, self).__init__()
self.controller = controller
app = self.controller.app
self.modules = app.loader.task_modules if modules is None else modules
self.options = options
self._monitor = None
self._hashes = None
self.file_to_module = {}
def on_init(self):
files = self.file_to_module
files.update(dict(
(module_file(sys.modules[m]), m) for m in self.modules))
self._monitor = self.Monitor(
files, self.on_change,
shutdown_event=self._is_shutdown, **self.options)
self._hashes = dict([(f, file_hash(f)) for f in files])
def register_with_event_loop(self, hub):
if self._monitor is None:
self.on_init()
self._monitor.register_with_event_loop(hub)
def on_event_loop_close(self, hub):
if self._monitor is not None:
self._monitor.on_event_loop_close(hub)
def body(self):
self.on_init()
with ignore_errno('EINTR', 'EAGAIN'):
self._monitor.start()
def _maybe_modified(self, f):
if os.path.exists(f):
digest = file_hash(f)
if digest != self._hashes[f]:
self._hashes[f] = digest
return True
return False
def on_change(self, files):
modified = [f for f in files if self._maybe_modified(f)]
if modified:
names = [self.file_to_module[module] for module in modified]
logger.info('Detected modified modules: %r', names)
self._reload(names)
def _reload(self, modules):
self.controller.reload(modules, reload=True)
def stop(self):
if self._monitor:
self._monitor.stop()
|
|
# -*- coding: utf-8 -*-
"""
Written by Daniel M. Aukes and CONTRIBUTORS
Email: danaukes<at>asu.edu.
Please see LICENSE for full license.
"""
import popupcad
import shapely.geometry
import numpy
import qt.QtCore as qc
import qt.QtGui as qg
import numpy.linalg
try: #Hack to ensure Python 2 & 3 support
    from itertools import izip as zip  # 'import itertools.izip' fails: izip is not a module
except ImportError:
pass
import shapely.geometry as sg
from popupcad.filetypes.genericshapebase import GenericShapeBase
class GenericLine(GenericShapeBase):
@classmethod
def condition_loop(cls,loop):
return cls._condition_loop(loop,remove_loop_reduncancy=False,remove_forward_redundancy=False)
def outputinteractive(self):
from popupcad.graphics2d.interactive import InteractiveLine
return InteractiveLine(self)
def outputstatic(self, *args, **kwargs):
from popupcad.graphics2d.static import StaticLine
return StaticLine(self, *args, **kwargs)
def gen_painterpath(self, exterior, interiors):
path = qg.QPainterPath()
path.addPolygon(self.generateQPolygon(exterior))
return path
def to_shapely(self,scaling = 1):
exterior_p = self.exteriorpoints(scaling = scaling)
obj = sg.LineString(exterior_p)
return obj
def segments(self):
return self.segments_open()
def output_dxf(self,model_space,layer = None):
dxfattribs = {}
if layer is not None:
dxfattribs['layer']=layer
model_space.add_lwpolyline(self.exteriorpoints(),dxfattribs = dxfattribs)
class GenericPolyline(GenericShapeBase):
@classmethod
def condition_loop(cls,loop):
return cls._condition_loop(loop,remove_loop_reduncancy=False)
@classmethod
def remove_redundant_points(cls, points, scaling=1):
return GenericShapeBase.remove_redundant_points(points,scaling,loop_test = False)
def outputinteractive(self):
from popupcad.graphics2d.interactive import InteractivePath
return InteractivePath(self)
def outputstatic(self, *args, **kwargs):
from popupcad.graphics2d.static import StaticPath
return StaticPath(self, *args, **kwargs)
def gen_painterpath(self, exterior, interiors):
path = qg.QPainterPath()
path.addPolygon(self.generateQPolygon(exterior))
return path
def to_shapely(self,scaling = 1):
exterior_p = self.exteriorpoints(scaling = scaling)
try:
obj = sg.LineString(exterior_p)
return obj
except ValueError as e:
if e.args[0]=='LineStrings must have at least 2 coordinate tuples':
return sg.LineString()
else:
raise
def segments(self):
return self.segments_open()
def fill(self):
polygons = []
for loop in [self.get_exterior()]+self.get_interiors():
newloop = [vertex.copy(identical = False) for vertex in loop]
polygons.append(GenericPoly(newloop,[],self.is_construction()))
return polygons
def output_dxf(self,model_space,layer = None):
dxfattribs = {}
if layer is not None:
dxfattribs['layer']=layer
model_space.add_lwpolyline(self.exteriorpoints(),dxfattribs = dxfattribs)
def addvertex_exterior(self, vertex, special=False):
self.addvertex_exterior_special(vertex,special)
class GenericPoly(GenericShapeBase):
def outputinteractive(self):
from popupcad.graphics2d.interactive import InteractivePoly
return InteractivePoly(self)
def outputstatic(self, *args, **kwargs):
from popupcad.graphics2d.static import StaticPoly
return StaticPoly(self, *args, **kwargs)
def gen_painterpath(self, exterior, interiors):
path = qg.QPainterPath()
for item in [exterior] + interiors:
path.addPolygon(self.generateQPolygon(item))
path.closeSubpath()
return path
def triangles_inner(self):
from pypoly2tri.shapes import Point
from pypoly2tri.cdt import CDT
new = self.copy(identical = False)
new._condition(round_vertices=False,
test_rounded_vertices = True,
remove_forward_redundancy = True,
remove_loop_reduncancy = True,
terminate_with_start = False,
decimal_places = popupcad.geometry_round_value)
exterior = [Point(*point) for point in new.exteriorpoints(scaling = popupcad.triangulation_scaling)]
interiors = [[Point(*point) for point in interior]
for interior in new.interiorpoints(scaling = popupcad.triangulation_scaling)]
cdt = CDT(exterior)
[cdt.AddHole(interior) for interior in interiors]
cdt.Triangulate()
return cdt
def triangles3(self):
cdt = self.triangles_inner()
tris = [tri.toList() for tri in cdt.GetTriangles()]
tris = (numpy.array(tris)/popupcad.triangulation_scaling).tolist()
return tris
def to_shapely(self,scaling = 1):
exterior_p = self.exteriorpoints(scaling = scaling)
interiors_p = self.interiorpoints(scaling = scaling)
obj = sg.Polygon(exterior_p, interiors_p)
return obj
def addvertex_exterior(self, vertex, special=False):
self.addvertex_exterior_special(vertex,special)
def segments(self):
return self.segments_closed()
def mass_properties(self,density,z_lower,z_upper):
z_lower = z_lower/popupcad.SI_length_scaling
z_upper = z_upper/popupcad.SI_length_scaling
tris = numpy.array(self.triangles3())/popupcad.SI_length_scaling
shape = list(tris.shape)
shape[2]+=1
z_center = (z_lower+z_upper)/2
tris2 = numpy.ones(shape)
tris2[:,:,:2] = tris
areas = abs(numpy.array([numpy.linalg.det(tri) for tri in tris2])/2)
area = areas.sum()
tris2[:,:,2] = z_center
centroids = tris2.sum(1)/3
centroid = (areas*centroids.T).sum(1)/areas.sum()
thickness = z_upper - z_lower
volume = area*thickness
mass = volume*density
return area,centroid,volume,mass,tris
def inertia_tensor(self,about_point,density,z_lower,z_upper,tris):
z_lower = z_lower/popupcad.SI_length_scaling
z_upper = z_upper/popupcad.SI_length_scaling
import idealab_tools.geometry.triangle as triangle
tris3 = [triangle.Triangle(*tri) for tri in tris]
tets = [tet for tri in tris3 for tet in tri.extrude(z_lower,z_upper)]
Is = numpy.array([tet.I(density,about_point) for tet in tets])
I = Is.sum(0)
return I
def hollow(self):
polylines = []
for loop in [self.get_exterior()]+self.get_interiors():
newloop = [vertex.copy(identical = False) for vertex in loop+loop[0:1]]
polylines.append(GenericPolyline(newloop,[],self.is_construction()))
return polylines
def output_dxf(self,model_space,layer = None):
exterior = self.exteriorpoints()
dxfattribs = {'closed':True}
if layer is not None:
dxfattribs['layer']=layer
model_space.add_lwpolyline(exterior,dxfattribs=dxfattribs)
for interior in self.interiorpoints():
dxfattribs = {'closed':True}
if layer is not None:
dxfattribs['layer']=layer
model_space.add_lwpolyline(interior,dxfattribs=dxfattribs)
    # Returns the average of the shape's exterior points, scaled to SI units.
def get_center(self):
points = self.exteriorpoints()
x_values = [point[0]/popupcad.SI_length_scaling for point in points]
y_values = [point[1]/popupcad.SI_length_scaling for point in points]
x = float(sum(x_values)) / len(x_values)
y = float(sum(y_values)) / len(y_values)
return (x, y)
def exterior_points_from_center(self):
center = self.get_center()
points = self.exteriorpoints()
x_values = [point[0]/popupcad.SI_length_scaling - center[0] for point in points]
y_values = [point[1]/popupcad.SI_length_scaling - center[1] for point in points]
return list(zip(x_values, y_values))
def extrudeVertices(self, extrusion_factor, z0=0):
"""Extrudes the vertices of a shape and returns the three dimensional values
"""
a = self.triangles3()
vertices = []
for coord in a:
for dec in coord:
vertices.append(dec[0]) #x-axis
vertices.append(dec[1]) #y-axis
vertices.append(z0) #z-axis
for coord in a:
for dec in reversed(coord):
vertices.append(dec[0]) #x-axis
vertices.append(dec[1]) #y-axis
vertices.append(z0 + extrusion_factor) #z-axis
top_edges = self.exteriorpoints_3d(z0=z0)
bottom_edges = self.exteriorpoints_3d(z0=z0 + extrusion_factor)
sideTriangles = list(zip(top_edges, top_edges[1:] + top_edges[:1], bottom_edges))
sideTriangles2 = list(zip(bottom_edges[1:] + bottom_edges[:1], bottom_edges, top_edges[1:] + top_edges[:1]))
sideTriangles.extend(sideTriangles2)
sideTriangles = [list(triangle) for triangle in sideTriangles]
import itertools
sideTriangles = list(itertools.chain.from_iterable(sideTriangles))
sideTriangles = [list(point) for point in sideTriangles]
sideTriangles = list(itertools.chain.from_iterable(sideTriangles))
vertices.extend(sideTriangles)
return vertices
class GenericCircle(GenericShapeBase):
@classmethod
def condition_loop(cls,loop):
return cls._condition_loop(loop,remove_loop_reduncancy=False,remove_forward_redundancy=False)
def outputinteractive(self):
from popupcad.graphics2d.interactive import InteractiveCircle
return InteractiveCircle(self)
def outputstatic(self, *args, **kwargs):
from popupcad.graphics2d.static import StaticCircle
return StaticCircle(self, *args, **kwargs)
def gen_painterpath(self, exterior, interiors):
path = qg.QPainterPath()
center = numpy.array(exterior[0])
edge = numpy.array(exterior[1])
v = edge - center
r = v.dot(v)**.5
point1 = center - r
point2 = center + r
point1 = qc.QPointF(*point1)
point2 = qc.QPointF(*point2)
rect = qc.QRectF(point1, point2)
path.addEllipse(rect)
return path
def to_shapely(self,scaling = 1):
exterior_p = self.exteriorpoints(scaling = scaling)
exterior = numpy.array(exterior_p)
center = exterior[0]
v = exterior[1] - exterior[0]
r = v.dot(v)**.5
obj = shapely.geometry.Point(*center).buffer(r)
obj = sg.Polygon(obj.boundary)
return obj
def segments(self):
return self.segments_closed()
class GenericTwoPointRect(GenericShapeBase):
@classmethod
def condition_loop(cls,loop):
return cls._condition_loop(loop,remove_loop_reduncancy=False,remove_forward_redundancy=False)
def outputinteractive(self):
from popupcad.graphics2d.interactive import InteractiveRect2Point
return InteractiveRect2Point(self)
def outputstatic(self, *args, **kwargs):
from popupcad.graphics2d.static import StaticRect2Point
return StaticRect2Point(self, *args, **kwargs)
def gen_painterpath(self, exterior, interiors):
path = qg.QPainterPath()
points = [qc.QPointF(*point) for point in exterior]
rect = qc.QRectF(*points)
path.addRect(rect)
return path
def to_shapely(self,scaling = 1):
exterior_p = self.exteriorpoints(scaling = scaling)
corner1 = exterior_p[0]
corner2 = (exterior_p[0][0], exterior_p[1][1])
corner3 = exterior_p[1]
corner4 = (exterior_p[1][0], exterior_p[0][1])
corners = [corner1, corner2, corner3, corner4]
obj = sg.Polygon(corners)
return obj
def segments(self):
return self.segments_closed()
if __name__=='__main__':
a = GenericPoly.gen_from_point_lists([[0,0],[0,1],[1,2],[2,1],[2,-1],[1,-2],[0,-1]],[])
# area,centroid,I = a.mass_props(1,-.1,.1)
z_lower = -.1
z_upper = .1
density = 1
area,centroid,volume,mass,tris = a.mass_properties(density,z_lower,z_upper)
about_point = centroid
I = a.inertia_tensor(about_point,density,z_lower,z_upper,tris)
area2 = a.trueArea()
print(area,area2)
|
|
import datetime
import hashlib
from werkzeug.security import generate_password_hash, check_password_hash
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from flask import current_app, request
from flask.ext.login import UserMixin, AnonymousUserMixin
from . import db, login_manager
class Permission:
FOLLOW = 0x01
COMMENT = 0x02
WRITE_ARTICLES = 0x04
MODERATE_COMMENTS = 0x08
ADMINISTER = 0x80
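# Editor's sketch (hypothetical values): roles OR these flags together, and a
# check passes only when every requested bit is present (see User.can below).
def _permission_check_sketch():
    user_perms = Permission.FOLLOW | Permission.COMMENT | Permission.WRITE_ARTICLES
    assert user_perms & Permission.COMMENT == Permission.COMMENT
    assert user_perms & Permission.ADMINISTER != Permission.ADMINISTER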
class Role(db.Model):
__tablename__ = 'roles'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64), unique=True)
default = db.Column(db.Boolean, default=False, index=True)
permissions = db.Column(db.Integer)
users = db.relationship('User', backref='role', lazy='dynamic')
@staticmethod
def insert_roles():
roles = {
'User': (Permission.FOLLOW |
Permission.COMMENT |
Permission.WRITE_ARTICLES, True),
'Moderator': (Permission.FOLLOW |
Permission.COMMENT |
Permission.WRITE_ARTICLES |
Permission.MODERATE_COMMENTS, False),
'Administrator': (0xff, False)
}
for r in roles:
role = Role.query.filter_by(name=r).first()
if role is None:
role = Role(name=r)
role.permissions = roles[r][0]
role.default = roles[r][1]
db.session.add(role)
db.session.commit()
def __repr__(self):
return '<Role %r>' % self.name
class User(UserMixin, db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String(64), unique=True, index=True)
username = db.Column(db.String(64), unique=True, index=True)
role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))
password_hash = db.Column(db.String(128))
confirmed = db.Column(db.Boolean, default=False)
name = db.Column(db.String(64))
location = db.Column(db.String(64))
about_me = db.Column(db.Text())
member_since = db.Column(db.DateTime(), default=datetime.datetime.utcnow)
last_seen = db.Column(db.DateTime(), default=datetime.datetime.utcnow)
avatar_hash = db.Column(db.String(32))
votes = db.relationship('Vote', backref='user', lazy='dynamic')
@staticmethod
def generate_fake(count=100):
from sqlalchemy.exc import IntegrityError
from random import seed
import forgery_py
seed()
for i in range(count):
u = User(email=forgery_py.internet.email_address(),
username=forgery_py.internet.user_name(True),
password=forgery_py.lorem_ipsum.word(),
confirmed=True,
name=forgery_py.name.full_name(),
location=forgery_py.address.city(),
about_me=forgery_py.lorem_ipsum.sentence(),
member_since=forgery_py.date.date(True))
db.session.add(u)
try:
db.session.commit()
except IntegrityError:
db.session.rollback()
def __init__(self, **kwargs):
super(User, self).__init__(**kwargs)
if self.role is None:
if self.email == current_app.config['FLASKY_ADMIN']:
self.role = Role.query.filter_by(permissions=0xff).first()
if self.role is None:
self.role = Role.query.filter_by(default=True).first()
if self.email is not None and self.avatar_hash is None:
self.avatar_hash = hashlib.md5(
self.email.encode('utf-8')).hexdigest()
@property
def password(self):
raise AttributeError('password is not a readable attribute')
@password.setter
def password(self, password):
self.password_hash = generate_password_hash(password)
def verify_password(self, password):
return check_password_hash(self.password_hash, password)
def generate_confirmation_token(self, expiration=3600):
s = Serializer(current_app.config['SECRET_KEY'], expiration)
return s.dumps({'confirm': self.id})
def confirm(self, token):
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except:
return False
if data.get('confirm') != self.id:
return False
self.confirmed = True
db.session.add(self)
return True
def generate_reset_token(self, expiration=3600):
s = Serializer(current_app.config['SECRET_KEY'], expiration)
return s.dumps({'reset': self.id})
def reset_password(self, token, new_password):
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except:
return False
if data.get('reset') != self.id:
return False
self.password = new_password
db.session.add(self)
return True
def generate_email_change_token(self, new_email, expiration=3600):
s = Serializer(current_app.config['SECRET_KEY'], expiration)
return s.dumps({'change_email': self.id, 'new_email': new_email})
def change_email(self, token):
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except:
return False
if data.get('change_email') != self.id:
return False
new_email = data.get('new_email')
if new_email is None:
return False
if self.query.filter_by(email=new_email).first() is not None:
return False
self.email = new_email
self.avatar_hash = hashlib.md5(
self.email.encode('utf-8')).hexdigest()
db.session.add(self)
return True
def can(self, permissions):
return self.role is not None and \
(self.role.permissions & permissions) == permissions
def is_administrator(self):
return self.can(Permission.ADMINISTER)
def ping(self):
self.last_seen = datetime.datetime.utcnow()
db.session.add(self)
def gravatar(self, size=100, default='identicon', rating='g'):
if request.is_secure:
url = 'https://secure.gravatar.com/avatar'
else:
url = 'http://www.gravatar.com/avatar'
hash = self.avatar_hash or hashlib.md5(
self.email.encode('utf-8')).hexdigest()
return '{url}/{hash}?s={size}&d={default}&r={rating}'.format(
url=url, hash=hash, size=size, default=default, rating=rating)
def generate_auth_token(self, expiration):
s = Serializer(current_app.config['SECRET_KEY'],
expires_in=expiration)
return s.dumps({'id': self.id}).decode('ascii')
@staticmethod
def verify_auth_token(token):
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except:
return None
return User.query.get(data['id'])
def __repr__(self):
return '<User %r>' % self.username
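# Editor's sketch (hypothetical secret key): the confirmation and reset tokens
# above are timed itsdangerous payloads; outside an app context the round trip
# is simply:
def _token_roundtrip_sketch():
    s = Serializer('not-a-real-secret', 3600)
    token = s.dumps({'confirm': 42})
    return s.loads(token)['confirm'] == 42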
class AnonymousUser(AnonymousUserMixin):
def can(self, permissions):
return False
def is_administrator(self):
return False
login_manager.anonymous_user = AnonymousUser
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
class Restaurant(db.Model):
__tablename__ = 'restaurant'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(), unique=True, index=True)
phone = db.Column(db.String())
coordinates = db.Column(db.String())
menu_url = db.Column(db.String(255))
logo_path = db.Column(db.String())
menus = db.relationship('Menu', backref='restaurant', lazy='dynamic')
votes = db.relationship('Vote', backref='restaurant', lazy='dynamic')
def current_votes(self):
current_votes = Vote.query.filter_by(date=datetime.date.today()).filter_by(restaurant=self)
return current_votes.all()
def full_url(self):
if "monopol" in self.name.lower():
return self.menu_url + str(datetime.date.today().isocalendar()[1]).zfill(2) + "_KW.pdf"
return self.menu_url
class Menu(db.Model):
__tablename__ = 'menus'
id = db.Column(db.Integer, primary_key=True)
date = db.Column(db.DATE, nullable=False)
content = db.Column(db.String(300), nullable=False)
restaurant_id = db.Column(db.Integer, db.ForeignKey('restaurant.id'))
class Vote(db.Model):
__tablename__ = 'votes'
id = db.Column(db.Integer, primary_key=True)
date = db.Column(db.DATE, nullable=False)
user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
restaurant_id = db.Column(db.Integer, db.ForeignKey('restaurant.id'))
comment = db.Column(db.String(64))
|
|
# Copyright (c) 2006 Allan Saddi <allan@saddi.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# $Id$
__author__ = 'Allan Saddi <allan@saddi.com>'
__version__ = '$Revision$'
import select
import struct
import socket
import errno
import types
__all__ = ['FCGIApp']
# Constants from the spec.
FCGI_LISTENSOCK_FILENO = 0
FCGI_HEADER_LEN = 8
FCGI_VERSION_1 = 1
FCGI_BEGIN_REQUEST = 1
FCGI_ABORT_REQUEST = 2
FCGI_END_REQUEST = 3
FCGI_PARAMS = 4
FCGI_STDIN = 5
FCGI_STDOUT = 6
FCGI_STDERR = 7
FCGI_DATA = 8
FCGI_GET_VALUES = 9
FCGI_GET_VALUES_RESULT = 10
FCGI_UNKNOWN_TYPE = 11
FCGI_MAXTYPE = FCGI_UNKNOWN_TYPE
FCGI_NULL_REQUEST_ID = 0
FCGI_KEEP_CONN = 1
FCGI_RESPONDER = 1
FCGI_AUTHORIZER = 2
FCGI_FILTER = 3
FCGI_REQUEST_COMPLETE = 0
FCGI_CANT_MPX_CONN = 1
FCGI_OVERLOADED = 2
FCGI_UNKNOWN_ROLE = 3
FCGI_MAX_CONNS = 'FCGI_MAX_CONNS'
FCGI_MAX_REQS = 'FCGI_MAX_REQS'
FCGI_MPXS_CONNS = 'FCGI_MPXS_CONNS'
FCGI_Header = '!BBHHBx'
FCGI_BeginRequestBody = '!HB5x'
FCGI_EndRequestBody = '!LB3x'
FCGI_UnknownTypeBody = '!B7x'
FCGI_BeginRequestBody_LEN = struct.calcsize(FCGI_BeginRequestBody)
FCGI_EndRequestBody_LEN = struct.calcsize(FCGI_EndRequestBody)
FCGI_UnknownTypeBody_LEN = struct.calcsize(FCGI_UnknownTypeBody)
if __debug__:
import time
# Set non-zero to write debug output to a file.
DEBUG = 0
DEBUGLOG = '/tmp/fcgi_app.log'
def _debug(level, msg):
if DEBUG < level:
return
try:
f = open(DEBUGLOG, 'a')
f.write('%sfcgi: %s\n' % (time.ctime()[4:-4], msg))
f.close()
except:
pass
def decode_pair(s, pos=0):
"""
Decodes a name/value pair.
The number of bytes decoded as well as the name/value pair
are returned.
"""
nameLength = ord(s[pos])
if nameLength & 128:
nameLength = struct.unpack('!L', s[pos:pos+4])[0] & 0x7fffffff
pos += 4
else:
pos += 1
valueLength = ord(s[pos])
if valueLength & 128:
valueLength = struct.unpack('!L', s[pos:pos+4])[0] & 0x7fffffff
pos += 4
else:
pos += 1
name = s[pos:pos+nameLength]
pos += nameLength
value = s[pos:pos+valueLength]
pos += valueLength
return (pos, (name, value))
def encode_pair(name, value):
"""
Encodes a name/value pair.
The encoded string is returned.
"""
nameLength = len(name)
if nameLength < 128:
s = chr(nameLength)
else:
s = struct.pack('!L', nameLength | 0x80000000L)
valueLength = len(value)
if valueLength < 128:
s += chr(valueLength)
else:
s += struct.pack('!L', valueLength | 0x80000000L)
return s + name + value
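# Editor's sketch (hypothetical pair data): encode_pair and decode_pair are
# inverses for the FastCGI name-value wire format, including the four-byte
# long form used when a length is 128 or more.
def _pair_roundtrip_sketch():
    s = encode_pair('SCRIPT_NAME', '/app') + encode_pair('X' * 200, 'y')
    pos, (name, value) = decode_pair(s)
    assert (name, value) == ('SCRIPT_NAME', '/app')
    pos, (name, value) = decode_pair(s, pos)
    assert name == 'X' * 200 and value == 'y'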
class Record(object):
"""
A FastCGI Record.
Used for encoding/decoding records.
"""
def __init__(self, type=FCGI_UNKNOWN_TYPE, requestId=FCGI_NULL_REQUEST_ID):
self.version = FCGI_VERSION_1
self.type = type
self.requestId = requestId
self.contentLength = 0
self.paddingLength = 0
self.contentData = ''
def _recvall(sock, length):
"""
Attempts to receive length bytes from a socket, blocking if necessary.
(Socket may be blocking or non-blocking.)
"""
dataList = []
recvLen = 0
while length:
try:
data = sock.recv(length)
except socket.error, e:
if e[0] == errno.EAGAIN:
select.select([sock], [], [])
continue
else:
raise
if not data: # EOF
break
dataList.append(data)
dataLen = len(data)
recvLen += dataLen
length -= dataLen
return ''.join(dataList), recvLen
_recvall = staticmethod(_recvall)
def read(self, sock):
"""Read and decode a Record from a socket."""
try:
header, length = self._recvall(sock, FCGI_HEADER_LEN)
except:
raise EOFError
if length < FCGI_HEADER_LEN:
raise EOFError
self.version, self.type, self.requestId, self.contentLength, \
self.paddingLength = struct.unpack(FCGI_Header, header)
if __debug__: _debug(9, 'read: fd = %d, type = %d, requestId = %d, '
'contentLength = %d' %
(sock.fileno(), self.type, self.requestId,
self.contentLength))
if self.contentLength:
try:
self.contentData, length = self._recvall(sock,
self.contentLength)
except:
raise EOFError
if length < self.contentLength:
raise EOFError
if self.paddingLength:
try:
self._recvall(sock, self.paddingLength)
except:
raise EOFError
def _sendall(sock, data):
"""
Writes data to a socket and does not return until all the data is sent.
"""
length = len(data)
while length:
try:
sent = sock.send(data)
except socket.error, e:
if e[0] == errno.EAGAIN:
select.select([], [sock], [])
continue
else:
raise
data = data[sent:]
length -= sent
_sendall = staticmethod(_sendall)
def write(self, sock):
"""Encode and write a Record to a socket."""
self.paddingLength = -self.contentLength & 7
if __debug__: _debug(9, 'write: fd = %d, type = %d, requestId = %d, '
'contentLength = %d' %
(sock.fileno(), self.type, self.requestId,
self.contentLength))
header = struct.pack(FCGI_Header, self.version, self.type,
self.requestId, self.contentLength,
self.paddingLength)
self._sendall(sock, header)
if self.contentLength:
self._sendall(sock, self.contentData)
if self.paddingLength:
self._sendall(sock, '\x00'*self.paddingLength)
class FCGIApp(object):
def __init__(self, command=None, connect=None, host=None, port=None,
filterEnviron=True):
if host is not None:
assert port is not None
connect=(host, port)
assert (command is not None and connect is None) or \
(command is None and connect is not None)
self._command = command
self._connect = connect
self._filterEnviron = filterEnviron
#sock = self._getConnection()
#print self._fcgiGetValues(sock, ['FCGI_MAX_CONNS', 'FCGI_MAX_REQS', 'FCGI_MPXS_CONNS'])
#sock.close()
def __call__(self, environ, start_response):
# For sanity's sake, we don't care about FCGI_MPXS_CONN
# (connection multiplexing). For every request, we obtain a new
# transport socket, perform the request, then discard the socket.
# This is, I believe, how mod_fastcgi does things...
sock = self._getConnection()
# Since this is going to be the only request on this connection,
# set the request ID to 1.
requestId = 1
# Begin the request
rec = Record(FCGI_BEGIN_REQUEST, requestId)
rec.contentData = struct.pack(FCGI_BeginRequestBody, FCGI_RESPONDER, 0)
rec.contentLength = FCGI_BeginRequestBody_LEN
rec.write(sock)
# Filter WSGI environ and send it as FCGI_PARAMS
if self._filterEnviron:
params = self._defaultFilterEnviron(environ)
else:
params = self._lightFilterEnviron(environ)
# TODO: Anything not from environ that needs to be sent also?
self._fcgiParams(sock, requestId, params)
self._fcgiParams(sock, requestId, {})
# Transfer wsgi.input to FCGI_STDIN
content_length = int(environ.get('CONTENT_LENGTH') or 0)
while True:
chunk_size = min(content_length, 4096)
s = environ['wsgi.input'].read(chunk_size)
content_length -= len(s)
rec = Record(FCGI_STDIN, requestId)
rec.contentData = s
rec.contentLength = len(s)
rec.write(sock)
if not s: break
# Empty FCGI_DATA stream
rec = Record(FCGI_DATA, requestId)
rec.write(sock)
# Main loop. Process FCGI_STDOUT, FCGI_STDERR, FCGI_END_REQUEST
# records from the application.
result = []
while True:
inrec = Record()
inrec.read(sock)
if inrec.type == FCGI_STDOUT:
if inrec.contentData:
result.append(inrec.contentData)
else:
# TODO: Should probably be pedantic and no longer
# accept FCGI_STDOUT records?
pass
elif inrec.type == FCGI_STDERR:
# Simply forward to wsgi.errors
environ['wsgi.errors'].write(inrec.contentData)
elif inrec.type == FCGI_END_REQUEST:
# TODO: Process appStatus/protocolStatus fields?
break
# Done with this transport socket, close it. (FCGI_KEEP_CONN was not
# set in the FCGI_BEGIN_REQUEST record we sent above. So the
# application is expected to do the same.)
sock.close()
result = ''.join(result)
# Parse response headers from FCGI_STDOUT
status = '200 OK'
headers = []
pos = 0
while True:
eolpos = result.find('\n', pos)
if eolpos < 0: break
line = result[pos:eolpos-1]
pos = eolpos + 1
# strip in case of CR. NB: This will also strip other
# whitespace...
line = line.strip()
# Empty line signifies end of headers
if not line: break
# TODO: Better error handling
header, value = line.split(':', 1)
header = header.strip().lower()
value = value.strip()
if header == 'status':
# Special handling of Status header
status = value
if status.find(' ') < 0:
# Append a dummy reason phrase if one was not provided
status += ' FCGIApp'
else:
headers.append((header, value))
result = result[pos:]
# Set WSGI status, headers, and return result.
start_response(status, headers)
return [result]
def _getConnection(self):
if self._connect is not None:
# The simple case. Create a socket and connect to the
# application.
if isinstance(self._connect, types.StringTypes):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(self._connect)
elif hasattr(socket, 'create_connection'):
sock = socket.create_connection(self._connect)
else:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(self._connect)
return sock
# To be done when I have more time...
raise NotImplementedError, 'Launching and managing FastCGI programs not yet implemented'
def _fcgiGetValues(self, sock, vars):
# Construct FCGI_GET_VALUES record
outrec = Record(FCGI_GET_VALUES)
data = []
for name in vars:
data.append(encode_pair(name, ''))
data = ''.join(data)
outrec.contentData = data
outrec.contentLength = len(data)
outrec.write(sock)
# Await response
inrec = Record()
inrec.read(sock)
result = {}
if inrec.type == FCGI_GET_VALUES_RESULT:
pos = 0
while pos < inrec.contentLength:
pos, (name, value) = decode_pair(inrec.contentData, pos)
result[name] = value
return result
def _fcgiParams(self, sock, requestId, params):
rec = Record(FCGI_PARAMS, requestId)
data = []
for name,value in params.items():
data.append(encode_pair(name, value))
data = ''.join(data)
rec.contentData = data
rec.contentLength = len(data)
rec.write(sock)
_environPrefixes = ['SERVER_', 'HTTP_', 'REQUEST_', 'REMOTE_', 'PATH_',
'CONTENT_']
_environCopies = ['SCRIPT_NAME', 'QUERY_STRING', 'AUTH_TYPE']
_environRenames = {}
def _defaultFilterEnviron(self, environ):
result = {}
for n in environ.keys():
for p in self._environPrefixes:
if n.startswith(p):
result[n] = environ[n]
if n in self._environCopies:
result[n] = environ[n]
if n in self._environRenames:
result[self._environRenames[n]] = environ[n]
return result
def _lightFilterEnviron(self, environ):
result = {}
for n in environ.keys():
if n.upper() == n:
result[n] = environ[n]
return result
if __name__ == '__main__':
from flup.server.ajp import WSGIServer
app = FCGIApp(connect=('localhost', 4242))
#import paste.lint
#app = paste.lint.middleware(app)
WSGIServer(app).run()
|
|
#!/usr/bin/env python
"""Process that loads the system policy"""
__author__ = 'Stephen P. Henrie'
"""
Process that loads the system policy
"""
from pyon.core.governance import get_system_actor, get_system_actor_header
from pyon.public import CFG, log, ImmediateProcess, iex, Container, IonObject, RT, OT
from interface.services.coi.iidentity_management_service import IdentityManagementServiceProcessClient
from interface.services.coi.iorg_management_service import OrgManagementServiceProcessClient
from interface.services.coi.ipolicy_management_service import PolicyManagementServiceProcessClient
class LoadSystemPolicy(ImmediateProcess):
"""
bin/pycc -x ion.processes.bootstrap.load_system_policy.LoadSystemPolicy op=load
"""
def on_init(self):
pass
def on_start(self):
op = self.CFG.get("op", None)
log.info("LoadSystemPolicy: {op=%s}" % op)
if op:
if op == "load":
self.op_load_system_policies(self)
else:
raise iex.BadRequest("Operation unknown")
else:
raise iex.BadRequest("No operation specified")
def on_quit(self):
pass
#
# Create the initial set of policy rules for the ION system. To make the rules easier to write, start by
# denying all anonymous access to Org services and then add rules which Permit access to specific operations
# based on conditions.
#
@classmethod
def op_load_system_policies(cls, calling_process):
org_client = OrgManagementServiceProcessClient(node=Container.instance.node, process=calling_process)
ion_org = org_client.find_org()
id_client = IdentityManagementServiceProcessClient(node=Container.instance.node, process=calling_process )
system_actor = get_system_actor()
log.info('system actor:' + system_actor._id)
sa_user_header = get_system_actor_header()
policy_client = PolicyManagementServiceProcessClient(node=Container.instance.node, process=calling_process)
timeout = 20
##############
'''
This rule must be loaded before the Deny_Everything rule
'''
policy_text = '''
<Rule RuleId="%s:" Effect="Permit">
<Description>
%s
</Description>
<Target>
<Subjects>
<Subject>
<SubjectMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">ION_MANAGER</AttributeValue>
<SubjectAttributeDesignator
AttributeId="urn:oasis:names:tc:xacml:1.0:subject:subject-role-id"
DataType="http://www.w3.org/2001/XMLSchema#string"/>
</SubjectMatch>
</Subject>
</Subjects>
</Target>
</Rule>
'''
policy_id = policy_client.create_common_service_access_policy( 'ION_Manager_Permit_Everything',
'A global policy rule that permits access to everything with the ION Manager role',
policy_text, headers=sa_user_header)
##############
'''
This rule must be loaded before the Deny_Everything rule
'''
policy_text = '''
<Rule RuleId="%s" Effect="Permit">
<Description>
%s
</Description>
<Target>
<Resources>
<Resource>
<ResourceMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">service</AttributeValue>
<ResourceAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:resource:receiver-type" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ResourceMatch>
</Resource>
</Resources>
<Actions>
<Action>
<ActionMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-regexp-match">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">read_.*$</AttributeValue>
<ActionAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:action:action-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ActionMatch>
</Action>
<Action>
<ActionMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">READ</AttributeValue>
<ActionAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:action:action-verb" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ActionMatch>
</Action>
<Action>
<ActionMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-regexp-match">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">find_.*$</AttributeValue>
<ActionAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:action:action-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ActionMatch>
</Action>
<Action>
<ActionMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">FIND</AttributeValue>
<ActionAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:action:action-verb" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ActionMatch>
</Action>
<Action>
<ActionMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-regexp-match">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">get_.*$</AttributeValue>
<ActionAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:action:action-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ActionMatch>
</Action>
<Action>
<ActionMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">GET</AttributeValue>
<ActionAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:action:action-verb" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ActionMatch>
</Action>
</Actions>
</Target>
<Condition>
<Apply FunctionId="urn:oasis:names:tc:xacml:1.0:function:not">
<Apply FunctionId="urn:oasis:names:tc:xacml:1.0:function:string-at-least-one-member-of">
<Apply FunctionId="urn:oasis:names:tc:xacml:1.0:function:string-bag">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">find_enrolled_users</AttributeValue>
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">find_enrolled_orgs</AttributeValue>
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">find_org_negotiations</AttributeValue>
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">find_org_closed_negotiations</AttributeValue>
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">initiate_realtime_visualization_data</AttributeValue>
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">get_realtime_visualization_data</AttributeValue>
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">terminate_realtime_visualization_data</AttributeValue>
</Apply>
<ActionAttributeDesignator
AttributeId="urn:oasis:names:tc:xacml:1.0:action:action-id"
DataType="http://www.w3.org/2001/XMLSchema#string"/>
</Apply>
</Apply>
</Condition>
</Rule>
'''
policy_id = policy_client.create_common_service_access_policy( 'Allowed_Anonymous_Service_Operations',
'A global policy rule which specifies operations that are allowed with anonymous access',
policy_text, headers=sa_user_header)
##############
#This rule has been modified specifically for 2.0 to Deny for only specific services and agents. Everything else will be allowed.
policy_text = '''
<Rule RuleId="%s:" Effect="Deny">
<Description>
%s
</Description>
<Target>
<!-- REMOVE THE RESOURCE TARGETS BELOW AFTER 2.0 TO TIGHTEN POLICY -->
<Resources>
<Resource>
<ResourceMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">org_management</AttributeValue>
<ResourceAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:resource:resource-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ResourceMatch>
</Resource>
<Resource>
<ResourceMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">policy_management</AttributeValue>
<ResourceAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:resource:resource-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ResourceMatch>
</Resource>
<Resource>
<ResourceMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">instrument_management</AttributeValue>
<ResourceAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:resource:resource-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ResourceMatch>
</Resource>
<Resource>
<ResourceMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">observatory_management</AttributeValue>
<ResourceAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:resource:resource-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ResourceMatch>
</Resource>
<Resource>
<ResourceMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">InstrumentDevice</AttributeValue>
<ResourceAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:resource:resource-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ResourceMatch>
</Resource>
<Resource>
<ResourceMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">PlatformDevice</AttributeValue>
<ResourceAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:resource:resource-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ResourceMatch>
</Resource>
<Resource>
<ResourceMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">identity_management</AttributeValue>
<ResourceAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:resource:resource-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ResourceMatch>
</Resource>
<Resource>
<ResourceMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">scheduler</AttributeValue>
<ResourceAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:resource:resource-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ResourceMatch>
</Resource>
<Resource>
<ResourceMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">resource_registry</AttributeValue>
<ResourceAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:resource:resource-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ResourceMatch>
</Resource>
<Resource>
<ResourceMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">visualization</AttributeValue>
<ResourceAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:resource:resource-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ResourceMatch>
</Resource>
<Resource>
<ResourceMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">user_notification</AttributeValue>
<ResourceAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:resource:resource-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ResourceMatch>
</Resource>
<Resource>
<ResourceMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">data_product_management</AttributeValue>
<ResourceAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:resource:resource-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ResourceMatch>
</Resource>
</Resources>
</Target>
</Rule>
'''
policy_id = policy_client.create_common_service_access_policy( 'Deny_Everything', 'A global policy rule that denies access to everything by default',
policy_text, headers=sa_user_header)
##############
        ### THIS POLICY HAS BEEN COMMENTED OUT FOR THE INITIAL R2 RELEASE
"""
policy_text = '''
<Rule RuleId="%s" Effect="Permit">
<Description>
%s
</Description>
<Target>
<Resources>
<Resource>
<ResourceMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">service</AttributeValue>
<ResourceAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:resource:receiver-type" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ResourceMatch>
</Resource>
</Resources>
<Actions>
<Action>
<ActionMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-regexp-match">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">create_.*$</AttributeValue>
<ActionAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:action:action-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ActionMatch>
</Action>
<Action>
<ActionMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">CREATE</AttributeValue>
<ActionAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:action:action-verb" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ActionMatch>
</Action>
<Action>
<ActionMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-regexp-match">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">update_.*$</AttributeValue>
<ActionAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:action:action-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ActionMatch>
</Action>
<Action>
<ActionMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">UPDATE</AttributeValue>
<ActionAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:action:action-verb" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ActionMatch>
</Action>
<Action>
<ActionMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-regexp-match">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">delete_.*$</AttributeValue>
<ActionAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:action:action-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ActionMatch>
</Action>
<Action>
<ActionMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">RETIRE</AttributeValue>
<ActionAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:action:action-verb" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ActionMatch>
</Action>
</Actions>
<Subjects>
<Subject>
<SubjectMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">INSTRUMENT_OPERATOR</AttributeValue>
<SubjectAttributeDesignator
AttributeId="urn:oasis:names:tc:xacml:1.0:subject:subject-role-id"
DataType="http://www.w3.org/2001/XMLSchema#string"/>
</SubjectMatch>
</Subject>
<Subject>
<SubjectMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">OBSERVATORY_OPERATOR</AttributeValue>
<SubjectAttributeDesignator
AttributeId="urn:oasis:names:tc:xacml:1.0:subject:subject-role-id"
DataType="http://www.w3.org/2001/XMLSchema#string"/>
</SubjectMatch>
</Subject>
<Subject>
<SubjectMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">DATA_OPERATOR</AttributeValue>
<SubjectAttributeDesignator
AttributeId="urn:oasis:names:tc:xacml:1.0:subject:subject-role-id"
DataType="http://www.w3.org/2001/XMLSchema#string"/>
</SubjectMatch>
</Subject>
<Subject>
<SubjectMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">ORG_MANAGER</AttributeValue>
<SubjectAttributeDesignator
AttributeId="urn:oasis:names:tc:xacml:1.0:subject:subject-role-id"
DataType="http://www.w3.org/2001/XMLSchema#string"/>
</SubjectMatch>
</Subject>
</Subjects>
</Target>
</Rule> '''
policy_id = policy_client.create_common_service_access_policy( 'Allowed_CUD_Service_Operations_for_Roles',
            'A global policy rule which specifies operations that are allowed for OPERATOR and MANAGER roles',
policy_text, headers=sa_user_header)
"""
##############
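        # Broad Permit on the resource_registry for the four operator/manager
        # roles, excluding the DELETE/UPDATE/LCS-CHANGE verbs and the
        # create_attachment operation, each of which gets a dedicated rule below.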
policy_text = '''
<Rule RuleId="%s" Effect="Permit">
<Description>
%s
</Description>
<Target>
<Resources>
<Resource>
<ResourceMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">resource_registry</AttributeValue>
<ResourceAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:resource:resource-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ResourceMatch>
</Resource>
</Resources>
<Subjects>
<Subject>
<SubjectMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">INSTRUMENT_OPERATOR</AttributeValue>
<SubjectAttributeDesignator
AttributeId="urn:oasis:names:tc:xacml:1.0:subject:subject-role-id"
DataType="http://www.w3.org/2001/XMLSchema#string"/>
</SubjectMatch>
</Subject>
<Subject>
<SubjectMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">OBSERVATORY_OPERATOR</AttributeValue>
<SubjectAttributeDesignator
AttributeId="urn:oasis:names:tc:xacml:1.0:subject:subject-role-id"
DataType="http://www.w3.org/2001/XMLSchema#string"/>
</SubjectMatch>
</Subject>
<Subject>
<SubjectMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">DATA_OPERATOR</AttributeValue>
<SubjectAttributeDesignator
AttributeId="urn:oasis:names:tc:xacml:1.0:subject:subject-role-id"
DataType="http://www.w3.org/2001/XMLSchema#string"/>
</SubjectMatch>
</Subject>
<Subject>
<SubjectMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">ORG_MANAGER</AttributeValue>
<SubjectAttributeDesignator
AttributeId="urn:oasis:names:tc:xacml:1.0:subject:subject-role-id"
DataType="http://www.w3.org/2001/XMLSchema#string"/>
</SubjectMatch>
</Subject>
</Subjects>
</Target>
<Condition>
<Apply FunctionId="urn:oasis:names:tc:xacml:1.0:function:and">
<Apply FunctionId="urn:oasis:names:tc:xacml:1.0:function:not">
<Apply FunctionId="urn:oasis:names:tc:xacml:1.0:function:string-at-least-one-member-of">
<Apply FunctionId="urn:oasis:names:tc:xacml:1.0:function:string-bag">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">DELETE</AttributeValue>
</Apply>
<ActionAttributeDesignator
AttributeId="urn:oasis:names:tc:xacml:1.0:action:action-verb"
DataType="http://www.w3.org/2001/XMLSchema#string"/>
</Apply>
</Apply>
<Apply FunctionId="urn:oasis:names:tc:xacml:1.0:function:not">
<Apply FunctionId="urn:oasis:names:tc:xacml:1.0:function:string-at-least-one-member-of">
<Apply FunctionId="urn:oasis:names:tc:xacml:1.0:function:string-bag">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">UPDATE</AttributeValue>
</Apply>
<ActionAttributeDesignator
AttributeId="urn:oasis:names:tc:xacml:1.0:action:action-verb"
DataType="http://www.w3.org/2001/XMLSchema#string"/>
</Apply>
</Apply>
<Apply FunctionId="urn:oasis:names:tc:xacml:1.0:function:not">
<Apply FunctionId="urn:oasis:names:tc:xacml:1.0:function:string-at-least-one-member-of">
<Apply FunctionId="urn:oasis:names:tc:xacml:1.0:function:string-bag">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">create_attachment</AttributeValue>
</Apply>
<ActionAttributeDesignator
AttributeId="urn:oasis:names:tc:xacml:1.0:action:action-id"
DataType="http://www.w3.org/2001/XMLSchema#string"/>
</Apply>
</Apply>
<Apply FunctionId="urn:oasis:names:tc:xacml:1.0:function:not">
<Apply FunctionId="urn:oasis:names:tc:xacml:1.0:function:string-at-least-one-member-of">
<Apply FunctionId="urn:oasis:names:tc:xacml:1.0:function:string-bag">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">LCS-CHANGE</AttributeValue>
</Apply>
<ActionAttributeDesignator
AttributeId="urn:oasis:names:tc:xacml:1.0:action:action-verb"
DataType="http://www.w3.org/2001/XMLSchema#string"/>
</Apply>
</Apply>
</Apply>
</Condition>
</Rule> '''
policy_id = policy_client.create_service_access_policy('resource_registry', 'RR_Permitted_Operations',
'Permit these operations in the Resource Registry Service for the proper roles',
policy_text, headers=sa_user_header)
##############
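        # Permit create_attachment for any org role, including plain ORG_MEMBER,
        # gated by the server-side check_attachment_policy function evaluated
        # over the call's parameter dict.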
policy_text = '''
<Rule RuleId="%s" Effect="Permit">
<Description>
%s
</Description>
<Target>
<Resources>
<Resource>
<ResourceMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">resource_registry</AttributeValue>
<ResourceAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:resource:resource-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ResourceMatch>
</Resource>
</Resources>
<Actions>
<Action>
<ActionMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">create_attachment</AttributeValue>
<ActionAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:action:action-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ActionMatch>
</Action>
</Actions>
<Subjects>
<Subject>
<SubjectMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">INSTRUMENT_OPERATOR</AttributeValue>
<SubjectAttributeDesignator
AttributeId="urn:oasis:names:tc:xacml:1.0:subject:subject-role-id"
DataType="http://www.w3.org/2001/XMLSchema#string"/>
</SubjectMatch>
</Subject>
<Subject>
<SubjectMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">OBSERVATORY_OPERATOR</AttributeValue>
<SubjectAttributeDesignator
AttributeId="urn:oasis:names:tc:xacml:1.0:subject:subject-role-id"
DataType="http://www.w3.org/2001/XMLSchema#string"/>
</SubjectMatch>
</Subject>
<Subject>
<SubjectMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">DATA_OPERATOR</AttributeValue>
<SubjectAttributeDesignator
AttributeId="urn:oasis:names:tc:xacml:1.0:subject:subject-role-id"
DataType="http://www.w3.org/2001/XMLSchema#string"/>
</SubjectMatch>
</Subject>
<Subject>
<SubjectMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">ORG_MANAGER</AttributeValue>
<SubjectAttributeDesignator
AttributeId="urn:oasis:names:tc:xacml:1.0:subject:subject-role-id"
DataType="http://www.w3.org/2001/XMLSchema#string"/>
</SubjectMatch>
</Subject>
<Subject>
<SubjectMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">ORG_MEMBER</AttributeValue>
<SubjectAttributeDesignator
AttributeId="urn:oasis:names:tc:xacml:1.0:subject:subject-role-id"
DataType="http://www.w3.org/2001/XMLSchema#string"/>
</SubjectMatch>
</Subject>
</Subjects>
</Target>
<Condition>
<Apply FunctionId="urn:oasis:names:tc:xacml:1.0:function:evaluate-function">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">check_attachment_policy</AttributeValue>
<ActionAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:action:param-dict" DataType="http://www.w3.org/2001/XMLSchema#dict"/>
</Apply>
</Condition>
</Rule> '''
policy_id = policy_client.create_service_access_policy('resource_registry', 'RR_create_attachment_Operation',
            'Permit the create_attachment operation only in the context of the org to which the user belongs',
policy_text, headers=sa_user_header)
##############
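        # Permit lifecycle (LCS-CHANGE) operations on the resource registry for
        # any caller, gated by check_lifecycle_policy.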
policy_text = '''
<Rule RuleId="%s" Effect="Permit">
<Description>
%s
</Description>
<Target>
<Resources>
<Resource>
<ResourceMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">resource_registry</AttributeValue>
<ResourceAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:resource:resource-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ResourceMatch>
</Resource>
</Resources>
<Actions>
<Action>
<ActionMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">LCS-CHANGE</AttributeValue>
<ActionAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:action:action-verb" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ActionMatch>
</Action>
</Actions>
</Target>
<Condition>
<Apply FunctionId="urn:oasis:names:tc:xacml:1.0:function:evaluate-function">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">check_lifecycle_policy</AttributeValue>
<ActionAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:action:param-dict" DataType="http://www.w3.org/2001/XMLSchema#dict"/>
</Apply>
</Condition>
</Rule> '''
policy_id = policy_client.create_service_access_policy('resource_registry', 'RR_LCS_Operation',
'Permit lifecycle change operation only in the context of the org to which the user belongs',
policy_text, headers=sa_user_header)
##############
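        # Permit UPDATE operations on the resource registry for any caller,
        # gated by check_edit_policy.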
policy_text = '''
<Rule RuleId="%s" Effect="Permit">
<Description>
%s
</Description>
<Target>
<Resources>
<Resource>
<ResourceMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">resource_registry</AttributeValue>
<ResourceAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:resource:resource-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ResourceMatch>
</Resource>
</Resources>
<Actions>
<Action>
<ActionMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">UPDATE</AttributeValue>
<ActionAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:action:action-verb" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ActionMatch>
</Action>
</Actions>
</Target>
<Condition>
<Apply FunctionId="urn:oasis:names:tc:xacml:1.0:function:evaluate-function">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">check_edit_policy</AttributeValue>
<ActionAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:action:param-dict" DataType="http://www.w3.org/2001/XMLSchema#dict"/>
</Apply>
</Condition>
</Rule> '''
policy_id = policy_client.create_service_access_policy('resource_registry', 'RR_edit_Operation',
'Permit edit operation only in the context of the org to which the user belongs',
policy_text, headers=sa_user_header)
##############
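        # Permit the listed identity_management operations (signon, user info
        # CRUD, account merge, token check) for any non-anonymous user.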
policy_text = '''
<Rule RuleId="%s" Effect="Permit">
<Description>
%s
</Description>
<Target>
<Resources>
<Resource>
<ResourceMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">identity_management</AttributeValue>
<ResourceAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:resource:resource-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ResourceMatch>
</Resource>
</Resources>
<Actions>
<Action>
<ActionMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">prepare_user_info_support</AttributeValue>
<ActionAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:action:action-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ActionMatch>
</Action>
<Action>
<ActionMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">signon</AttributeValue>
<ActionAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:action:action-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ActionMatch>
</Action>
<Action>
<ActionMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">create_user_info</AttributeValue>
<ActionAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:action:action-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ActionMatch>
</Action>
<Action>
<ActionMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">update_user_info</AttributeValue>
<ActionAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:action:action-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ActionMatch>
</Action>
<Action>
<ActionMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">delete_user_info</AttributeValue>
<ActionAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:action:action-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ActionMatch>
</Action>
<Action>
<ActionMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">initiate_account_merge</AttributeValue>
<ActionAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:action:action-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ActionMatch>
</Action>
<Action>
<ActionMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">complete_account_merge</AttributeValue>
<ActionAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:action:action-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ActionMatch>
</Action>
<Action>
<ActionMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">check_authentication_token</AttributeValue>
<ActionAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:action:action-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ActionMatch>
</Action>
</Actions>
</Target>
<Condition>
<Apply FunctionId="urn:oasis:names:tc:xacml:1.0:function:not">
<Apply FunctionId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">anonymous</AttributeValue>
<Apply FunctionId="urn:oasis:names:tc:xacml:1.0:function:string-one-and-only">
<SubjectAttributeDesignator
AttributeId="urn:oasis:names:tc:xacml:1.0:subject:subject-id"
DataType="http://www.w3.org/2001/XMLSchema#string"/>
</Apply>
</Apply>
</Apply>
</Condition>
</Rule> '''
policy_id = policy_client.create_service_access_policy('identity_management', 'IDS_Permitted_Non_Anonymous',
'Permit these operations in the Identity Management Service if the user is not anonymous',
policy_text, headers=sa_user_header)
##############
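        # Permit Org Managers to call org_management operations, except those
        # carrying the DELETE verb.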
policy_text = '''
<Rule RuleId="%s" Effect="Permit">
<Description>
%s
</Description>
<Target>
<Resources>
<Resource>
<ResourceMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">org_management</AttributeValue>
<ResourceAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:resource:resource-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ResourceMatch>
</Resource>
</Resources>
<Subjects>
<Subject>
<SubjectMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">ORG_MANAGER</AttributeValue>
<SubjectAttributeDesignator
AttributeId="urn:oasis:names:tc:xacml:1.0:subject:subject-role-id"
DataType="http://www.w3.org/2001/XMLSchema#string"/>
</SubjectMatch>
</Subject>
</Subjects>
</Target>
<Condition>
<Apply FunctionId="urn:oasis:names:tc:xacml:1.0:function:not">
<Apply FunctionId="urn:oasis:names:tc:xacml:1.0:function:string-at-least-one-member-of">
<Apply FunctionId="urn:oasis:names:tc:xacml:1.0:function:string-bag">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">DELETE</AttributeValue>
</Apply>
<ActionAttributeDesignator
AttributeId="urn:oasis:names:tc:xacml:1.0:action:action-verb"
DataType="http://www.w3.org/2001/XMLSchema#string"/>
</Apply>
</Apply>
</Condition>
</Rule> '''
policy_id = policy_client.create_service_access_policy('org_management', 'OrgMS_Org_Manager_Role_Permitted',
'Permit these operations in the Org Management Service for the role of Org Manager',
policy_text, headers=sa_user_header)
##############
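        # Same pattern for policy_management: Org Managers may call everything
        # except DELETE operations.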
policy_text = '''
<Rule RuleId="%s" Effect="Permit">
<Description>
%s
</Description>
<Target>
<Resources>
<Resource>
<ResourceMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">policy_management</AttributeValue>
<ResourceAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:resource:resource-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ResourceMatch>
</Resource>
</Resources>
<Subjects>
<Subject>
<SubjectMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">ORG_MANAGER</AttributeValue>
<SubjectAttributeDesignator
AttributeId="urn:oasis:names:tc:xacml:1.0:subject:subject-role-id"
DataType="http://www.w3.org/2001/XMLSchema#string"/>
</SubjectMatch>
</Subject>
</Subjects>
</Target>
<Condition>
<Apply FunctionId="urn:oasis:names:tc:xacml:1.0:function:not">
<Apply FunctionId="urn:oasis:names:tc:xacml:1.0:function:string-at-least-one-member-of">
<Apply FunctionId="urn:oasis:names:tc:xacml:1.0:function:string-bag">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">DELETE</AttributeValue>
</Apply>
<ActionAttributeDesignator
AttributeId="urn:oasis:names:tc:xacml:1.0:action:action-verb"
DataType="http://www.w3.org/2001/XMLSchema#string"/>
</Apply>
</Apply>
</Condition>
</Rule> '''
policy_id = policy_client.create_service_access_policy('policy_management', 'PolicyMS_Org_Manager_Role_Permitted',
'Permit these operations in the Policy Management Service for the role of Org Manager',
policy_text, headers=sa_user_header)
##############
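        # Permit the three operator roles to release commitments via
        # org_management.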
policy_text = '''
<Rule RuleId="%s" Effect="Permit">
<Description>
%s
</Description>
<Target>
<Resources>
<Resource>
<ResourceMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">org_management</AttributeValue>
<ResourceAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:resource:resource-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ResourceMatch>
</Resource>
</Resources>
<Actions>
<Action>
<ActionMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">release_commitment</AttributeValue>
<ActionAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:action:action-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ActionMatch>
</Action>
</Actions>
<Subjects>
<Subject>
<SubjectMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">INSTRUMENT_OPERATOR</AttributeValue>
<SubjectAttributeDesignator
AttributeId="urn:oasis:names:tc:xacml:1.0:subject:subject-role-id"
DataType="http://www.w3.org/2001/XMLSchema#string"/>
</SubjectMatch>
</Subject>
<Subject>
<SubjectMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">OBSERVATORY_OPERATOR</AttributeValue>
<SubjectAttributeDesignator
AttributeId="urn:oasis:names:tc:xacml:1.0:subject:subject-role-id"
DataType="http://www.w3.org/2001/XMLSchema#string"/>
</SubjectMatch>
</Subject>
<Subject>
<SubjectMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">DATA_OPERATOR</AttributeValue>
<SubjectAttributeDesignator
AttributeId="urn:oasis:names:tc:xacml:1.0:subject:subject-role-id"
DataType="http://www.w3.org/2001/XMLSchema#string"/>
</SubjectMatch>
</Subject>
</Subjects>
</Target>
</Rule> '''
policy_id = policy_client.create_service_access_policy('org_management', 'OrgMS_Role_Permitted',
'Permit these operations in the Org Management Service for the operator roles',
policy_text, headers=sa_user_header)
##############
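        # Permit plain org members to start negotiations.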
policy_text = '''
<Rule RuleId="%s" Effect="Permit">
<Description>
%s
</Description>
<Target>
<Resources>
<Resource>
<ResourceMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">org_management</AttributeValue>
<ResourceAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:resource:resource-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ResourceMatch>
</Resource>
</Resources>
<Actions>
<Action>
<ActionMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">negotiate</AttributeValue>
<ActionAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:action:action-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ActionMatch>
</Action>
</Actions>
<Subjects>
<Subject>
<SubjectMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">ORG_MEMBER</AttributeValue>
<SubjectAttributeDesignator
AttributeId="urn:oasis:names:tc:xacml:1.0:subject:subject-role-id"
DataType="http://www.w3.org/2001/XMLSchema#string"/>
</SubjectMatch>
</Subject>
</Subjects>
</Target>
</Rule> '''
policy_id = policy_client.create_service_access_policy('org_management', 'OMS_Org_Member_Role_Permitted',
            'Permit these operations in the Org Management Service for any user who is a simple member of the Org',
policy_text, headers=sa_user_header)
##############
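        # Permit Observatory Operators and Org Managers on observatory_management,
        # except DELETE and deployment (de)activation, which the next rule gates
        # with an additional policy check.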
policy_text = '''
<Rule RuleId="%s" Effect="Permit">
<Description>
%s
</Description>
<Target>
<Resources>
<Resource>
<ResourceMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">observatory_management</AttributeValue>
<ResourceAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:resource:resource-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ResourceMatch>
</Resource>
</Resources>
<Subjects>
<Subject>
<SubjectMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">OBSERVATORY_OPERATOR</AttributeValue>
<SubjectAttributeDesignator
AttributeId="urn:oasis:names:tc:xacml:1.0:subject:subject-role-id"
DataType="http://www.w3.org/2001/XMLSchema#string"/>
</SubjectMatch>
</Subject>
<Subject>
<SubjectMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">ORG_MANAGER</AttributeValue>
<SubjectAttributeDesignator
AttributeId="urn:oasis:names:tc:xacml:1.0:subject:subject-role-id"
DataType="http://www.w3.org/2001/XMLSchema#string"/>
</SubjectMatch>
</Subject>
</Subjects>
</Target>
<Condition>
<Apply FunctionId="urn:oasis:names:tc:xacml:1.0:function:and">
<Apply FunctionId="urn:oasis:names:tc:xacml:1.0:function:not">
<Apply FunctionId="urn:oasis:names:tc:xacml:1.0:function:string-at-least-one-member-of">
<Apply FunctionId="urn:oasis:names:tc:xacml:1.0:function:string-bag">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">DELETE</AttributeValue>
</Apply>
<ActionAttributeDesignator
AttributeId="urn:oasis:names:tc:xacml:1.0:action:action-verb"
DataType="http://www.w3.org/2001/XMLSchema#string"/>
</Apply>
</Apply>
<Apply FunctionId="urn:oasis:names:tc:xacml:1.0:function:not">
<Apply FunctionId="urn:oasis:names:tc:xacml:1.0:function:string-at-least-one-member-of">
<Apply FunctionId="urn:oasis:names:tc:xacml:1.0:function:string-bag">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">activate_deployment</AttributeValue>
</Apply>
<ActionAttributeDesignator
AttributeId="urn:oasis:names:tc:xacml:1.0:action:action-id"
DataType="http://www.w3.org/2001/XMLSchema#string"/>
</Apply>
</Apply>
<Apply FunctionId="urn:oasis:names:tc:xacml:1.0:function:not">
<Apply FunctionId="urn:oasis:names:tc:xacml:1.0:function:string-at-least-one-member-of">
<Apply FunctionId="urn:oasis:names:tc:xacml:1.0:function:string-bag">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">deactivate_deployment</AttributeValue>
</Apply>
<ActionAttributeDesignator
AttributeId="urn:oasis:names:tc:xacml:1.0:action:action-id"
DataType="http://www.w3.org/2001/XMLSchema#string"/>
</Apply>
</Apply>
</Apply>
</Condition>
</Rule> '''
policy_id = policy_client.create_service_access_policy('observatory_management', 'OBS_Role_Permitted_Operations',
            'Permit these operations in the Observatory Management Service for the role of Observatory Operator or Org Manager',
policy_text, headers=sa_user_header)
##############
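        # Permit activate_deployment/deactivate_deployment for the same roles,
        # gated by check_deployment_activation_policy.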
policy_text = '''
<Rule RuleId="%s" Effect="Permit">
<Description>
%s
</Description>
<Target>
<Resources>
<Resource>
<ResourceMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">observatory_management</AttributeValue>
<ResourceAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:resource:resource-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ResourceMatch>
</Resource>
</Resources>
<Actions>
<Action>
<ActionMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">activate_deployment</AttributeValue>
<ActionAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:action:action-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ActionMatch>
</Action>
<Action>
<ActionMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">deactivate_deployment</AttributeValue>
<ActionAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:action:action-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ActionMatch>
</Action>
</Actions>
<Subjects>
<Subject>
<SubjectMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">OBSERVATORY_OPERATOR</AttributeValue>
<SubjectAttributeDesignator
AttributeId="urn:oasis:names:tc:xacml:1.0:subject:subject-role-id"
DataType="http://www.w3.org/2001/XMLSchema#string"/>
</SubjectMatch>
</Subject>
<Subject>
<SubjectMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">ORG_MANAGER</AttributeValue>
<SubjectAttributeDesignator
AttributeId="urn:oasis:names:tc:xacml:1.0:subject:subject-role-id"
DataType="http://www.w3.org/2001/XMLSchema#string"/>
</SubjectMatch>
</Subject>
</Subjects>
</Target>
<Condition>
<Apply FunctionId="urn:oasis:names:tc:xacml:1.0:function:evaluate-function">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">check_deployment_activation_policy</AttributeValue>
<ActionAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:action:param-dict" DataType="http://www.w3.org/2001/XMLSchema#dict"/>
</Apply>
</Condition>
</Rule> '''
policy_id = policy_client.create_service_access_policy('observatory_management', 'OBS_Permitted_Deployment_Operations',
            'Permit deployment activation/deactivation in the Observatory Management Service for the role of Observatory Operator or Org Manager',
policy_text, headers=sa_user_header)
##############
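        # Permit start/stop of instrument agent instances. Note there is no
        # <Subjects> target here; the check_start_stop_policy function alone
        # decides whether the caller qualifies.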
policy_text = '''
<Rule RuleId="%s" Effect="Permit">
<Description>
%s
</Description>
<Target>
<Resources>
<Resource>
<ResourceMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">instrument_management</AttributeValue>
<ResourceAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:resource:resource-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ResourceMatch>
</Resource>
</Resources>
<Actions>
<Action>
<ActionMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">start_instrument_agent_instance</AttributeValue>
<ActionAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:action:action-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ActionMatch>
</Action>
<Action>
<ActionMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">stop_instrument_agent_instance</AttributeValue>
<ActionAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:action:action-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ActionMatch>
</Action>
</Actions>
</Target>
<Condition>
<Apply FunctionId="urn:oasis:names:tc:xacml:1.0:function:evaluate-function">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">check_start_stop_policy</AttributeValue>
<ActionAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:action:param-dict" DataType="http://www.w3.org/2001/XMLSchema#dict"/>
</Apply>
</Condition>
</Rule> '''
policy_id = policy_client.create_service_access_policy('instrument_management', 'IMS_Role_Start_Stop_Permissions',
            'Permit these operations in the Instrument Management Service for the role of Instrument Operator',
policy_text, headers=sa_user_header)
##############
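        # Broad Permit on instrument_management for the operator/manager roles,
        # excluding the DELETE and LCS-CHANGE verbs and agent start/stop, which
        # are covered by their own rules.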
policy_text = '''
<Rule RuleId="%s" Effect="Permit">
<Description>
%s
</Description>
<Target>
<Resources>
<Resource>
<ResourceMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">instrument_management</AttributeValue>
<ResourceAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:resource:resource-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ResourceMatch>
</Resource>
</Resources>
<Subjects>
<Subject>
<SubjectMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">OBSERVATORY_OPERATOR</AttributeValue>
<SubjectAttributeDesignator
AttributeId="urn:oasis:names:tc:xacml:1.0:subject:subject-role-id"
DataType="http://www.w3.org/2001/XMLSchema#string"/>
</SubjectMatch>
</Subject>
<Subject>
<SubjectMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">ORG_MANAGER</AttributeValue>
<SubjectAttributeDesignator
AttributeId="urn:oasis:names:tc:xacml:1.0:subject:subject-role-id"
DataType="http://www.w3.org/2001/XMLSchema#string"/>
</SubjectMatch>
</Subject>
<Subject>
<SubjectMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">INSTRUMENT_OPERATOR</AttributeValue>
<SubjectAttributeDesignator
AttributeId="urn:oasis:names:tc:xacml:1.0:subject:subject-role-id"
DataType="http://www.w3.org/2001/XMLSchema#string"/>
</SubjectMatch>
</Subject>
</Subjects>
</Target>
<Condition>
<Apply FunctionId="urn:oasis:names:tc:xacml:1.0:function:and">
<Apply FunctionId="urn:oasis:names:tc:xacml:1.0:function:not">
<Apply FunctionId="urn:oasis:names:tc:xacml:1.0:function:string-at-least-one-member-of">
<Apply FunctionId="urn:oasis:names:tc:xacml:1.0:function:string-bag">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">DELETE</AttributeValue>
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">LCS-CHANGE</AttributeValue>
</Apply>
<ActionAttributeDesignator
AttributeId="urn:oasis:names:tc:xacml:1.0:action:action-verb"
DataType="http://www.w3.org/2001/XMLSchema#string"/>
</Apply>
</Apply>
<Apply FunctionId="urn:oasis:names:tc:xacml:1.0:function:not">
<Apply FunctionId="urn:oasis:names:tc:xacml:1.0:function:string-at-least-one-member-of">
<Apply FunctionId="urn:oasis:names:tc:xacml:1.0:function:string-bag">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">start_instrument_agent_instance</AttributeValue>
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">stop_instrument_agent_instance</AttributeValue>
</Apply>
<ActionAttributeDesignator
AttributeId="urn:oasis:names:tc:xacml:1.0:action:action-id"
DataType="http://www.w3.org/2001/XMLSchema#string"/>
</Apply>
</Apply>
</Apply>
</Condition>
</Rule> '''
policy_id = policy_client.create_service_access_policy('instrument_management', 'IMS_Higher_Role_Permitted_Operations',
            'Permit these operations in the Instrument Management Service for the role of Observatory Operator or Org Manager',
policy_text, headers=sa_user_header)
##############
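        # Permit LCS-CHANGE on instrument_management for the operator/manager
        # roles, gated by check_lifecycle_policy.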
policy_text = '''
<Rule RuleId="%s" Effect="Permit">
<Description>
%s
</Description>
<Target>
<Resources>
<Resource>
<ResourceMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">instrument_management</AttributeValue>
<ResourceAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:resource:resource-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ResourceMatch>
</Resource>
</Resources>
<Actions>
<Action>
<ActionMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">LCS-CHANGE</AttributeValue>
<ActionAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:action:action-verb" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ActionMatch>
</Action>
</Actions>
<Subjects>
<Subject>
<SubjectMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">OBSERVATORY_OPERATOR</AttributeValue>
<SubjectAttributeDesignator
AttributeId="urn:oasis:names:tc:xacml:1.0:subject:subject-role-id"
DataType="http://www.w3.org/2001/XMLSchema#string"/>
</SubjectMatch>
</Subject>
<Subject>
<SubjectMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">ORG_MANAGER</AttributeValue>
<SubjectAttributeDesignator
AttributeId="urn:oasis:names:tc:xacml:1.0:subject:subject-role-id"
DataType="http://www.w3.org/2001/XMLSchema#string"/>
</SubjectMatch>
</Subject>
<Subject>
<SubjectMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">INSTRUMENT_OPERATOR</AttributeValue>
<SubjectAttributeDesignator
AttributeId="urn:oasis:names:tc:xacml:1.0:subject:subject-role-id"
DataType="http://www.w3.org/2001/XMLSchema#string"/>
</SubjectMatch>
</Subject>
</Subjects>
</Target>
<Condition>
<Apply FunctionId="urn:oasis:names:tc:xacml:1.0:function:evaluate-function">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">check_lifecycle_policy</AttributeValue>
<ActionAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:action:param-dict" DataType="http://www.w3.org/2001/XMLSchema#dict"/>
</Apply>
</Condition>
</Rule> '''
policy_id = policy_client.create_service_access_policy('instrument_management', 'IMS_Check_Lifecycle_Operations',
            'Permit lifecycle change operations in the IMS via the check_lifecycle_policy check',
policy_text, headers=sa_user_header)
##############
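        # Broad Permit on data_product_management for the four roles, excluding
        # DELETE and the persistence operations handled by the next rule.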
policy_text = '''
<Rule RuleId="%s" Effect="Permit">
<Description>
%s
</Description>
<Target>
<Resources>
<Resource>
<ResourceMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">data_product_management</AttributeValue>
<ResourceAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:resource:resource-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ResourceMatch>
</Resource>
</Resources>
<Subjects>
<Subject>
<SubjectMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">INSTRUMENT_OPERATOR</AttributeValue>
<SubjectAttributeDesignator
AttributeId="urn:oasis:names:tc:xacml:1.0:subject:subject-role-id"
DataType="http://www.w3.org/2001/XMLSchema#string"/>
</SubjectMatch>
</Subject>
<Subject>
<SubjectMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">DATA_OPERATOR</AttributeValue>
<SubjectAttributeDesignator
AttributeId="urn:oasis:names:tc:xacml:1.0:subject:subject-role-id"
DataType="http://www.w3.org/2001/XMLSchema#string"/>
</SubjectMatch>
</Subject>
<Subject>
<SubjectMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">OBSERVATORY_OPERATOR</AttributeValue>
<SubjectAttributeDesignator
AttributeId="urn:oasis:names:tc:xacml:1.0:subject:subject-role-id"
DataType="http://www.w3.org/2001/XMLSchema#string"/>
</SubjectMatch>
</Subject>
<Subject>
<SubjectMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">ORG_MANAGER</AttributeValue>
<SubjectAttributeDesignator
AttributeId="urn:oasis:names:tc:xacml:1.0:subject:subject-role-id"
DataType="http://www.w3.org/2001/XMLSchema#string"/>
</SubjectMatch>
</Subject>
</Subjects>
</Target>
<Condition>
<Apply FunctionId="urn:oasis:names:tc:xacml:1.0:function:and">
<Apply FunctionId="urn:oasis:names:tc:xacml:1.0:function:not">
<Apply FunctionId="urn:oasis:names:tc:xacml:1.0:function:string-at-least-one-member-of">
<Apply FunctionId="urn:oasis:names:tc:xacml:1.0:function:string-bag">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">DELETE</AttributeValue>
</Apply>
<ActionAttributeDesignator
AttributeId="urn:oasis:names:tc:xacml:1.0:action:action-verb"
DataType="http://www.w3.org/2001/XMLSchema#string"/>
</Apply>
</Apply>
<Apply FunctionId="urn:oasis:names:tc:xacml:1.0:function:not">
<Apply FunctionId="urn:oasis:names:tc:xacml:1.0:function:string-at-least-one-member-of">
<Apply FunctionId="urn:oasis:names:tc:xacml:1.0:function:string-bag">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">activate_data_product_persistence</AttributeValue>
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">suspend_data_product_persistence</AttributeValue>
</Apply>
<ActionAttributeDesignator
AttributeId="urn:oasis:names:tc:xacml:1.0:action:action-id"
DataType="http://www.w3.org/2001/XMLSchema#string"/>
</Apply>
</Apply>
</Apply>
</Condition>
</Rule> '''
policy_id = policy_client.create_service_access_policy('data_product_management', 'DPMS_Role_Permitted_Operations',
            'Permit these operations in the Data Product Management Service for the roles of Instrument Operator, Data Operator, Observatory Operator or Org Manager',
policy_text, headers=sa_user_header)
##############
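        # Permit activate/suspend of data product persistence for any caller,
        # gated by check_dpms_policy.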
policy_text = '''
<Rule RuleId="%s" Effect="Permit">
<Description>
%s
</Description>
<Target>
<Resources>
<Resource>
<ResourceMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">data_product_management</AttributeValue>
<ResourceAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:resource:resource-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ResourceMatch>
</Resource>
</Resources>
<Actions>
<Action>
<ActionMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">activate_data_product_persistence</AttributeValue>
<ActionAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:action:action-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ActionMatch>
</Action>
<Action>
<ActionMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">suspend_data_product_persistence</AttributeValue>
<ActionAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:action:action-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ActionMatch>
</Action>
</Actions>
</Target>
<Condition>
<Apply FunctionId="urn:oasis:names:tc:xacml:1.0:function:evaluate-function">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">check_dpms_policy</AttributeValue>
<ActionAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:action:param-dict" DataType="http://www.w3.org/2001/XMLSchema#dict"/>
</Apply>
</Condition>
</Rule> '''
policy_id = policy_client.create_service_access_policy('data_product_management', 'DPMS_Role_Permitted_Specific_Operations',
            'Permit these specific operations in the Data Product Management Service for the roles of Instrument Operator, Data Operator, Observatory Operator or Org Manager',
policy_text, headers=sa_user_header)
##############
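        # Permit the four roles on the scheduler and user_notification services,
        # except publish_event; the same rule text is registered once per
        # service below.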
policy_text = '''
<Rule RuleId="%s" Effect="Permit">
<Description>
%s
</Description>
<Target>
<Resources>
<Resource>
<ResourceMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">scheduler</AttributeValue>
<ResourceAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:resource:resource-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ResourceMatch>
</Resource>
<Resource>
<ResourceMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">user_notification</AttributeValue>
<ResourceAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:resource:resource-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ResourceMatch>
</Resource>
</Resources>
<Subjects>
<Subject>
<SubjectMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">INSTRUMENT_OPERATOR</AttributeValue>
<SubjectAttributeDesignator
AttributeId="urn:oasis:names:tc:xacml:1.0:subject:subject-role-id"
DataType="http://www.w3.org/2001/XMLSchema#string"/>
</SubjectMatch>
</Subject>
<Subject>
<SubjectMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">OBSERVATORY_OPERATOR</AttributeValue>
<SubjectAttributeDesignator
AttributeId="urn:oasis:names:tc:xacml:1.0:subject:subject-role-id"
DataType="http://www.w3.org/2001/XMLSchema#string"/>
</SubjectMatch>
</Subject>
<Subject>
<SubjectMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">DATA_OPERATOR</AttributeValue>
<SubjectAttributeDesignator
AttributeId="urn:oasis:names:tc:xacml:1.0:subject:subject-role-id"
DataType="http://www.w3.org/2001/XMLSchema#string"/>
</SubjectMatch>
</Subject>
<Subject>
<SubjectMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">ORG_MANAGER</AttributeValue>
<SubjectAttributeDesignator
AttributeId="urn:oasis:names:tc:xacml:1.0:subject:subject-role-id"
DataType="http://www.w3.org/2001/XMLSchema#string"/>
</SubjectMatch>
</Subject>
</Subjects>
</Target>
<Condition>
<Apply FunctionId="urn:oasis:names:tc:xacml:1.0:function:not">
<Apply FunctionId="urn:oasis:names:tc:xacml:1.0:function:string-at-least-one-member-of">
<Apply FunctionId="urn:oasis:names:tc:xacml:1.0:function:string-bag">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">publish_event</AttributeValue>
</Apply>
<ActionAttributeDesignator
AttributeId="urn:oasis:names:tc:xacml:1.0:action:action-id"
DataType="http://www.w3.org/2001/XMLSchema#string"/>
</Apply>
</Apply>
</Condition>
</Rule> '''
policy_id = policy_client.create_service_access_policy('scheduler', 'Scheduler_Permitted_Operations',
'Permit these operations in the Scheduler Service for the proper roles',
policy_text, headers=sa_user_header)
policy_id = policy_client.create_service_access_policy('user_notification', 'User_Notification_Permitted_Operations',
'Permit these operations in the User Notification Service for the proper roles',
policy_text, headers=sa_user_header)
##############
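        # Permit org members to create and delete notifications, gated by
        # check_subscription_policy.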
policy_text = '''
<Rule RuleId="%s" Effect="Permit">
<Description>
%s
</Description>
<Target>
<Resources>
<Resource>
<ResourceMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">user_notification</AttributeValue>
<ResourceAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:resource:resource-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ResourceMatch>
</Resource>
</Resources>
<Subjects>
<Subject>
<SubjectMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">ORG_MEMBER</AttributeValue>
<SubjectAttributeDesignator
AttributeId="urn:oasis:names:tc:xacml:1.0:subject:subject-role-id"
DataType="http://www.w3.org/2001/XMLSchema#string"/>
</SubjectMatch>
</Subject>
</Subjects>
<Actions>
<Action>
<ActionMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">create_notification</AttributeValue>
<ActionAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:action:action-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ActionMatch>
</Action>
<Action>
<ActionMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">delete_notification</AttributeValue>
<ActionAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:action:action-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ActionMatch>
</Action>
</Actions>
</Target>
<Condition>
<Apply FunctionId="urn:oasis:names:tc:xacml:1.0:function:evaluate-function">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">check_subscription_policy</AttributeValue>
<ActionAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:action:param-dict" DataType="http://www.w3.org/2001/XMLSchema#dict"/>
</Apply>
</Condition>
</Rule> '''
policy_id = policy_client.create_service_access_policy('user_notification', 'Subscription_For_Members',
'Permit subscriptions for org members',
policy_text, headers=sa_user_header)
##############
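        # Permit org members to publish events, gated by
        # check_publish_event_policy.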
policy_text = '''
<Rule RuleId="%s" Effect="Permit">
<Description>
%s
</Description>
<Target>
<Resources>
<Resource>
<ResourceMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">user_notification</AttributeValue>
<ResourceAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:resource:resource-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ResourceMatch>
</Resource>
</Resources>
<Subjects>
<Subject>
<SubjectMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">ORG_MEMBER</AttributeValue>
<SubjectAttributeDesignator
AttributeId="urn:oasis:names:tc:xacml:1.0:subject:subject-role-id"
DataType="http://www.w3.org/2001/XMLSchema#string"/>
</SubjectMatch>
</Subject>
</Subjects>
<Actions>
<Action>
<ActionMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">publish_event</AttributeValue>
<ActionAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:action:action-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ActionMatch>
</Action>
</Actions>
</Target>
<Condition>
<Apply FunctionId="urn:oasis:names:tc:xacml:1.0:function:evaluate-function">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">check_publish_event_policy</AttributeValue>
<ActionAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:action:param-dict" DataType="http://www.w3.org/2001/XMLSchema#dict"/>
</Apply>
</Condition>
</Rule> '''
policy_id = policy_client.create_service_access_policy('user_notification', 'Publish_issue_event_for_ION_members',
            'Permit registered users to publish issue events',
policy_text, headers=sa_user_header)
##############
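        # Permit the realtime visualization operations for any non-anonymous
        # user.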
policy_text = '''
<Rule RuleId="%s" Effect="Permit">
<Description>
%s
</Description>
<Target>
<Resources>
<Resource>
<ResourceMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">visualization</AttributeValue>
<ResourceAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:resource:resource-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ResourceMatch>
</Resource>
</Resources>
<Actions>
<Action>
<ActionMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">initiate_realtime_visualization_data</AttributeValue>
<ActionAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:action:action-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ActionMatch>
</Action>
<Action>
<ActionMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">get_realtime_visualization_data</AttributeValue>
<ActionAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:action:action-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ActionMatch>
</Action>
<Action>
<ActionMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">terminate_realtime_visualization_data</AttributeValue>
<ActionAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:action:action-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ActionMatch>
</Action>
</Actions>
</Target>
<Condition>
<Apply FunctionId="urn:oasis:names:tc:xacml:1.0:function:not">
<Apply FunctionId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">anonymous</AttributeValue>
<Apply FunctionId="urn:oasis:names:tc:xacml:1.0:function:string-one-and-only">
<SubjectAttributeDesignator
AttributeId="urn:oasis:names:tc:xacml:1.0:subject:subject-id"
DataType="http://www.w3.org/2001/XMLSchema#string"/>
</Apply>
</Apply>
</Apply>
</Condition>
</Rule> '''
policy_id = policy_client.create_service_access_policy('visualization', 'Vis_Service_Permitted_Non_Anonymous',
'Permit these operations in the Visualization Service if the user is not anonymous',
policy_text, headers=sa_user_header)
##############
policy_text = '''
<Rule RuleId="%s" Effect="Permit">
<Description>
%s
</Description>
<Target>
<Resources>
<Resource>
<ResourceMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">InstrumentDevice</AttributeValue>
<ResourceAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:resource:resource-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ResourceMatch>
</Resource>
<Resource>
<ResourceMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">PlatformDevice</AttributeValue>
<ResourceAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:resource:resource-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ResourceMatch>
</Resource>
<Resource>
<ResourceMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">agent</AttributeValue>
<ResourceAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:resource:receiver-type" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ResourceMatch>
</Resource>
</Resources>
<Actions>
<Action>
<ActionMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">negotiate</AttributeValue>
<ActionAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:action:action-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ActionMatch>
</Action>
<Action>
<ActionMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">get_capabilities</AttributeValue>
<ActionAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:action:action-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ActionMatch>
</Action>
</Actions>
<Subjects>
<Subject>
<SubjectMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">ORG_MEMBER</AttributeValue>
<SubjectAttributeDesignator
AttributeId="urn:oasis:names:tc:xacml:1.0:subject:subject-role-id"
DataType="http://www.w3.org/2001/XMLSchema#string"/>
</SubjectMatch>
</Subject>
</Subjects>
</Target>
</Rule> '''
        #All resource agents are handled essentially the same way - but the resource-id in the rule is set to the specific type
policy_id = policy_client.create_service_access_policy('InstrumentDevice', 'Instrument_Agent_Org_Member_Permitted',
'Permit these operations in an instrument agent for a Member of the Org',
policy_text, headers=sa_user_header)
        #All resource agents are handled essentially the same way - but the resource-id in the rule is set to the specific type
policy_id = policy_client.create_service_access_policy('PlatformDevice', 'Platform_Agent_Org_Member_Permitted',
            'Permit these operations in a platform agent for a Member of the Org',
policy_text, headers=sa_user_header)
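        # Note: the two calls above (and the similar pairs later in this
        # bootstrap) reuse the same policy_text verbatim, so they could be
        # collapsed into a loop. A minimal sketch, assuming the same
        # policy_client API used throughout this file:
        #
        # for res_type, rule_name, rule_desc in [
        #         ('InstrumentDevice', 'Instrument_Agent_Org_Member_Permitted',
        #          'Permit these operations in an instrument agent for a Member of the Org'),
        #         ('PlatformDevice', 'Platform_Agent_Org_Member_Permitted',
        #          'Permit these operations in a platform agent for a Member of the Org')]:
        #     policy_id = policy_client.create_service_access_policy(
        #         res_type, rule_name, rule_desc, policy_text, headers=sa_user_header)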
#############
policy_text = '''
<Rule RuleId="%s" Effect="Permit">
<Description>
%s
</Description>
<Target>
<Resources>
<Resource>
<ResourceMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">InstrumentDevice</AttributeValue>
<ResourceAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:resource:resource-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ResourceMatch>
</Resource>
<Resource>
<ResourceMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">PlatformDevice</AttributeValue>
<ResourceAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:resource:resource-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ResourceMatch>
</Resource>
<Resource>
<ResourceMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">agent</AttributeValue>
<ResourceAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:resource:receiver-type" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ResourceMatch>
</Resource>
</Resources>
<Actions>
<Action>
<ActionMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-regexp-match">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">get_.*$</AttributeValue>
<ActionAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:action:action-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ActionMatch>
</Action>
</Actions>
<Subjects>
<Subject>
<SubjectMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">ORG_MANAGER</AttributeValue>
<SubjectAttributeDesignator
AttributeId="urn:oasis:names:tc:xacml:1.0:subject:subject-role-id"
DataType="http://www.w3.org/2001/XMLSchema#string"/>
</SubjectMatch>
</Subject>
<Subject>
<SubjectMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">OBSERVATORY_OPERATOR</AttributeValue>
<SubjectAttributeDesignator
AttributeId="urn:oasis:names:tc:xacml:1.0:subject:subject-role-id"
DataType="http://www.w3.org/2001/XMLSchema#string"/>
</SubjectMatch>
</Subject>
<Subject>
<SubjectMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">INSTRUMENT_OPERATOR</AttributeValue>
<SubjectAttributeDesignator
AttributeId="urn:oasis:names:tc:xacml:1.0:subject:subject-role-id"
DataType="http://www.w3.org/2001/XMLSchema#string"/>
</SubjectMatch>
</Subject>
</Subjects>
</Target>
</Rule> '''
        #All resource agents are handled essentially the same way - but the resource-id in the rule is set to the specific type
policy_id = policy_client.create_service_access_policy('InstrumentDevice', 'Instrument_Agent_Operator_Manager_Permitted',
            'Permit these operations in an instrument agent for any user with the proper Operator or Manager role.',
policy_text, headers=sa_user_header)
        #All resource agents are handled essentially the same way - but the resource-id in the rule is set to the specific type
policy_id = policy_client.create_service_access_policy('PlatformDevice', 'Platform_Agent_Operator_Manager_Permitted',
            'Permit these operations in a platform agent for any user with the proper Operator or Manager role.',
policy_text, headers=sa_user_header)
#############
policy_text = '''
<Rule RuleId="%s" Effect="Permit">
<Description>
%s
</Description>
<Target>
<Resources>
<Resource>
<ResourceMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">InstrumentDevice</AttributeValue>
<ResourceAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:resource:resource-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ResourceMatch>
</Resource>
<Resource>
<ResourceMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">PlatformDevice</AttributeValue>
<ResourceAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:resource:resource-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ResourceMatch>
</Resource>
<Resource>
<ResourceMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">agent</AttributeValue>
<ResourceAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:resource:receiver-type" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ResourceMatch>
</Resource>
</Resources>
<Actions>
<Action>
<ActionMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">set_resource</AttributeValue>
<ActionAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:action:action-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ActionMatch>
</Action>
<Action>
<ActionMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">execute_resource</AttributeValue>
<ActionAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:action:action-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ActionMatch>
</Action>
<Action>
<ActionMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">ping_resource</AttributeValue>
<ActionAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:action:action-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ActionMatch>
</Action>
</Actions>
<Subjects>
<Subject>
<SubjectMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">INSTRUMENT_OPERATOR</AttributeValue>
<SubjectAttributeDesignator
AttributeId="urn:oasis:names:tc:xacml:1.0:subject:subject-role-id"
DataType="http://www.w3.org/2001/XMLSchema#string"/>
</SubjectMatch>
</Subject>
<Subject>
<SubjectMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">OBSERVATORY_OPERATOR</AttributeValue>
<SubjectAttributeDesignator
AttributeId="urn:oasis:names:tc:xacml:1.0:subject:subject-role-id"
DataType="http://www.w3.org/2001/XMLSchema#string"/>
</SubjectMatch>
</Subject>
<Subject>
<SubjectMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">ORG_MANAGER</AttributeValue>
<SubjectAttributeDesignator
AttributeId="urn:oasis:names:tc:xacml:1.0:subject:subject-role-id"
DataType="http://www.w3.org/2001/XMLSchema#string"/>
</SubjectMatch>
</Subject>
</Subjects>
</Target>
<Condition>
<Apply FunctionId="urn:oasis:names:tc:xacml:1.0:function:evaluate-function">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">check_resource_operation_policy</AttributeValue>
<ActionAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:action:param-dict" DataType="http://www.w3.org/2001/XMLSchema#dict"/>
</Apply>
</Condition>
</Rule> '''
        #All resource agents are handled essentially the same way - but the resource-id in the rule is set to the specific type
policy_id = policy_client.create_service_access_policy('InstrumentDevice', 'Instrument_Agent_Check_Resource_Operation_Policy',
'Call the check_resource_operation_policy operation in the Instrument Agent',
policy_text, headers=sa_user_header)
        #All resource agents are handled essentially the same way - but the resource-id in the rule is set to the specific type
policy_id = policy_client.create_service_access_policy('PlatformDevice', 'Platform_Agent_Check_Resource_Operation_Policy',
'Call the check_resource_operation_policy operation in the Platform Agent',
policy_text, headers=sa_user_header)
#############
policy_text = '''
<Rule RuleId="%s" Effect="Permit">
<Description>
%s
</Description>
<Target>
<Resources>
<Resource>
<ResourceMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">InstrumentDevice</AttributeValue>
<ResourceAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:resource:resource-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ResourceMatch>
</Resource>
<Resource>
<ResourceMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">PlatformDevice</AttributeValue>
<ResourceAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:resource:resource-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ResourceMatch>
</Resource>
<Resource>
<ResourceMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">agent</AttributeValue>
<ResourceAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:resource:receiver-type" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ResourceMatch>
</Resource>
</Resources>
<Actions>
<Action>
<ActionMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">set_agent</AttributeValue>
<ActionAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:action:action-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ActionMatch>
</Action>
<Action>
<ActionMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">execute_agent</AttributeValue>
<ActionAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:action:action-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ActionMatch>
</Action>
<Action>
<ActionMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">ping_agent</AttributeValue>
<ActionAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:action:action-id" DataType="http://www.w3.org/2001/XMLSchema#string"/>
</ActionMatch>
</Action>
</Actions>
<Subjects>
<Subject>
<SubjectMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">INSTRUMENT_OPERATOR</AttributeValue>
<SubjectAttributeDesignator
AttributeId="urn:oasis:names:tc:xacml:1.0:subject:subject-role-id"
DataType="http://www.w3.org/2001/XMLSchema#string"/>
</SubjectMatch>
</Subject>
<Subject>
<SubjectMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">OBSERVATORY_OPERATOR</AttributeValue>
<SubjectAttributeDesignator
AttributeId="urn:oasis:names:tc:xacml:1.0:subject:subject-role-id"
DataType="http://www.w3.org/2001/XMLSchema#string"/>
</SubjectMatch>
</Subject>
<Subject>
<SubjectMatch MatchId="urn:oasis:names:tc:xacml:1.0:function:string-equal">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">ORG_MANAGER</AttributeValue>
<SubjectAttributeDesignator
AttributeId="urn:oasis:names:tc:xacml:1.0:subject:subject-role-id"
DataType="http://www.w3.org/2001/XMLSchema#string"/>
</SubjectMatch>
</Subject>
</Subjects>
</Target>
<Condition>
<Apply FunctionId="urn:oasis:names:tc:xacml:1.0:function:evaluate-function">
<AttributeValue DataType="http://www.w3.org/2001/XMLSchema#string">check_agent_operation_policy</AttributeValue>
<ActionAttributeDesignator AttributeId="urn:oasis:names:tc:xacml:1.0:action:param-dict" DataType="http://www.w3.org/2001/XMLSchema#dict"/>
</Apply>
</Condition>
</Rule> '''
        #All resource agents are handled essentially the same way - but the resource-id in the rule is set to the specific type
policy_id = policy_client.create_service_access_policy('InstrumentDevice', 'Instrument_Agent_Check_Agent_Operation_Policy',
'Call the check_agent_operation_policy operation in the Instrument Agent',
policy_text, headers=sa_user_header)
        #All resource agents are handled essentially the same way - but the resource-id in the rule is set to the specific type
policy_id = policy_client.create_service_access_policy('PlatformDevice', 'Platform_Agent_Check_Agent_Operation_Policy',
'Call the check_agent_operation_policy operation in the Platform Agent',
policy_text, headers=sa_user_header)
pol_id = policy_client.add_process_operation_precondition_policy(process_name=RT.InstrumentDevice, op='get_resource',
policy_content='check_if_direct_access_mode', headers=sa_user_header )
pol_id = policy_client.add_process_operation_precondition_policy(process_name=RT.InstrumentDevice, op='set_resource',
policy_content='check_if_direct_access_mode', headers=sa_user_header )
pol_id = policy_client.add_process_operation_precondition_policy(process_name=RT.InstrumentDevice, op='execute_resource',
policy_content='check_if_direct_access_mode', headers=sa_user_header )
pol_id = policy_client.add_process_operation_precondition_policy(process_name=RT.PlatformDevice, op='get_resource',
policy_content='check_if_direct_access_mode', headers=sa_user_header )
pol_id = policy_client.add_process_operation_precondition_policy(process_name=RT.PlatformDevice, op='set_resource',
policy_content='check_if_direct_access_mode', headers=sa_user_header )
pol_id = policy_client.add_process_operation_precondition_policy(process_name=RT.PlatformDevice, op='execute_resource',
policy_content='check_if_direct_access_mode', headers=sa_user_header )
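        # The six precondition registrations above differ only in process_name
        # and op; a sketch of the equivalent loop, using the same client API:
        #
        # for process_name in (RT.InstrumentDevice, RT.PlatformDevice):
        #     for op in ('get_resource', 'set_resource', 'execute_resource'):
        #         pol_id = policy_client.add_process_operation_precondition_policy(
        #             process_name=process_name, op=op,
        #             policy_content='check_if_direct_access_mode',
        #             headers=sa_user_header)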
|
|
#!/usr/bin/env python
############################################################################
#
# Copyright (C) 2012, 2013 PX4 Development Team. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# 3. Neither the name PX4 nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
############################################################################
#
# Serial firmware uploader for the PX4FMU bootloader
#
# The PX4 firmware file is a JSON-encoded Python object, containing
# metadata fields and a zlib-compressed base64-encoded firmware image.
#
# The uploader uses the following fields from the firmware file:
#
# image
# The firmware that will be uploaded.
# image_size
# The size of the firmware in bytes.
# board_id
# The board for which the firmware is intended.
# board_revision
# Currently only used for informational purposes.
#
# for python2.7 compatibility
from __future__ import print_function
import sys
import argparse
import binascii
import serial
import struct
import json
import zlib
import base64
import time
import array
from sys import platform as _platform
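# For reference, a minimal sketch of how a firmware file in the format
# described in the header comment could be produced from a raw binary image.
# The helper name 'make_px4_descriptor' is hypothetical, not part of the
# real PX4 tooling:
def make_px4_descriptor(image_bytes, board_id, board_revision=0):
    # wrap the zlib-compressed, base64-encoded image with the metadata
    # fields this uploader reads back in firmware.__init__ below
    return json.dumps({
        'board_id': board_id,
        'board_revision': board_revision,
        'image_size': len(image_bytes),
        'image': base64.b64encode(zlib.compress(image_bytes)).decode('ascii'),
    })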
class firmware(object):
'''Loads a firmware file'''
desc = {}
image = bytes()
crctab = array.array('I', [
0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419, 0x706af48f, 0xe963a535, 0x9e6495a3,
0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988, 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91,
0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de, 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7,
0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9, 0xfa0f3d63, 0x8d080df5,
0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172, 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b,
0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59,
0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423, 0xcfba9599, 0xb8bda50f,
0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924, 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d,
0x76dc4190, 0x01db7106, 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433,
0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818, 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01,
0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e, 0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457,
0x65b0d9c6, 0x12b7e950, 0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65,
0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, 0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb,
0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0, 0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9,
0x5005713c, 0x270241aa, 0xbe0b1010, 0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17, 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad,
0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a, 0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683,
0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8, 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1,
0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb, 0x196c3671, 0x6e6b06e7,
0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc, 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5,
0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252, 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b,
0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55, 0x316e8eef, 0x4669be79,
0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236, 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f,
0xc5ba3bbe, 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d,
0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a, 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713,
0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38, 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21,
0x86d3d2d4, 0xf1d4e242, 0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777,
0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, 0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45,
0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2, 0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db,
0xaed16a4a, 0xd9d65adc, 0x40df0b66, 0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605, 0xcdd70693, 0x54de5729, 0x23d967bf,
0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94, 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d])
crcpad = bytearray(b'\xff\xff\xff\xff')
def __init__(self, path):
# read the file
        with open(path, "r") as f:
            self.desc = json.load(f)
self.image = bytearray(zlib.decompress(base64.b64decode(self.desc['image'])))
# pad image to 4-byte length
while ((len(self.image) % 4) != 0):
            self.image.append(0xff)  # bytearray.append() takes an int (works on Python 2 and 3)
def property(self, propname):
return self.desc[propname]
def __crc32(self, bytes, state):
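        # standard table-driven CRC-32 (polynomial 0xEDB88320): each byte is
        # folded into the running state via the precomputed crctab above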
for byte in bytes:
index = (state ^ byte) & 0xff
state = self.crctab[index] ^ (state >> 8)
return state
def crc(self, padlen):
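        # CRC the image itself, then keep feeding 0xff pad words up to padlen,
        # so the result matches a CRC computed over the whole flash area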
state = self.__crc32(self.image, int(0))
for i in range(len(self.image), (padlen - 1), 4):
state = self.__crc32(self.crcpad, state)
return state
class uploader(object):
    '''Uploads a firmware file to the PX4 FMU bootloader'''
# protocol bytes
INSYNC = b'\x12'
EOC = b'\x20'
# reply bytes
OK = b'\x10'
FAILED = b'\x11'
INVALID = b'\x13' # rev3+
# command bytes
NOP = b'\x00' # guaranteed to be discarded by the bootloader
GET_SYNC = b'\x21'
GET_DEVICE = b'\x22'
CHIP_ERASE = b'\x23'
CHIP_VERIFY = b'\x24' # rev2 only
PROG_MULTI = b'\x27'
READ_MULTI = b'\x28' # rev2 only
GET_CRC = b'\x29' # rev3+
GET_OTP = b'\x2a' # rev4+ , get a word from OTP area
GET_SN = b'\x2b' # rev4+ , get a word from SN area
REBOOT = b'\x30'
INFO_BL_REV = b'\x01' # bootloader protocol revision
BL_REV_MIN = 2 # minimum supported bootloader protocol
BL_REV_MAX = 4 # maximum supported bootloader protocol
INFO_BOARD_ID = b'\x02' # board type
INFO_BOARD_REV = b'\x03' # board revision
INFO_FLASH_SIZE = b'\x04' # max firmware size in bytes
PROG_MULTI_MAX = 60 # protocol max is 255, must be multiple of 4
READ_MULTI_MAX = 60 # protocol max is 255, something overflows with >= 64
NSH_INIT = bytearray(b'\x0d\x0d\x0d')
NSH_REBOOT_BL = b"reboot -b\n"
NSH_REBOOT = b"reboot\n"
MAVLINK_REBOOT_ID1 = bytearray(b'\xfe\x21\x72\xff\x00\x4c\x00\x00\x80\x3f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf6\x00\x01\x00\x00\x48\xf0')
MAVLINK_REBOOT_ID0 = bytearray(b'\xfe\x21\x45\xff\x00\x4c\x00\x00\x80\x3f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf6\x00\x00\x00\x00\xd7\xac')
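    # A typical exchange, as implemented below (sketch): the host sends a
    # command byte plus any arguments, terminated by EOC; the bootloader
    # answers with any requested data followed by INSYNC and a status byte
    # (OK, FAILED or INVALID), e.g.
    #   send GET_DEVICE INFO_BL_REV EOC  ->  recv <4-byte LE uint> INSYNC OK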
def __init__(self, portname, baudrate):
# open the port, keep the default timeout short so we can poll quickly
self.port = serial.Serial(portname, baudrate, timeout=0.5)
self.otp = b''
self.sn = b''
def close(self):
if self.port is not None:
self.port.close()
def __send(self, c):
# print("send " + binascii.hexlify(c))
self.port.write(c)
def __recv(self, count=1):
c = self.port.read(count)
if len(c) < 1:
raise RuntimeError("timeout waiting for data")
# print("recv " + binascii.hexlify(c))
return c
def __recv_int(self):
raw = self.__recv(4)
val = struct.unpack("<I", raw)
return val[0]
def __getSync(self):
self.port.flush()
c = bytes(self.__recv())
if c != self.INSYNC:
raise RuntimeError("unexpected %s instead of INSYNC" % c)
c = self.__recv()
if c == self.INVALID:
raise RuntimeError("bootloader reports INVALID OPERATION")
if c == self.FAILED:
raise RuntimeError("bootloader reports OPERATION FAILED")
if c != self.OK:
raise RuntimeError("unexpected response 0x%x instead of OK" % ord(c))
# attempt to get back into sync with the bootloader
def __sync(self):
# send a stream of ignored bytes longer than the longest possible conversation
# that we might still have in progress
# self.__send(uploader.NOP * (uploader.PROG_MULTI_MAX + 2))
self.port.flushInput()
self.__send(uploader.GET_SYNC
+ uploader.EOC)
self.__getSync()
# def __trySync(self):
# c = self.__recv()
# if (c != self.INSYNC):
# #print("unexpected 0x%x instead of INSYNC" % ord(c))
# return False;
# c = self.__recv()
# if (c != self.OK):
# #print("unexpected 0x%x instead of OK" % ord(c))
# return False
# return True
# send the GET_DEVICE command and wait for an info parameter
def __getInfo(self, param):
self.__send(uploader.GET_DEVICE + param + uploader.EOC)
value = self.__recv_int()
self.__getSync()
return value
# send the GET_OTP command and wait for an info parameter
def __getOTP(self, param):
t = struct.pack("I", param) # int param as 32bit ( 4 byte ) char array.
self.__send(uploader.GET_OTP + t + uploader.EOC)
value = self.__recv(4)
self.__getSync()
return value
    # send the GET_SN command and wait for an info parameter
def __getSN(self, param):
t = struct.pack("I", param) # int param as 32bit ( 4 byte ) char array.
self.__send(uploader.GET_SN + t + uploader.EOC)
value = self.__recv(4)
self.__getSync()
return value
# send the CHIP_ERASE command and wait for the bootloader to become ready
def __erase(self):
self.__send(uploader.CHIP_ERASE
+ uploader.EOC)
# erase is very slow, give it 20s
deadline = time.time() + 20
while time.time() < deadline:
try:
self.__getSync()
return
except RuntimeError:
# we timed out, that's OK
continue
raise RuntimeError("timed out waiting for erase")
# send a PROG_MULTI command to write a collection of bytes
def __program_multi(self, data):
        if runningPython3:
            length = len(data).to_bytes(1, byteorder='big')
        else:
            length = chr(len(data))
self.__send(uploader.PROG_MULTI)
self.__send(length)
self.__send(data)
self.__send(uploader.EOC)
self.__getSync()
# verify multiple bytes in flash
def __verify_multi(self, data):
        if runningPython3:
            length = len(data).to_bytes(1, byteorder='big')
        else:
            length = chr(len(data))
self.__send(uploader.READ_MULTI)
self.__send(length)
self.__send(uploader.EOC)
self.port.flush()
programmed = self.__recv(len(data))
if programmed != data:
print("got " + binascii.hexlify(programmed))
print("expect " + binascii.hexlify(data))
return False
self.__getSync()
return True
# send the reboot command
def __reboot(self):
self.__send(uploader.REBOOT
+ uploader.EOC)
self.port.flush()
# v3+ can report failure if the first word flash fails
if self.bl_rev >= 3:
self.__getSync()
# split a sequence into a list of size-constrained pieces
def __split_len(self, seq, length):
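        # e.g. __split_len(b'abcdef', 4) -> [b'abcd', b'ef']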
return [seq[i:i+length] for i in range(0, len(seq), length)]
# upload code
def __program(self, fw):
code = fw.image
groups = self.__split_len(code, uploader.PROG_MULTI_MAX)
        for chunk in groups:
            self.__program_multi(chunk)
# verify code
def __verify_v2(self, fw):
self.__send(uploader.CHIP_VERIFY
+ uploader.EOC)
self.__getSync()
code = fw.image
groups = self.__split_len(code, uploader.READ_MULTI_MAX)
        for chunk in groups:
            if not self.__verify_multi(chunk):
                raise RuntimeError("Verification failed")
def __verify_v3(self, fw):
expect_crc = fw.crc(self.fw_maxsize)
self.__send(uploader.GET_CRC
+ uploader.EOC)
report_crc = self.__recv_int()
self.__getSync()
if report_crc != expect_crc:
print("Expected 0x%x" % expect_crc)
print("Got 0x%x" % report_crc)
raise RuntimeError("Program CRC failed")
# get basic data about the board
def identify(self):
# make sure we are in sync before starting
self.__sync()
# get the bootloader protocol ID first
self.bl_rev = self.__getInfo(uploader.INFO_BL_REV)
if (self.bl_rev < uploader.BL_REV_MIN) or (self.bl_rev > uploader.BL_REV_MAX):
print("Unsupported bootloader protocol %d" % uploader.INFO_BL_REV)
raise RuntimeError("Bootloader protocol mismatch")
self.board_type = self.__getInfo(uploader.INFO_BOARD_ID)
self.board_rev = self.__getInfo(uploader.INFO_BOARD_REV)
self.fw_maxsize = self.__getInfo(uploader.INFO_FLASH_SIZE)
# upload the firmware
def upload(self, fw):
# Make sure we are doing the right thing
if self.board_type != fw.property('board_id'):
raise RuntimeError("Firmware not suitable for this board")
if self.fw_maxsize < fw.property('image_size'):
raise RuntimeError("Firmware image is too large for this board")
# OTP added in v4:
if self.bl_rev > 3:
for byte in range(0,32*6,4):
x = self.__getOTP(byte)
self.otp = self.otp + x
print(binascii.hexlify(x).decode('Latin-1') + ' ', end='')
# see src/modules/systemlib/otp.h in px4 code:
self.otp_id = self.otp[0:4]
self.otp_idtype = self.otp[4:5]
self.otp_vid = self.otp[8:4:-1]
self.otp_pid = self.otp[12:8:-1]
self.otp_coa = self.otp[32:160]
# show user:
print("type: " + self.otp_id.decode('Latin-1'))
print("idtype: " + binascii.b2a_qp(self.otp_idtype).decode('Latin-1'))
print("vid: " + binascii.hexlify(self.otp_vid).decode('Latin-1'))
print("pid: "+ binascii.hexlify(self.otp_pid).decode('Latin-1'))
print("coa: "+ binascii.b2a_base64(self.otp_coa).decode('Latin-1'))
print("sn: ", end='')
for byte in range(0,12,4):
x = self.__getSN(byte)
x = x[::-1] # reverse the bytes
self.sn = self.sn + x
print(binascii.hexlify(x).decode('Latin-1'), end='') # show user
print('')
print("erase...")
self.__erase()
print("program...")
self.__program(fw)
print("verify...")
if self.bl_rev == 2:
self.__verify_v2(fw)
else:
self.__verify_v3(fw)
print("done, rebooting.")
self.__reboot()
self.port.close()
def send_reboot(self):
try:
# try reboot via NSH first
self.__send(uploader.NSH_INIT)
self.__send(uploader.NSH_REBOOT_BL)
self.__send(uploader.NSH_INIT)
self.__send(uploader.NSH_REBOOT)
# then try MAVLINK command
self.__send(uploader.MAVLINK_REBOOT_ID1)
self.__send(uploader.MAVLINK_REBOOT_ID0)
        except Exception:
return
# Detect python version
runningPython3 = (sys.version_info[0] >= 3)
# Parse commandline arguments
parser = argparse.ArgumentParser(description="Firmware uploader for the PX4 autopilot system.")
parser.add_argument('--port', action="store", required=True, help="Serial port(s) to which the FMU may be attached")
parser.add_argument('--baud', action="store", type=int, default=115200, help="Baud rate of the serial port (default is 115200), only required for true serial ports.")
parser.add_argument('firmware', action="store", help="Firmware file to be uploaded")
args = parser.parse_args()
# Load the firmware file
fw = firmware(args.firmware)
print("Loaded firmware for %x,%x, waiting for the bootloader..." % (fw.property('board_id'), fw.property('board_revision')))
# Spin waiting for a device to show up
while True:
portlist = []
patterns = args.port.split(",")
# on unix-like platforms use glob to support wildcard ports. This allows
# the use of /dev/serial/by-id/usb-3D_Robotics on Linux, which prevents the upload from
# causing modem hangups etc
if "linux" in _platform or "darwin" in _platform:
import glob
for pattern in patterns:
portlist += glob.glob(pattern)
else:
portlist = patterns
for port in portlist:
#print("Trying %s" % port)
# create an uploader attached to the port
try:
if "linux" in _platform:
# Linux, don't open Mac OS and Win ports
if not "COM" in port and not "tty.usb" in port:
up = uploader(port, args.baud)
elif "darwin" in _platform:
# OS X, don't open Windows and Linux ports
if not "COM" in port and not "ACM" in port:
up = uploader(port, args.baud)
elif "win" in _platform:
# Windows, don't open POSIX ports
if not "/" in port:
up = uploader(port, args.baud)
except Exception:
# open failed, rate-limit our attempts
time.sleep(0.05)
# and loop to the next port
continue
# port is open, try talking to it
try:
# identify the bootloader
up.identify()
print("Found board %x,%x bootloader rev %x on %s" % (up.board_type, up.board_rev, up.bl_rev, port))
except Exception:
# most probably a timeout talking to the port, no bootloader, try to reboot the board
print("attempting reboot on %s..." % port)
up.send_reboot()
            # wait for the reboot, without it we might run into Serial I/O Error 5
time.sleep(0.5)
continue
try:
# ok, we have a bootloader, try flashing it
up.upload(fw)
except RuntimeError as ex:
# print the error
print("ERROR: %s" % ex.args)
finally:
# always close the port
up.close()
# we could loop here if we wanted to wait for more boards...
sys.exit(0)
|
|
# vim: set ts=4 sw=4 expandtab sts=4 fileencoding=utf-8:
# Copyright (c) 2013-2015 Christian Geier et al.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from __future__ import unicode_literals
import icalendar
from click import confirm, echo, style, prompt
from vdirsyncer.utils.vobject import Item
from collections import defaultdict
import datetime
import itertools
import logging
import sys
import textwrap
from khal import aux, calendar_display
from khal.compat import to_unicode
from khal.khalendar.exceptions import ReadOnlyCalendarError, DuplicateUid
from khal.exceptions import InvalidDate, FatalError
from khal.khalendar.event import Event
from khal.khalendar.backend import sort_key
from khal import __version__, __productname__
from khal.log import logger
from .terminal import colored, get_terminal_size, merge_columns
def construct_daynames(daylist, longdateformat):
"""returns a list of tuples of datetime objects and datenames
:param daylist: list of dates
:type daylist: list(datetime.date)
:param longdateformat: format in which to print dates
:param str
:returns: list of names and dates
:rtype: list((str, datetime.date))
"""
for date in daylist:
if date == datetime.date.today():
yield (date, 'Today:')
elif date == datetime.date.today() + datetime.timedelta(days=1):
yield (date, 'Tomorrow:')
else:
yield (date, date.strftime(longdateformat))
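# Example (assuming today is 2015-06-01):
#   list(construct_daynames([datetime.date(2015, 6, 1), datetime.date(2015, 6, 3)], '%d.%m.%Y'))
#   -> [(datetime.date(2015, 6, 1), 'Today:'), (datetime.date(2015, 6, 3), '03.06.2015')]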
def get_agenda(collection, locale, dates=None, firstweekday=0,
days=None, events=None, width=45, show_all_days=False):
"""returns a list of events scheduled for all days in daylist
included are header "rows"
:param collection:
:type collection: khalendar.CalendarCollection
:param dates: a list of all dates for which the events should be return,
including what should be printed as a header
:type collection: list(str)
:param show_all_days: True if all days must be shown, event without event
:type show_all_days: Boolean
:returns: a list to be printed as the agenda for the given days
:rtype: list(str)
"""
event_column = list()
if days is None:
days = 2
if dates is None or len(dates) == 0:
dates = [datetime.date.today()]
else:
try:
dates = [
aux.guessdatetimefstr([date], locale)[0].date()
if not isinstance(date, datetime.date) else date
for date in dates
]
except InvalidDate as error:
logging.fatal(error)
sys.exit(1)
if days is not None:
daylist = [date + datetime.timedelta(days=one)
for one in range(days) for date in dates]
daylist.sort()
daylist = construct_daynames(daylist, locale['longdateformat'])
localize = locale['local_timezone'].localize
for day, dayname in daylist:
start = localize(datetime.datetime.combine(day, datetime.time.min))
end = localize(datetime.datetime.combine(day, datetime.time.max))
# TODO unify allday and datetime events
all_day_events = collection.get_allday_by_time_range(day)
events = collection.get_datetime_by_time_range(start, end)
if len(events) == 0 and len(all_day_events) == 0 and not show_all_days:
continue
event_column.append(style(dayname, bold=True))
events.sort(key=lambda e: e.start)
for event in itertools.chain(all_day_events, events):
desc = textwrap.wrap(event.relative_to(day), width)
event_column.extend([colored(d, event.color) for d in desc])
if event_column == []:
event_column = [style('No events', bold=True)]
return event_column
def calendar(collection, date=None, firstweekday=0, encoding='utf-8',
weeknumber=False, show_all_days=False, **kwargs):
if date is None:
date = [datetime.datetime.today()]
term_width, _ = get_terminal_size()
lwidth = 25
rwidth = term_width - lwidth - 4
event_column = get_agenda(
collection, dates=date, width=rwidth, show_all_days=show_all_days,
**kwargs)
calendar_column = calendar_display.vertical_month(
firstweekday=firstweekday, weeknumber=weeknumber)
rows = merge_columns(calendar_column, event_column)
# XXX: Generate this as a unicode in the first place, rather than
# casting it.
echo('\n'.join(rows).encode(encoding))
def agenda(collection, date=None, encoding='utf-8',
show_all_days=False, **kwargs):
term_width, _ = get_terminal_size()
event_column = get_agenda(collection, dates=date, width=term_width,
show_all_days=show_all_days, **kwargs)
# XXX: Generate this as a unicode in the first place, rather than
# casting it.
echo(to_unicode('\n'.join(event_column), encoding))
def new_from_string(collection, calendar_name, conf, date_list, location=None, repeat=None,
until=None):
"""construct a new event from a string and add it"""
try:
event = aux.construct_event(
date_list,
location=location,
repeat=repeat,
until=until,
locale=conf['locale'])
except FatalError:
sys.exit(1)
event = Event.fromVEvents(
[event], calendar=calendar_name, locale=conf['locale'])
try:
collection.new(event)
except ReadOnlyCalendarError:
logger.fatal('ERROR: Cannot modify calendar "{}" as it is '
'read-only'.format(calendar_name))
sys.exit(1)
if conf['default']['print_new'] == 'event':
echo(event.event_description)
elif conf['default']['print_new'] == 'path':
path = collection._calnames[event.calendar].path + event.href
echo(path.encode(conf['locale']['encoding']))
def interactive(collection, conf):
"""start the interactive user interface"""
from . import ui
pane = ui.ClassicView(collection,
conf,
title='select an event',
description='do something')
ui.start_pane(
pane, pane.cleanup,
program_info='{0} v{1}'.format(__productname__, __version__)
)
def import_ics(collection, conf, ics, batch=False, random_uid=False):
"""
:param batch: setting this to True will insert without asking for approval,
even when an event with the same uid already exists
:type batch: bool
"""
cal = icalendar.Calendar.from_ical(ics)
events = [item for item in cal.walk() if item.name == 'VEVENT']
events_grouped = defaultdict(list)
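    # group all VEVENTs sharing a UID (e.g. a recurring event and its
    # recurrence overrides) so that each group is imported as a single item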
for event in events:
events_grouped[event['UID']].append(event)
vevents = list()
for uid in events_grouped:
vevents.append(sorted(events_grouped[uid], key=sort_key))
for vevent in vevents:
import_event(vevent, collection, conf['locale'], batch, random_uid)
def import_event(vevent, collection, locale, batch, random_uid):
"""import one event into collection, let user choose the collection"""
# print all sub-events
for sub_event in vevent:
if not batch:
event = Event.fromVEvents(
[sub_event], calendar=collection.default_calendar_name, locale=locale)
echo(event.event_description)
# get the calendar to insert into
if batch or len(collection.writable_names) == 1:
calendar_name = collection.default_calendar_name
else:
choice = list()
for num, name in enumerate(collection.writable_names):
choice.append('{}({})'.format(name, num))
choice = ', '.join(choice)
while True:
value = prompt('Which calendar do you want to import to? \n'
'{}'.format(choice), default=collection.default_calendar_name)
try:
number = int(value)
calendar_name = collection.writable_names[number]
break
except (ValueError, IndexError):
                matches = [name for name in collection.writable_names
                           if name.startswith(value)]
                # note: a plain filter() would return an iterator on python 3
                if len(matches) == 1:
                    calendar_name = matches[0]
                    break
echo('invalid choice')
if batch or confirm("Do you want to import this event into `{}`?"
"".format(calendar_name)):
ics = aux.ics_from_list(vevent, random_uid)
try:
collection.new(
Item(ics.to_ical().decode('utf-8')),
collection=calendar_name)
except DuplicateUid:
if batch or confirm("An event with the same UID already exists. "
"Do you want to update it?"):
collection.force_update(
Item(ics.to_ical().decode('utf-8')),
collection=calendar_name)
else:
logger.warn("Not importing event with UID `{}`".format(event.uid))
|
|
#!/usr/bin/env python
"""
This script tests the steps of the promoter workflow.
- Checks via the dlrn API that the hash under test has been promoted
  to the promotion target
- Checks that containers with that hash are pushed to repo 2
- Checks that images are uploaded with that hash and linked to
promotion target
- Checks the promoter logs for expected strings
"""
import argparse
import logging
import os
import re
import stat
import dlrnapi_client
try:
import urllib2 as url_lib
except ImportError:
import urllib.request as url_lib
import yaml
from dlrn_hash import DlrnHash
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger("promoter-integration-checks")
log.setLevel(logging.DEBUG)
def check_dlrn_promoted_hash(stage_info=None, **kwargs):
"""
    Check that the supposed hash has been promoted to
    promotion_target as recorded in DLRN.
    :param stage_info: a dictionary containing parameters of the staging env
    :param kwargs: additional parameters for non-staged executions
:return: None
"""
if stage_info is not None:
# We are checking a stage
api_url = stage_info['dlrn']['server']['api_url']
promotion_target = stage_info['dlrn']['promotion_target']
candidate_commit = \
stage_info['dlrn']['promotions']['promotion_candidate']
candidate_hash = DlrnHash(source=candidate_commit)
api_client = dlrnapi_client.ApiClient(host=api_url)
dlrn_client = dlrnapi_client.DefaultApi(api_client=api_client)
params = dlrnapi_client.PromotionQuery()
params.limit = 1
params.promote_name = promotion_target
else:
# We are checking production server
# TODO(gcerami) implement this branch ?
pass
try:
api_response = dlrn_client.api_promotions_get(params)
log.debug(api_response)
except dlrnapi_client.rest.ApiException:
log.error('Exception when calling api_promotions_get: %s',
dlrnapi_client.rest.ApiException)
raise
error_msg = "No promotions for hash {}".format(candidate_hash)
assert api_response != [], error_msg
promotion_hash = DlrnHash(source=api_response[0])
error_message = ("Expected full hash: {}"
" has not been promoted to {}."
"".format(promotion_hash.full_hash, promotion_target))
conditions = [(promotion.promote_name == promotion_target)
for promotion in api_response]
assert any(conditions), error_message
def query_container_registry_promotion(stage_info=None, **kwargs):
"""
Check that the hash containers have been pushed to the
promotion registry with the promotion_target tag
    :param stage_info: a dictionary containing parameters of the staging env
    :param kwargs: additional parameters for non-staged executions
:return: None
"""
if stage_info is not None:
registry_target = stage_info['registries']['targets'][0]['host']
promotion_target = stage_info['dlrn']['promotion_target']
candidate_dict = stage_info['dlrn']['promotions']['promotion_candidate']
candidate_hash = DlrnHash(source=candidate_dict)
missing_images = []
no_ppc = stage_info.get('ppc_manifests', True)
for line in stage_info['containers']['images']:
name, tag = line.split(":")
reg_url = "http://{}/v2/{}/manifests/{}".format(
registry_target, name, tag
)
log.info("Checking for promoted container hash: %s", reg_url)
try:
url_lib.urlopen(reg_url)
log.debug("%s:%s found", name, tag)
except url_lib.HTTPError as ex:
log.exception(ex)
if no_ppc and '_ppc64le' in tag:
log.info("(expected - ppc manifests disabled)"
"Image not found - %s", line)
else:
log.error("Image not found - %s", line)
missing_images.append(line)
# For the full_hash lines only, check that there is
# an equivalent promotion_target entry
if tag == candidate_hash.full_hash:
reg_url = "http://{}/v2/{}/manifests/{}".format(
registry_target, name, promotion_target
)
log.info("Checking for promoted container tag: %s", reg_url)
try:
url_lib.urlopen(reg_url)
log.debug("%s:%s found", name, promotion_target)
except url_lib.HTTPError as ex:
log.exception(ex)
log.error("Image with named tag not found - %s", line)
promo_tgt_line = line.replace(candidate_hash.full_hash,
promotion_target)
missing_images.append(promo_tgt_line)
else:
# We are checking production
# TODO: how to verify promoter containers
log.info("Compare images tagged with hash and promotion target:")
log.error("Not implemented")
assert missing_images == [], "Images are missing {}".format(missing_images)
def compare_tagged_image_hash(stage_info=None, **kwargs):
"""
Ensure that the promotion target images directory
is a soft link to the promoted full hash images directory.
    :param stage_info: a dictionary containing parameters of the staging env
    :param kwargs: additional parameters for non-staged executions
:return: None
"""
if stage_info is not None:
        # We are checking a stage
distro_name = stage_info['main']['distro_name']
distro_version = stage_info['main']['distro_version']
distro = "{}{}".format(distro_name, distro_version)
release = stage_info['main']['release']
target_label = stage_info['dlrn']['promotion_target']
images_top_root = stage_info['overcloud_images']['root']
images_top_root = images_top_root.rstrip("/")
images_root = os.path.join(images_top_root, distro, release,
"rdo_trunk")
promotion_link = os.path.join(images_root, target_label)
candidate_dict = stage_info['dlrn']['promotions']['promotion_candidate']
candidate_hash = DlrnHash(source=candidate_dict)
promotion_dir = os.path.join(images_root, candidate_hash.full_hash)
current_dict = stage_info['dlrn']['promotions']['currently_promoted']
current_hash = DlrnHash(source=current_dict)
previous_dict = stage_info['dlrn']['promotions']['previously_promoted']
previous_label = previous_dict['name']
previous_link = os.path.join(images_root, previous_label)
previous_dir = os.path.join(images_root, current_hash.full_hash)
rl_module = os
else:
# We are checking production
# FIXME(gerami) this branch needs revisiting
images_base_dir = kwargs['image_base']
user = kwargs['user']
key_path = kwargs['key_path']
# promotion_target = args[3]
# full_hash = args[4]
# release = kwargs['release']
log.debug("Install required for nonstaging env")
import pysftp
sftp = pysftp.Connection(
host=images_base_dir,
username=user, private_key=key_path)
# images_dir = os.path.join(
# '/var/www/html/images',
# release, 'rdo_trunk')
rl_module = sftp
check_links(rl_module, promotion_link, target_label, promotion_dir,
previous_link=previous_link, previous_dir=previous_dir)
def check_links(rl_module, promotion_link, target_label, promotion_dir,
previous_link=None, previous_dir=None):
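    """verify that promotion_link is a symlink pointing at promotion_dir and,
    if given, that previous_link points at previous_dir (rl_module is any
    object providing lstat/readlink, i.e. os or a pysftp connection)"""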
try:
file_mode = rl_module.lstat(promotion_link).st_mode
assert True
except OSError:
assert False, "No link was created"
linked_dir = rl_module.readlink(promotion_link)
assert stat.S_ISLNK(file_mode), "promoter dir is not a symlink"
error_msg = "{} points to wrong dir {} instead of {}".format(target_label,
linked_dir,
promotion_dir)
assert linked_dir == promotion_dir, error_msg
if previous_dir is not None and previous_link is not None:
try:
file_mode = rl_module.lstat(previous_link).st_mode
assert True
except OSError:
assert False, "No link was created"
assert stat.S_ISLNK(file_mode), "Promoted dir is not a symlink"
p_link = rl_module.readlink(previous_link)
msg = "{} != {}".format(p_link, previous_dir)
assert p_link == previous_dir, msg
def parse_promotion_logs(stage_info=None, **kwargs):
"""
Check that the promotion logs have the right
strings printed for the promotion status
    :param stage_info: a dictionary containing parameters of the staging env
    :param kwargs: additional parameters for non-staged executions
:return: None
"""
if stage_info is not None:
# We are checking a stage
        # There's a difference between functional and integration tests here.
        # Functional tests drive the promoter configuration and force a
        # logfile location in the stage dir. In functional tests we need to
        # check that log file.
# In Integration tests the promoter is run independently and the log
# file used does not depend on stage env at all
# We need to check first if we are logging in the primary location,
# and if the file does not exist, we can use the location proposed by
# the stage
try:
logfile = stage_info['main']['log_file']
except KeyError:
logfile = ""
log.info("Verifying presence of log file in %s", logfile)
try:
os.stat(os.path.expanduser(logfile))
except OSError:
log.warning("%s not found", logfile)
logfile = stage_info['main']['log_file']
log.info("Verifying presence of log file in %s", logfile)
try:
os.stat(os.path.expanduser(logfile))
except OSError:
log.error("No log file found")
raise
log.info("Using %s as log file to parse", logfile)
candidate_dict = stage_info['dlrn']['promotions']['promotion_candidate']
candidate_hash = DlrnHash(source=candidate_dict)
with open(os.path.expanduser(logfile), 'r') as lf:
logfile_contents = lf.read()
else:
# We are checking production
# logfile = kwargs['logfile']
# from bs4 import BeautifulSoup
log.debug("Reading web hosted log file")
log.error("Not implemented")
# url = url_lib.request.urlopen(logfile).read()
# soup = BeautifulSoup(url, 'html.parser')
# logfile_contents = soup.get_text()
# Check that the promoter process finished
error_message = "Promoter never finished"
termination_message = "Promoter terminated normally"
assert termination_message in logfile_contents, error_message
# We have a list of hashes at our disposal, we know which one
# will have to fail, and which one will have to pass
# We can do all in the same pass
# Patterns for the log in the new code
candidate_hash_pattern = re.sub("timestamp:.*",
"timestamp:.*",
str(candidate_hash))
    # TODO(gcerami) check if something can be broken if we are not checking
    # the component correctly
candidate_hash_pattern = re.sub("component:.*",
"component:.*",
candidate_hash_pattern)
success_pattern_container = re.compile(
"Containers promote '{}' to tripleo-ci-staging-promoted: Successful "
"promotion".format(candidate_hash_pattern)
)
success_pattern_images = re.compile(
"Qcow promote '{}' to tripleo-ci-staging-promoted: "
"Successful promotion".format(candidate_hash_pattern)
)
success_pattern_criteria = re.compile(
"Candidate hash '{}': criteria met, attempting promotion to "
"tripleo-ci-staging-promoted".format(candidate_hash_pattern)
)
success_pattern_summary = re.compile(
"Summary: Promoted 1 hashes this round"
)
success_pattern_target = re.compile(
"Candidate hash '{}': SUCCESSFUL promotion to "
"tripleo-ci-staging-promoted".format(candidate_hash_pattern)
)
success_patterns = [
success_pattern_summary,
success_pattern_criteria,
success_pattern_images,
success_pattern_target,
success_pattern_container,
]
    # This commit is supposed to succeed
# Check strings for passing hashes
log.info("Status Passing: %s", candidate_hash)
# Build pattern for successful promotion
for check_pattern in success_patterns:
success_pattern_search = \
check_pattern.search(logfile_contents)
error_message = "Pattern not found: %s" % check_pattern.pattern
assert success_pattern_search is not None, error_message
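# For reference, a sketch of the stage-info.yaml structure these checks read
# (keys taken from the accesses above; the values shown are illustrative):
#
# main:
#   distro_name: centos
#   distro_version: "7"
#   release: master
#   log_file: ~/promoter_logs/promoter.log
# dlrn:
#   server:
#     api_url: http://localhost:58080
#   promotion_target: tripleo-ci-staging-promoted
#   promotions:
#     promotion_candidate: {...}
#     currently_promoted: {...}
#     previously_promoted: {name: ..., ...}
# registries:
#   targets:
#     - host: localhost:8001
# containers:
#   images:
#     - namespace/name:tag
# overcloud_images:
#   root: /tmp/overcloud_images/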
def main():
parser = argparse.ArgumentParser(
description='Pass a config file.')
parser.add_argument('--stage-info-file', default="/tmp/stage-info.yaml")
args = parser.parse_args()
with open(args.stage_info_file) as si:
stage_info = yaml.safe_load(si)
log.info('Running test: check_dlrn_promoted_hash')
check_dlrn_promoted_hash(stage_info=stage_info)
log.info('Running test: query_container_registry_promotion')
query_container_registry_promotion(stage_info=stage_info)
log.info('Running test: compare_tagged_image_hash')
compare_tagged_image_hash(stage_info=stage_info)
log.info('Running test: parse_promotion_logs')
parse_promotion_logs(stage_info=stage_info)
if __name__ == "__main__":
main()
|
|
"""
Tests for all shipped sniplates/django.html widgets.
"""
import datetime
from django import forms
from django.template.loader import get_template
from django.test import SimpleTestCase
from django.utils.encoding import python_2_unicode_compatible
from django.utils.datastructures import MultiValueDict
from .forms import DjangoWidgetsForm, FilesForm
from .utils import TemplateTestMixin, template_dirs
@python_2_unicode_compatible
class FakeFieldFile(object):
"""
Quacks like a FieldFile (has a .url and unicode representation), but
doesn't require us to care about storages etc.
Taken from django.tests.forms_tests.test.test_widgets.
"""
url = 'something'
def __str__(self):
return self.url
@template_dirs('field_tag')
class TestFieldTag(TemplateTestMixin, SimpleTestCase):
def test_widgets_unbound(self):
"""
Tests all form fields one by one, with a fresh, unbound form without
initial data.
"""
self.ctx['form'] = DjangoWidgetsForm()
tmpl = get_template('widgets_django')
output = tmpl.render(self.ctx)
# map field to expected widget output
expected_output = {
'char': '<input type="text" name="char" id="id_char" value="" class=" " required>',
'email': '<input type="email" name="email" id="id_email" value="" class=" " required>',
'url': '<input type="url" name="url" id="id_url" value="" class=" " required>',
'number': '<input type="number" name="number" id="id_number" value="" class=" " required>',
'password': '<input type="password" name="password" id="id_password" value="" class=" " required>',
'hidden': '<input type="hidden" name="hidden" id="id_hidden" value="" class=" " required>',
# this one is hard to test, as it may NOT contain the output - it's empty by default
'multiple_hidden': [
'<input type="hidden" name="multiple_hidden" id="id_multiple_hidden_0" value="N" required>',
'<input type="hidden" name="multiple_hidden" id="id_multiple_hidden_1" value="o" required>',
'<input type="hidden" name="multiple_hidden" id="id_multiple_hidden_2" value="n" required>',
'<input type="hidden" name="multiple_hidden" id="id_multiple_hidden_3" value="e" required>',
],
'date': '<input type="date" name="date" id="id_date" value="" class=" " required>',
'datetime': '<input type="datetime" name="datetime" id="id_datetime" value="" class=" " required>',
'time': '<input type="time" name="time" id="id_time" value="" class=" " required>',
'text': '<textarea name="text" id="id_text" class=" " required cols="40" rows="10"></textarea> ',
'checkbox': '''
<label for="id_checkbox" class="">
<input name="checkbox" id="id_checkbox" type="checkbox">
Checkbox
</label>''',
'select': '''
<select name="select" id="id_select">
<option value="1">a</option>
<option value="11">b</option>
<option value="22">c</option>
</select>''',
'optgroup_select': '''
<select id="id_optgroup_select" name="optgroup_select">
<optgroup label="label1">
<option value="1">a</option>
<option value="11">b</option>
</optgroup>
<optgroup label="label2">
<option value="22">c</option>
</optgroup>
</select>''',
'null_boolean_select': '''
<select id="id_null_boolean_select" name="null_boolean_select">
<option value="1" selected="selected">Unknown</option>
<option value="2">Yes</option>
<option value="3">No</option>
</select>''',
'select_multiple': '''
<select name="select_multiple" id="id_select_multiple" multiple>
<option value="1">a</option>
<option value="11">b</option>
<option value="22">c</option>
</select>''',
'radio_select': '''
<ul id="id_radio_select">
<li><input name="radio_select" type="radio" id="id_radio_select_0" value="1" >a</li>
<li><input name="radio_select" type="radio" id="id_radio_select_1" value="11" >b</li>
<li><input name="radio_select" type="radio" id="id_radio_select_2" value="22" >c</li>
</ul>''',
'checkbox_select_multiple': '''
<ul id="id_checkbox_select_multiple">
<li><input name="checkbox_select_multiple" type="checkbox"
id="id_checkbox_select_multiple_0" value="1">a</li>
<li><input name="checkbox_select_multiple" type="checkbox"
id="id_checkbox_select_multiple_1" value="11">b</li>
<li><input name="checkbox_select_multiple" type="checkbox"
id="id_checkbox_select_multiple_2" value="22">c</li>
</ul>''',
'file': '<input id="id_file" name="file" type="file" value="" class=" " required>',
'clearable_file': '''
<input id="id_clearable_file" name="clearable_file" type="file" value="" class=" " required>''',
}
self.assertInHTML(expected_output['char'], output, msg_prefix='TextInput rendered incorrectly: ')
self.assertInHTML(expected_output['email'], output, msg_prefix='EmailInput rendered incorrectly: ')
self.assertInHTML(expected_output['url'], output, msg_prefix='UrlInput rendered incorrectly: ')
self.assertInHTML(expected_output['number'], output, msg_prefix='NumberInput rendered incorrectly: ')
self.assertInHTML(expected_output['password'], output, msg_prefix='PasswordInput rendered incorrectly: ')
self.assertInHTML(expected_output['hidden'], output, msg_prefix='HiddenInput rendered incorrectly: ')
for input_ in expected_output['multiple_hidden']:
self.assertNotInHTML(input_, output, msg_prefix='MultipleHiddenInput rendered incorrectly: %s: ' % input_)
self.assertInHTML(expected_output['date'], output, msg_prefix='DateInput rendered incorrectly: ')
self.assertInHTML(expected_output['datetime'], output, msg_prefix='DateTimeInput rendered incorrectly: ')
self.assertInHTML(expected_output['time'], output, msg_prefix='TimeInput rendered incorrectly: ')
self.assertInHTML(expected_output['text'], output, msg_prefix='Textarea rendered incorrectly: ')
self.assertInHTML(expected_output['checkbox'], output, msg_prefix='CheckboxInput rendered incorrectly: ')
# all kind of selects
self.assertInHTML(expected_output['select'], output, msg_prefix='Select rendered incorrectly: ')
self.assertInHTML(
expected_output['optgroup_select'], output, msg_prefix='Select with optgroups rendered incorrectly: ')
self.assertInHTML(
expected_output['null_boolean_select'], output, msg_prefix='NullBooleanSelect rendered incorrectly: ')
self.assertInHTML(
expected_output['select_multiple'], output, msg_prefix='SelectMultiple rendered incorrectly: ')
self.assertInHTML(expected_output['radio_select'], output, msg_prefix='RadioSelect rendered incorrectly: ')
self.assertInHTML(
expected_output['checkbox_select_multiple'], output,
msg_prefix='CheckboxSelectMultiple rendered incorrectly: '
)
self.assertInHTML(expected_output['file'], output, msg_prefix='FileInput rendered incorrectly: ')
def test_widgets_bound(self):
"""
Tests all form fields one by one, with a bound form.
"""
self.ctx['form'] = DjangoWidgetsForm(data=MultiValueDict({
'char': ['test char'],
'email': ['foo@bar.com'],
'url': ['https://example.com'],
'number': [42],
'password': ['secret'],
'hidden': ['peek-a-boo'],
'multiple_hidden': ['first', 'second'],
'date': [datetime.date(2016, 1, 25)],
'datetime': [datetime.datetime(2016, 1, 25, 9, 0, 42)],
'time': [datetime.time(8, 9)],
'text': ['Lorem ipsum...'],
'checkbox': [True],
'select': ['22'],
'optgroup_select': ['22'],
'null_boolean_select': [False],
'select_multiple': ['11', '22', 1],
'radio_select': ['11'],
'checkbox_select_multiple': ['1', 11],
'file': ['not-a-suspicious-file.exe'],
'clearable_file': ['also-not-a-suspicious-file.exe'],
}))
tmpl = get_template('widgets_django')
output = tmpl.render(self.ctx)
expected = {
'char': '<input type="text" name="char" id="id_char" value="test char" class=" " required>',
'email': '<input type="email" name="email" id="id_email" value="foo@bar.com" class=" " required>',
'url': '<input type="url" name="url" id="id_url" value="https://example.com" class=" " required>',
'number': '<input type="number" name="number" id="id_number" value="42" class=" " required>',
'password': '<input type="password" name="password" id="id_password" value="secret" class=" " required>',
'hidden': '<input type="hidden" name="hidden" id="id_hidden" value="peek-a-boo" class=" " required>',
'multiple_hidden': [
'<input type="hidden" name="multiple_hidden" id="id_multiple_hidden_0" value="first" required>',
'<input type="hidden" name="multiple_hidden" id="id_multiple_hidden_1" value="second" required>',
],
'date': '<input type="date" name="date" id="id_date" value="2016-01-25" class=" " required>',
'datetime': '<input type="datetime" name="datetime" id="id_datetime" value="2016-01-25 09:00:42" class=" " required>',
'time': '<input type="time" name="time" id="id_time" value="08:09:00" class=" " required>',
'text': '<textarea name="text" id="id_text" class=" " required cols="40" rows="10">Lorem ipsum...</textarea> ',
'checkbox': '''
<label for="id_checkbox" class="">
<input name="checkbox" id="id_checkbox" type="checkbox" checked>
Checkbox
</label>''',
'select': '''
<select name="select" id="id_select">
<option value="1">a</option>
<option value="11">b</option>
<option value="22" selected>c</option>
</select>''',
'optgroup_select': '''
<select id="id_optgroup_select" name="optgroup_select">
<optgroup label="label1">
<option value="1">a</option>
<option value="11">b</option>
</optgroup>
<optgroup label="label2">
<option value="22" selected>c</option>
</optgroup>
</select>''',
'null_boolean_select': '''
<select id="id_null_boolean_select" name="null_boolean_select">
<option value="1">Unknown</option>
<option value="2">Yes</option>
<option value="3" selected>No</option>
</select>''',
'select_multiple': '''
<select name="select_multiple" id="id_select_multiple" multiple>
<option value="1" selected>a</option>
<option value="11" selected>b</option>
<option value="22" selected>c</option>
</select>''',
'radio_select': '''
<ul id="id_radio_select">
<li><input name="radio_select" type="radio" id="id_radio_select_0" value="1" >a</li>
<li><input name="radio_select" type="radio" id="id_radio_select_1" value="11" checked>b</li>
<li><input name="radio_select" type="radio" id="id_radio_select_2" value="22" >c</li>
</ul>''',
'checkbox_select_multiple': '''
<ul id="id_checkbox_select_multiple">
<li><input name="checkbox_select_multiple" type="checkbox"
id="id_checkbox_select_multiple_0" value="1" checked>a</li>
<li><input name="checkbox_select_multiple" type="checkbox"
id="id_checkbox_select_multiple_1" value="11" checked>b</li>
<li><input name="checkbox_select_multiple" type="checkbox"
id="id_checkbox_select_multiple_2" value="22">c</li>
</ul>''',
# file inputs never show the value, the old value is of no use [see django.forms tests]
'file': '<input id="id_file" name="file" type="file" value="" class=" error" required>',
'clearable_file': '''
<input id="id_clearable_file" name="clearable_file" type="file" value="" class=" error" required>''',
}
self.assertInHTML(expected['char'], output)
self.assertInHTML(expected['email'], output)
self.assertInHTML(expected['url'], output)
self.assertInHTML(expected['number'], output)
self.assertInHTML(expected['password'], output)
self.assertInHTML(expected['hidden'], output)
for input_ in expected['multiple_hidden']:
self.assertInHTML(input_, output)
self.assertInHTML(expected['text'], output)
self.assertInHTML(expected['checkbox'], output)
self.assertInHTML(expected['select'], output)
self.assertInHTML(expected['optgroup_select'], output)
self.assertInHTML(expected['null_boolean_select'], output)
self.assertInHTML(expected['select_multiple'], output)
self.assertInHTML(expected['radio_select'], output)
self.assertInHTML(expected['checkbox_select_multiple'], output)
self.assertInHTML(expected['file'], output)
self.assertInHTML(expected['clearable_file'], output)
# DateTime based
self.assertInHTML(expected['date'], output)
self.assertInHTML(expected['datetime'], output)
self.assertInHTML(expected['time'], output)
def test_date_input_different_format(self):
"""
Tests that the ``django.forms`` configured input format is respected.
"""
class Form(forms.Form):
date = forms.DateField(widget=forms.DateInput(format='%m-%Y-%d'))
self.ctx['form'] = Form(initial={'date': datetime.date(2016, 3, 27)})
tmpl = get_template('widgets_django')
output = tmpl.render(self.ctx)
self.assertHTMLEqual(
output,
'<input type="date" name="date" id="id_date" value="03-2016-27" class=" " required>'
)
def test_filefield_extractor(self):
"""
Assert that the clearable file input is properly rendered.
"""
self.ctx['form'] = FilesForm(initial={'clearable_file': FakeFieldFile()})
tmpl = get_template('widgets_django')
output = tmpl.render(self.ctx)
self.assertHTMLEqual(
output, '''
Currently: <a href="something">something</a>
<input type="checkbox" name="clearable_file-clear" id="clearable_file-clear_id" />
<label for="clearable_file-clear_id">Clear</label><br />
Change: <input id="id_clearable_file" name="clearable_file" type="file" class=" " value="" required />
'''
)
|
|
# Copyright 2015 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import pytest
import tempfile
from distutils.version import LooseVersion
from f5.bigip.tm.asm.policies.parameters import ParametersResource
from f5.bigip.tm.asm.policies import Policy
from requests.exceptions import HTTPError
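# Note: these tests rely on a conftest.py (not shown here) that presumably
# registers the '--release' command line option used by the skipif markers
# below and provides the 'mgmt_root' and 'policy' fixtures.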
@pytest.mark.skipif(
LooseVersion(pytest.config.getoption('--release')) >= LooseVersion('13.0.0'),
reason='Needs TMOS version less than v13.0.0 to pass.'
)
class TestPolicy(object):
def test_create_req_arg(self, policy):
pol1 = policy
endpoint = str(pol1.id)
base_uri = 'https://localhost/mgmt/tm/asm/policies/'
final_uri = base_uri + endpoint
assert pol1.selfLink.startswith(final_uri)
assert pol1.subPath == '/Common'
assert pol1.kind == 'tm:asm:policies:policystate'
def test_create_optional_args(self, mgmt_root):
file = tempfile.NamedTemporaryFile()
name = os.path.basename(file.name)
codes = [400, 401, 403]
pol1 = mgmt_root.tm.asm.policies_s.policy.create(
name=name,
allowedResponseCodes=codes
)
endpoint = str(pol1.id)
base_uri = 'https://localhost/mgmt/tm/asm/policies/'
        final_uri = base_uri + endpoint
assert pol1.name == name
assert pol1.selfLink.startswith(final_uri)
assert pol1.kind == 'tm:asm:policies:policystate'
assert pol1.allowedResponseCodes == codes
pol1.delete()
def test_refresh(self, policy, mgmt_root):
pol1 = policy
pol2 = mgmt_root.tm.asm.policies_s.policy.load(id=pol1.id)
assert pol1.name == pol2.name
assert pol1.selfLink == pol2.selfLink
assert pol1.kind == pol2.kind
assert pol1.allowedResponseCodes == pol2.allowedResponseCodes
pol1.modify(allowedResponseCodes=[400, 503])
assert pol1.selfLink == pol2.selfLink
assert pol1.allowedResponseCodes != pol2.allowedResponseCodes
pol2.refresh()
assert pol1.allowedResponseCodes == pol2.allowedResponseCodes
def test_delete(self, mgmt_root):
file = tempfile.NamedTemporaryFile()
name = os.path.basename(file.name)
pol1 = mgmt_root.tm.asm.policies_s.policy.create(name=name)
idhash = str(pol1.id)
pol1.delete()
with pytest.raises(HTTPError) as err:
mgmt_root.tm.asm.policies_s.policy.load(id=idhash)
assert err.value.response.status_code == 404
def test_load_no_object(self, mgmt_root):
with pytest.raises(HTTPError) as err:
mgmt_root.tm.asm.policies_s.policy.load(id='Lx3553-321')
assert err.value.response.status_code == 404
def test_load(self, policy, mgmt_root):
pol1 = policy
endpoint = str(pol1.id)
base_uri = 'https://localhost/mgmt/tm/asm/policies/'
        final_uri = base_uri + endpoint
assert pol1.selfLink.startswith(final_uri)
assert pol1.subPath == '/Common'
assert pol1.kind == 'tm:asm:policies:policystate'
pol1.modify(allowedResponseCodes=[400, 503])
assert pol1.allowedResponseCodes == [400, 503]
pol2 = mgmt_root.tm.asm.policies_s.policy.load(id=pol1.id)
assert pol1.name == pol2.name
assert pol1.selfLink == pol2.selfLink
assert pol1.kind == pol2.kind
assert pol1.allowedResponseCodes == pol2.allowedResponseCodes
def test_policy_collection(self, policy, mgmt_root):
pc = mgmt_root.tm.asm.policies_s.get_collection()
assert isinstance(pc, list)
assert len(pc)
assert isinstance(pc[0], Policy)
@pytest.mark.skipif(
LooseVersion(pytest.config.getoption('--release')) < LooseVersion('13.0.0'),
reason='Needs TMOS version greater than or equal to v13.0.0 to pass.'
)
class TestPolicyV13(object):
def test_create_req_arg(self, policy):
pol1 = policy
endpoint = str(pol1.id)
base_uri = 'https://localhost/mgmt/tm/asm/policies/'
final_uri = base_uri + endpoint
assert pol1.selfLink.startswith(final_uri)
assert pol1.subPath == '/Common'
assert pol1.kind == 'tm:asm:policies:policystate'
def test_create_optional_args(self, mgmt_root):
file = tempfile.NamedTemporaryFile()
name = os.path.basename(file.name)
codes = [400, 401, 403]
pol1 = mgmt_root.tm.asm.policies_s.policy.create(
name=name,
allowedResponseCodes=codes
)
endpoint = str(pol1.id)
base_uri = 'https://localhost/mgmt/tm/asm/policies/'
        final_uri = base_uri + endpoint
assert pol1.name == name
assert pol1.selfLink.startswith(final_uri)
assert pol1.kind == 'tm:asm:policies:policystate'
# In v13, the allowedResponseCodes were moved to the General
# UnnamedResource
generals = pol1.general.load()
assert generals.allowedResponseCodes == codes
pol1.delete()
def test_refresh(self, policy, mgmt_root):
pol1 = policy
pol2 = mgmt_root.tm.asm.policies_s.policy.load(id=pol1.id)
gen1 = pol1.general.load()
gen2 = pol2.general.load()
assert pol1.name == pol2.name
assert pol1.selfLink == pol2.selfLink
assert pol1.kind == pol2.kind
assert gen1.allowedResponseCodes == gen2.allowedResponseCodes
gen1.modify(allowedResponseCodes=[400, 503])
assert pol1.selfLink == pol2.selfLink
assert gen1.allowedResponseCodes != gen2.allowedResponseCodes
gen2.refresh()
assert gen1.allowedResponseCodes == gen2.allowedResponseCodes
def test_delete(self, mgmt_root):
file = tempfile.NamedTemporaryFile()
name = os.path.basename(file.name)
pol1 = mgmt_root.tm.asm.policies_s.policy.create(name=name)
idhash = str(pol1.id)
pol1.delete()
with pytest.raises(HTTPError) as err:
mgmt_root.tm.asm.policies_s.policy.load(id=idhash)
assert err.value.response.status_code == 404
def test_load_no_object(self, mgmt_root):
with pytest.raises(HTTPError) as err:
mgmt_root.tm.asm.policies_s.policy.load(id='Lx3553-321')
assert err.value.response.status_code == 404
def test_load(self, policy, mgmt_root):
pol1 = policy
gen1 = pol1.general.load()
endpoint = str(pol1.id)
base_uri = 'https://localhost/mgmt/tm/asm/policies/'
final_uri = base_uri + endpoint
assert pol1.selfLink.startswith(final_uri)
assert pol1.subPath == '/Common'
assert pol1.kind == 'tm:asm:policies:policystate'
gen1.modify(allowedResponseCodes=[400, 503])
assert gen1.allowedResponseCodes == [400, 503]
pol2 = mgmt_root.tm.asm.policies_s.policy.load(id=pol1.id)
gen2 = pol2.general.load()
assert pol1.name == pol2.name
assert pol1.selfLink == pol2.selfLink
assert pol1.kind == pol2.kind
assert gen1.allowedResponseCodes == gen2.allowedResponseCodes
def test_policy_collection(self, policy, mgmt_root):
pc = mgmt_root.tm.asm.policies_s.get_collection()
assert isinstance(pc, list)
assert len(pc)
assert isinstance(pc[0], Policy)
class TestPolicyParameters(object):
def test_create_req_arg(self, policy):
file = tempfile.NamedTemporaryFile()
name = os.path.basename(file.name)
param1 = policy.parameters_s.parameter.create(name=name)
assert param1.kind == 'tm:asm:policies:parameters:parameterstate'
assert param1.name == name
assert param1.type == 'explicit'
assert param1.level == 'global'
assert param1.sensitiveParameter is False
param1.delete()
def test_create_optional_args(self, policy):
file = tempfile.NamedTemporaryFile()
name = os.path.basename(file.name)
param1 = policy.parameters_s.parameter.create(
name=name,
sensitiveParameter=True
)
assert param1.kind == 'tm:asm:policies:parameters:parameterstate'
assert param1.name == name
assert param1.type == 'explicit'
assert param1.level == 'global'
assert param1.sensitiveParameter is True
param1.delete()
def test_refresh(self, policy):
file = tempfile.NamedTemporaryFile()
name = os.path.basename(file.name)
param1 = policy.parameters_s.parameter.create(name=name)
param2 = policy.parameters_s.parameter.load(id=param1.id)
assert param1.kind == param2.kind
assert param1.name == param2.name
assert param1.level == param2.level
assert param1.sensitiveParameter == param2.sensitiveParameter
param2.modify(sensitiveParameter=True)
assert param1.sensitiveParameter is False
assert param2.sensitiveParameter is True
param1.refresh()
assert param1.sensitiveParameter is True
param1.delete()
def test_delete(self, policy):
file = tempfile.NamedTemporaryFile()
name = os.path.basename(file.name)
param1 = policy.parameters_s.parameter.create(name=name)
idhash = str(param1.id)
param1.delete()
with pytest.raises(HTTPError) as err:
policy.parameters_s.parameter.load(id=idhash)
assert err.value.response.status_code == 404
def test_load_no_object(self, policy):
with pytest.raises(HTTPError) as err:
policy.parameters_s.parameter.load(id='Lx3553-321')
assert err.value.response.status_code == 404
def test_load(self, policy):
file = tempfile.NamedTemporaryFile()
name = os.path.basename(file.name)
param1 = policy.parameters_s.parameter.create(name=name)
assert param1.kind == 'tm:asm:policies:parameters:parameterstate'
assert param1.name == name
assert param1.type == 'explicit'
assert param1.level == 'global'
assert param1.sensitiveParameter is False
param1.modify(sensitiveParameter=True)
assert param1.sensitiveParameter is True
param2 = policy.parameters_s.parameter.load(id=param1.id)
assert param1.name == param2.name
assert param1.selfLink == param2.selfLink
assert param1.kind == param2.kind
assert param1.level == param2.level
assert param1.sensitiveParameter == param2.sensitiveParameter
param1.delete()
def test_parameters_subcollection(self, policy):
file = tempfile.NamedTemporaryFile()
name = os.path.basename(file.name)
param1 = policy.parameters_s.parameter.create(name=name)
assert param1.kind == 'tm:asm:policies:parameters:parameterstate'
assert param1.name == name
assert param1.type == 'explicit'
assert param1.level == 'global'
assert param1.sensitiveParameter is False
cc = policy.parameters_s.get_collection()
assert isinstance(cc, list)
assert len(cc)
assert isinstance(cc[0], ParametersResource)
param1.delete()
|
|
"""Hash-table that uses open addressing where collisions are solved by storing
the item to next free slot.
"""
from itertools import chain, islice
# Sentinel object used to mark unused slots
SENTINEL = object()
# Default size
INITIAL_SIZE = 7
# Max load factor, once this is reached table will be grown
LOAD_FACTOR = 0.7
# Growth factor
GROW = 2
class Hash(object):
"""Hash table that uses open addressing.
Attributes:
table: Table storing items, uses SENTINEL to mark unused slots
size: Number of items in the table
"""
def __init__(self, it=None):
"""Initializer, initializes Hash from given iterable.
Args:
it: Optional iterable which contents are added to table.
"""
self.table = [SENTINEL] * INITIAL_SIZE
self.size = 0
self.extend(it or [])
def __str__(self):
return 'Hash({})'.format(list(self))
def __len__(self):
return self.size
def __iter__(self):
for x in self.table:
if x is not SENTINEL:
yield x
@staticmethod
def __index_from_item(item, table):
"""Iterates over indexes in table starting from given item. First free
index should be used to store the item.
Args:
item: Item to hash
table: Table containing items
Yields:
Indexes where item should be stored in preference order
"""
size = len(table)
start = hash(item) % size
for i in range(start, start + size):
yield i % size
@staticmethod
def __add(table, item):
"""Adds given item to table.
Args:
table: Table to add the item
item: Item to add
Returns:
1 if item was added, 0 if it wasn't (it was already in the table)
"""
for i in Hash.__index_from_item(item, table):
if table[i] is SENTINEL:
table[i] = item
return 1
elif table[i] == item:
break
# Never reached without break
return 0
def add(self, item):
"""Adds item to hash table
Args:
item: Item to add
"""
if self.size == int(len(self.table) * LOAD_FACTOR):
# Rehash whole table
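            # e.g. with INITIAL_SIZE == 7 the first grow triggers when size
            # reaches int(7 * 0.7) == 4, doubling the table to 14 slots.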
table = [SENTINEL] * (GROW * len(self.table))
for x in self.table:
if x is not SENTINEL:
Hash.__add(table, x)
self.table = table
self.size += Hash.__add(self.table, item)
def extend(self, it):
"""Adds items from given iterable to hash table.
Args:
            it: Iterable containing items
"""
for x in it:
self.add(x)
def __find(self, item):
"""Searches index of an item in the hash table.
Args:
item: Item to search
Returns:
Index where the item is stored, -1 if not found
"""
for i in Hash.__index_from_item(item, self.table):
x = self.table[i]
if x is SENTINEL:
break
elif x == item:
return i
# Never reached without break
return -1
def __contains__(self, item):
return self.__find(item) != -1
def isdisjoint(self, other):
"""Check if two hashtables are disjoint (=don't contain the same item).
Args:
other: Other hash table
Returns:
True if hashtables are disjoint, False if not
"""
small, big = sorted((self, other), key=len)
return all(x not in big for x in small)
def __le__(self, other):
return len(self) <= len(other) and all(x in other for x in self)
def __lt__(self, other):
return len(self) < len(other) and all(x in other for x in self)
def __eq__(self, other):
return len(self) == len(other) and all(x in other for x in self)
def __ge__(self, other):
return len(self) >= len(other) and all(x in self for x in other)
def __gt__(self, other):
return len(self) > len(other) and all(x in self for x in other)
def __ne__(self, other):
return len(self) != len(other) or any(x not in other for x in self)
def __and__(self, other):
small, big = sorted((self, other), key=len)
return Hash(x for x in small if x in big)
def __or__(self, other):
return Hash(chain(self, other))
def __sub__(self, other):
return Hash(x for x in self if x not in other)
def __xor__(self, other):
res = Hash(x for x in self if x not in other)
res.extend(x for x in other if x not in self)
return res
def clear(self):
"""Removes all items from the hashtable."""
self.table = [SENTINEL] * INITIAL_SIZE
self.size = 0
def __remove(self, index):
"""Removes item from hashtable.
Args:
index: Index of the item
"""
        # In an open addressing scheme, all items immediately following the
        # removed item need to be rehashed. Collect them all for rehashing.
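        # Worked example: with len(table) == 7, items 7 and 14 both hash to
        # slot 0, so 7 sits at index 0 and 14 at index 1. Naively clearing
        # index 0 would strand 14 (lookups stop at the sentinel); rehashing
        # the trailing cluster moves 14 back to index 0.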
rehash = []
length = len(self.table)
for i in range(index, index + length):
i %= len(self.table)
            # Use an identity check so items with exotic __eq__ can't match.
            if self.table[i] is SENTINEL:
break
rehash.append(self.table[i])
self.table[i] = SENTINEL
self.size -= 1
# Insert back all except the one that needed to be removed
self.extend(islice(rehash, 1, None))
def pop(self):
"""Removes random item from hashtable and returns it.
Returns:
Removed item
Raises:
KeyError in case hashtable is empty
"""
        # If items were additionally stored in a doubly linked list, this
        # would be O(1).
for i in range(len(self.table)):
x = self.table[i]
            if x is not SENTINEL:
self.__remove(i)
return x
raise KeyError
def discard(self, item):
"""Removes given item from hash,
Args:
item: Item to remove
"""
index = self.__find(item)
if index != -1:
self.__remove(index)
def remove(self, item):
"""Removes given item from hash.
Args:
item: Item to remove
Raises:
KeyError if item is not present
"""
index = self.__find(item)
if index == -1:
raise KeyError
self.__remove(index)
def __ior__(self, other):
for x in other:
self.add(x)
return self
    def __iand__(self, other):
        # Iterate over a snapshot of the items: clearing slots in place would
        # corrupt probe chains, and it would also wrongly treat SENTINEL
        # slots as removable items and miscount size.
        for x in [x for x in self if x not in other]:
            self.remove(x)
        return self
def __isub__(self, other):
for x in other:
self.discard(x)
return self
def __ixor__(self, other):
self.__isub__(self & other)
return self
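# A minimal usage sketch (added for illustration, not part of the original
# module): exercises add/remove and the set operators defined above.
if __name__ == '__main__':
    h = Hash([1, 2, 3])
    assert 2 in h and 5 not in h
    h.add(5)
    h.remove(1)
    assert sorted(h) == [2, 3, 5]
    assert sorted(Hash([1, 2]) | Hash([2, 3])) == [1, 2, 3]
    assert sorted(Hash([1, 2]) & Hash([2, 3])) == [2]
    assert sorted(Hash([1, 2]) ^ Hash([2, 3])) == [1, 3]
    assert Hash([1, 2]).isdisjoint(Hash([3, 4]))
    print('Hash smoke test passed.')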
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Timeline visualization for TensorFlow using Chrome Trace Format."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import json
import re
# The timeline target is usually imported as part of BUILD target
# "platform_test", which also includes the "platform" dependency. This is
# why the logging import here is okay.
from tensorflow.python.platform import tf_logging as logging
class AllocationMaximum(collections.namedtuple(
'AllocationMaximum', ('timestamp', 'num_bytes', 'tensors'))):
"""Stores the maximum allocation for a given allocator within the timelne.
Parameters:
timestamp: `tensorflow::Env::NowMicros()` when this maximum was reached.
num_bytes: the total memory used at this time.
tensors: the set of tensors allocated at this time.
"""
pass
class StepStatsAnalysis(collections.namedtuple(
'StepStatsAnalysis', ('chrome_trace', 'allocator_maximums'))):
"""Stores the step stats analysis output.
Parameters:
chrome_trace: A dict containing the chrome trace analysis.
allocator_maximums: A dict mapping allocator names to AllocationMaximum.
"""
pass
class _ChromeTraceFormatter(object):
"""A helper class for generating traces in Chrome Trace Format."""
def __init__(self, show_memory=False):
"""Constructs a new Chrome Trace formatter."""
self._show_memory = show_memory
self._events = []
self._metadata = []
def _create_event(self, ph, category, name, pid, tid, timestamp):
"""Creates a new Chrome Trace event.
For details of the file format, see:
https://github.com/catapult-project/catapult/blob/master/tracing/README.md
Args:
ph: The type of event - usually a single character.
category: The event category as a string.
name: The event name as a string.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
timestamp: The timestamp of this event as a long integer.
Returns:
A JSON compatible event object.
"""
event = {}
event['ph'] = ph
event['cat'] = category
event['name'] = name
event['pid'] = pid
event['tid'] = tid
event['ts'] = timestamp
return event
def emit_pid(self, name, pid):
"""Adds a process metadata event to the trace.
Args:
name: The process name as a string.
pid: Identifier of the process as an integer.
"""
event = {}
event['name'] = 'process_name'
event['ph'] = 'M'
event['pid'] = pid
event['args'] = {'name': name}
self._metadata.append(event)
def emit_tid(self, name, pid, tid):
"""Adds a thread metadata event to the trace.
Args:
name: The thread name as a string.
pid: Identifier of the process as an integer.
tid: Identifier of the thread as an integer.
"""
event = {}
event['name'] = 'thread_name'
event['ph'] = 'M'
event['pid'] = pid
event['tid'] = tid
event['args'] = {'name': name}
self._metadata.append(event)
def emit_region(self, timestamp, duration, pid, tid, category, name, args):
"""Adds a region event to the trace.
Args:
timestamp: The start timestamp of this region as a long integer.
duration: The duration of this region as a long integer.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
category: The event category as a string.
name: The event name as a string.
args: A JSON-compatible dictionary of event arguments.
"""
event = self._create_event('X', category, name, pid, tid, timestamp)
event['dur'] = duration
event['args'] = args
self._events.append(event)
def emit_obj_create(self, category, name, timestamp, pid, tid, object_id):
"""Adds an object creation event to the trace.
Args:
category: The event category as a string.
name: The event name as a string.
timestamp: The timestamp of this event as a long integer.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
object_id: Identifier of the object as an integer.
"""
event = self._create_event('N', category, name, pid, tid, timestamp)
event['id'] = object_id
self._events.append(event)
def emit_obj_delete(self, category, name, timestamp, pid, tid, object_id):
"""Adds an object deletion event to the trace.
Args:
category: The event category as a string.
name: The event name as a string.
timestamp: The timestamp of this event as a long integer.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
object_id: Identifier of the object as an integer.
"""
event = self._create_event('D', category, name, pid, tid, timestamp)
event['id'] = object_id
self._events.append(event)
def emit_obj_snapshot(self, category, name, timestamp, pid, tid, object_id,
snapshot):
"""Adds an object snapshot event to the trace.
Args:
category: The event category as a string.
name: The event name as a string.
timestamp: The timestamp of this event as a long integer.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
object_id: Identifier of the object as an integer.
snapshot: A JSON-compatible representation of the object.
"""
event = self._create_event('O', category, name, pid, tid, timestamp)
event['id'] = object_id
event['args'] = {'snapshot': snapshot}
self._events.append(event)
def emit_flow_start(self, name, timestamp, pid, tid, flow_id):
"""Adds a flow start event to the trace.
When matched with a flow end event (with the same 'flow_id') this will
cause the trace viewer to draw an arrow between the start and end events.
Args:
name: The event name as a string.
timestamp: The timestamp of this event as a long integer.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
flow_id: Identifier of the flow as an integer.
"""
event = self._create_event('s', 'DataFlow', name, pid, tid, timestamp)
event['id'] = flow_id
self._events.append(event)
def emit_flow_end(self, name, timestamp, pid, tid, flow_id):
"""Adds a flow end event to the trace.
When matched with a flow start event (with the same 'flow_id') this will
cause the trace viewer to draw an arrow between the start and end events.
Args:
name: The event name as a string.
timestamp: The timestamp of this event as a long integer.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
flow_id: Identifier of the flow as an integer.
"""
event = self._create_event('t', 'DataFlow', name, pid, tid, timestamp)
event['id'] = flow_id
self._events.append(event)
def emit_counter(self, category, name, pid, timestamp, counter, value):
"""Emits a record for a single counter.
Args:
category: The event category as a string.
name: The event name as a string.
pid: Identifier of the process generating this event as an integer.
timestamp: The timestamp of this event as a long integer.
counter: Name of the counter as a string.
value: Value of the counter as an integer.
"""
event = self._create_event('C', category, name, pid, 0, timestamp)
event['args'] = {counter: value}
self._events.append(event)
def emit_counters(self, category, name, pid, timestamp, counters):
"""Emits a counter record for the dictionary 'counters'.
Args:
category: The event category as a string.
name: The event name as a string.
pid: Identifier of the process generating this event as an integer.
timestamp: The timestamp of this event as a long integer.
counters: Dictionary of counter values.
"""
event = self._create_event('C', category, name, pid, 0, timestamp)
event['args'] = counters.copy()
self._events.append(event)
def format_to_string(self, pretty=False):
"""Formats the chrome trace to a string.
Args:
pretty: (Optional.) If True, produce human-readable JSON output.
Returns:
A JSON-formatted string in Chrome Trace format.
"""
trace = {}
trace['traceEvents'] = self._metadata + self._events
if pretty:
return json.dumps(trace, indent=4, separators=(',', ': '))
else:
return json.dumps(trace, separators=(',', ':'))
class _TensorTracker(object):
"""An internal class to track the lifetime of a Tensor."""
def __init__(self, name, object_id, timestamp, pid, allocator, num_bytes):
"""Creates an object to track tensor references.
This class is not thread safe and is intended only for internal use by
the 'Timeline' class in this file.
Args:
name: The name of the Tensor as a string.
object_id: Chrome Trace object identifier assigned for this Tensor.
timestamp: The creation timestamp of this event as a long integer.
pid: Process identifier of the associated device, as an integer.
allocator: Name of the allocator used to create the Tensor.
num_bytes: Number of bytes allocated (long integer).
Returns:
A 'TensorTracker' object.
"""
self._name = name
self._pid = pid
self._object_id = object_id
self._create_time = timestamp
self._allocator = allocator
self._num_bytes = num_bytes
self._ref_times = []
self._unref_times = []
@property
def name(self):
"""Name of this tensor."""
return self._name
@property
def pid(self):
"""ID of the process which created this tensor (an integer)."""
return self._pid
@property
def create_time(self):
"""Timestamp when this tensor was created (long integer)."""
return self._create_time
@property
def object_id(self):
"""Returns the object identifier of this tensor (integer)."""
return self._object_id
@property
def num_bytes(self):
"""Size of this tensor in bytes (long integer)."""
return self._num_bytes
@property
def allocator(self):
"""Name of the allocator used to create this tensor (string)."""
return self._allocator
@property
def last_unref(self):
"""Last unreference timestamp of this tensor (long integer)."""
return max(self._unref_times)
def add_ref(self, timestamp):
"""Adds a reference to this tensor with the specified timestamp.
Args:
timestamp: Timestamp of object reference as an integer.
"""
self._ref_times.append(timestamp)
def add_unref(self, timestamp):
"""Adds an unref to this tensor with the specified timestamp.
Args:
timestamp: Timestamp of object unreference as an integer.
"""
self._unref_times.append(timestamp)
class Timeline(object):
"""A class for visualizing execution timelines of TensorFlow steps."""
def __init__(self, step_stats, graph=None):
"""Constructs a new Timeline.
A 'Timeline' is used for visualizing the execution of a TensorFlow
computation. It shows the timings and concurrency of execution at
the granularity of TensorFlow Ops.
This class is not thread safe.
Args:
step_stats: The 'StepStats' proto recording execution times.
graph: (Optional) The 'Graph' that was executed.
"""
self._origin_step_stats = step_stats
self._step_stats = None
self._graph = graph
self._chrome_trace = _ChromeTraceFormatter()
self._next_pid = 0
self._device_pids = {} # device name -> pid for compute activity.
self._tensor_pids = {} # device name -> pid for tensors.
self._tensors = {} # tensor_name -> TensorTracker
self._next_flow_id = 0
self._flow_starts = {} # tensor_name -> (timestamp, pid, tid)
self._alloc_times = {} # tensor_name -> ( time, allocator, size )
self._allocator_maximums = {} # allocator name => maximum bytes long
def _alloc_pid(self):
"""Allocate a process Id."""
pid = self._next_pid
self._next_pid += 1
return pid
def _alloc_flow_id(self):
"""Allocate a flow Id."""
flow_id = self._next_flow_id
self._next_flow_id += 1
return flow_id
def _parse_op_label(self, label):
"""Parses the fields in a node timeline label."""
# Expects labels of the form: name = op(arg, arg, ...).
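    # e.g. 'add_1 = Add(x, y)' parses to ('add_1', 'Add', ['x', 'y']).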
match = re.match(r'(.*) = (.*)\((.*)\)', label)
if match is None:
return 'unknown', 'unknown', []
nn, op, inputs = match.groups()
if not inputs:
inputs = []
else:
inputs = inputs.split(', ')
return nn, op, inputs
def _parse_kernel_label(self, label, node_name):
"""Parses the fields in a node timeline label."""
# Expects labels of the form: retval (arg) detail @@annotation
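    # e.g. 'void kernel(...) @@conv1/Conv2D:Conv2D#id=1#' yields
    # ('conv1/Conv2D', 'Conv2D').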
start = label.find('@@')
end = label.find('#')
if start >= 0 and end >= 0 and start + 2 < end:
node_name = label[start + 2:end]
# Node names should always have the form 'name:op'.
fields = node_name.split(':') + ['unknown']
name, op = fields[:2]
return name, op
def _assign_lanes(self):
"""Assigns non-overlapping lanes for the activities on each device."""
for device_stats in self._step_stats.dev_stats:
# TODO(pbar): Genuine thread IDs in NodeExecStats might be helpful.
lanes = [0]
for ns in device_stats.node_stats:
l = -1
for (i, lts) in enumerate(lanes):
if ns.all_start_micros > lts:
l = i
lanes[l] = ns.all_start_micros + ns.all_end_rel_micros
break
if l < 0:
l = len(lanes)
lanes.append(ns.all_start_micros + ns.all_end_rel_micros)
ns.thread_id = l
def _emit_op(self, nodestats, pid, is_gputrace):
"""Generates a Chrome Trace event to show Op execution.
Args:
nodestats: The 'NodeExecStats' proto recording op execution.
pid: The pid assigned for the device where this op ran.
is_gputrace: If True then this op came from the GPUTracer.
"""
node_name = nodestats.node_name
start = nodestats.all_start_micros
duration = nodestats.all_end_rel_micros
tid = nodestats.thread_id
inputs = []
if is_gputrace:
node_name, op = self._parse_kernel_label(nodestats.timeline_label,
node_name)
elif node_name == 'RecvTensor':
# RPC tracing does not use the standard timeline_label format.
op = 'RecvTensor'
else:
_, op, inputs = self._parse_op_label(nodestats.timeline_label)
args = {'name': node_name, 'op': op}
for i, iname in enumerate(inputs):
args['input%d' % i] = iname
self._chrome_trace.emit_region(start, duration, pid, tid, 'Op', op, args)
def _emit_tensor_snapshot(self, tensor, timestamp, pid, tid, value):
"""Generate Chrome Trace snapshot event for a computed Tensor.
Args:
tensor: A 'TensorTracker' object.
timestamp: The timestamp of this snapshot as a long integer.
pid: The pid assigned for showing the device where this op ran.
tid: The tid of the thread computing the tensor snapshot.
value: A JSON-compliant snapshot of the object.
"""
desc = str(value.tensor_description).replace('"', '')
snapshot = {'tensor_description': desc}
self._chrome_trace.emit_obj_snapshot('Tensor', tensor.name, timestamp, pid,
tid, tensor.object_id, snapshot)
def _produce_tensor(self, name, timestamp, tensors_pid, allocator, num_bytes):
object_id = len(self._tensors)
tensor = _TensorTracker(name, object_id, timestamp, tensors_pid, allocator,
num_bytes)
self._tensors[name] = tensor
return tensor
def _is_gputrace_device(self, device_name):
"""Returns true if this device is part of the GPUTracer logging."""
return '/stream:' in device_name or '/memcpy' in device_name
def _allocate_pids(self):
"""Allocate fake process ids for each device in the StepStats."""
self._allocators_pid = self._alloc_pid()
self._chrome_trace.emit_pid('Allocators', self._allocators_pid)
# Add processes in the Chrome trace to show compute and data activity.
for dev_stats in self._step_stats.dev_stats:
device_pid = self._alloc_pid()
self._device_pids[dev_stats.device] = device_pid
tensors_pid = self._alloc_pid()
self._tensor_pids[dev_stats.device] = tensors_pid
self._chrome_trace.emit_pid(dev_stats.device + ' Compute', device_pid)
self._chrome_trace.emit_pid(dev_stats.device + ' Tensors', tensors_pid)
def _analyze_tensors(self, show_memory):
"""Analyze tensor references to track dataflow."""
for dev_stats in self._step_stats.dev_stats:
device_pid = self._device_pids[dev_stats.device]
tensors_pid = self._tensor_pids[dev_stats.device]
for node_stats in dev_stats.node_stats:
tid = node_stats.thread_id
node_name = node_stats.node_name
start_time = node_stats.all_start_micros
end_time = node_stats.all_start_micros + node_stats.all_end_rel_micros
for index, output in enumerate(node_stats.output):
if index:
output_name = '%s:%d' % (node_name, index)
else:
output_name = node_name
allocation = output.tensor_description.allocation_description
num_bytes = allocation.requested_bytes
allocator_name = allocation.allocator_name
tensor = self._produce_tensor(output_name, start_time, tensors_pid,
allocator_name, num_bytes)
tensor.add_ref(start_time)
tensor.add_unref(end_time)
self._flow_starts[output_name] = (end_time, device_pid, tid)
if show_memory:
self._chrome_trace.emit_obj_create('Tensor', output_name,
start_time, tensors_pid, tid,
tensor.object_id)
self._emit_tensor_snapshot(tensor, end_time - 1, tensors_pid, tid,
output)
def _show_compute(self, show_dataflow):
"""Visualize the computation activity."""
for dev_stats in self._step_stats.dev_stats:
device_name = dev_stats.device
device_pid = self._device_pids[device_name]
is_gputrace = self._is_gputrace_device(device_name)
for node_stats in dev_stats.node_stats:
tid = node_stats.thread_id
start_time = node_stats.all_start_micros
end_time = node_stats.all_start_micros + node_stats.all_end_rel_micros
self._emit_op(node_stats, device_pid, is_gputrace)
if is_gputrace or node_stats.node_name == 'RecvTensor':
continue
_, _, inputs = self._parse_op_label(node_stats.timeline_label)
for input_name in inputs:
if input_name not in self._tensors:
# This can happen when partitioning has inserted a Send/Recv.
# We remove the numeric suffix so that the dataflow appears to
# come from the original node. Ideally, the StepStats would
# contain logging for the Send and Recv nodes.
index = input_name.rfind('/_')
if index > 0:
input_name = input_name[:index]
if input_name in self._tensors:
tensor = self._tensors[input_name]
tensor.add_ref(start_time)
tensor.add_unref(end_time - 1)
if show_dataflow:
# We use a different flow ID for every graph edge.
create_time, create_pid, create_tid = self._flow_starts[
input_name]
# Don't add flows when producer and consumer ops are on the same
# pid/tid since the horizontal arrows clutter the visualization.
if create_pid != device_pid or create_tid != tid:
flow_id = self._alloc_flow_id()
self._chrome_trace.emit_flow_start(input_name, create_time,
create_pid, create_tid,
flow_id)
self._chrome_trace.emit_flow_end(input_name, start_time,
device_pid, tid, flow_id)
else:
logging.vlog(1, 'Can\'t find tensor %s - removed by CSE?',
input_name)
def _show_memory_counters(self):
"""Produce a counter series for each memory allocator."""
# Iterate over all tensor trackers to build a list of allocations and
# frees for each allocator. Then sort the lists and emit a cumulative
# counter series for each allocator.
allocations = {}
for name in self._tensors:
tensor = self._tensors[name]
self._chrome_trace.emit_obj_delete('Tensor', name, tensor.last_unref,
tensor.pid, 0, tensor.object_id)
allocator = tensor.allocator
if allocator not in allocations:
allocations[allocator] = []
num_bytes = tensor.num_bytes
allocations[allocator].append((tensor.create_time, num_bytes, name))
allocations[allocator].append((tensor.last_unref, -num_bytes, name))
alloc_maxes = {}
# Generate a counter series showing total allocations for each allocator.
for allocator in allocations:
alloc_list = allocations[allocator]
alloc_list.sort()
total_bytes = 0
alloc_tensor_set = set()
alloc_maxes[allocator] = AllocationMaximum(
timestamp=0, num_bytes=0, tensors=set())
for time, num_bytes, name in sorted(
alloc_list, key=lambda allocation: allocation[0]):
total_bytes += num_bytes
if num_bytes < 0:
alloc_tensor_set.discard(name)
else:
alloc_tensor_set.add(name)
if total_bytes > alloc_maxes[allocator].num_bytes:
alloc_maxes[allocator] = AllocationMaximum(
timestamp=time,
num_bytes=total_bytes,
tensors=copy.deepcopy(alloc_tensor_set))
self._chrome_trace.emit_counter('Memory', allocator,
self._allocators_pid, time, allocator,
total_bytes)
self._allocator_maximums = alloc_maxes
def _preprocess_op_time(self, op_time):
"""Update the start and end time of ops in step stats.
Args:
op_time: How the execution time of op is shown in timeline. Possible values
are "schedule", "gpu" and "all". "schedule" will show op from the time it
is scheduled to the end of the scheduling. Notice by the end of its
scheduling its async kernels may not start yet. It is shown using the
default value from step_stats. "gpu" will show op with the execution time
of its kernels on GPU. "all" will show op from the start of its scheduling
to the end of its last kernel.
"""
if op_time == 'schedule':
self._step_stats = self._origin_step_stats
return
self._step_stats = copy.deepcopy(self._origin_step_stats)
# Separate job task and gpu tracer stream
stream_all_stats = []
job_stats = []
for stats in self._step_stats.dev_stats:
if '/stream:all' in stats.device:
stream_all_stats.append(stats)
elif '/job' in stats.device:
job_stats.append(stats)
# Record the start time of the first kernel and the end time of
# the last gpu kernel for all ops.
op_gpu_start = {}
op_gpu_end = {}
for stats in stream_all_stats:
for kernel in stats.node_stats:
name, _ = self._parse_kernel_label(kernel.timeline_label,
kernel.node_name)
start = kernel.all_start_micros
end = kernel.all_start_micros + kernel.all_end_rel_micros
if name in op_gpu_start:
op_gpu_start[name] = min(op_gpu_start[name], start)
op_gpu_end[name] = max(op_gpu_end[name], end)
else:
op_gpu_start[name] = start
op_gpu_end[name] = end
# Update the start and end time of each op according to the op_time
for stats in job_stats:
for op in stats.node_stats:
if op.node_name in op_gpu_start:
end = max(op_gpu_end[op.node_name],
op.all_start_micros + op.all_end_rel_micros)
if op_time == 'gpu':
op.all_start_micros = op_gpu_start[op.node_name]
op.all_end_rel_micros = end - op.all_start_micros
def analyze_step_stats(self,
show_dataflow=True,
show_memory=True,
op_time='schedule'):
"""Analyze the step stats and format it into Chrome Trace Format.
Args:
show_dataflow: (Optional.) If True, add flow events to the trace
connecting producers and consumers of tensors.
show_memory: (Optional.) If True, add object snapshot events to the trace
showing the sizes and lifetimes of tensors.
      op_time: (Optional.) How the execution time of an op is shown in the
        timeline. Possible values are "schedule", "gpu" and "all". "schedule"
        shows an op from the time it is scheduled to the end of the
        scheduling; note that by the end of its scheduling its async kernels
        may not have started yet, and the default values from step_stats are
        used. "gpu" shows an op with the execution time of its kernels on
        GPU. "all" shows an op from the start of its scheduling to the end
        of its last kernel.
Returns:
A 'StepStatsAnalysis' object.
"""
self._preprocess_op_time(op_time)
self._allocate_pids()
self._assign_lanes()
self._analyze_tensors(show_memory)
self._show_compute(show_dataflow)
if show_memory:
self._show_memory_counters()
return StepStatsAnalysis(
chrome_trace=self._chrome_trace,
allocator_maximums=self._allocator_maximums)
def generate_chrome_trace_format(self,
show_dataflow=True,
show_memory=False,
op_time='schedule'):
"""Produces a trace in Chrome Trace Format.
Args:
show_dataflow: (Optional.) If True, add flow events to the trace
connecting producers and consumers of tensors.
show_memory: (Optional.) If True, add object snapshot events to the trace
showing the sizes and lifetimes of tensors.
      op_time: (Optional.) How the execution time of an op is shown in the
        timeline. Possible values are "schedule", "gpu" and "all". "schedule"
        shows an op from the time it is scheduled to the end of the
        scheduling; note that by the end of its scheduling its async kernels
        may not have started yet, and the default values from step_stats are
        used. "gpu" shows an op with the execution time of its kernels on
        GPU. "all" shows an op from the start of its scheduling to the end
        of its last kernel.
Returns:
A JSON formatted string in Chrome Trace format.
"""
step_stats_analysis = self.analyze_step_stats(
show_dataflow=show_dataflow, show_memory=show_memory, op_time=op_time)
return step_stats_analysis.chrome_trace.format_to_string(pretty=True)
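# A minimal usage sketch (added for illustration, not part of the original
# module), assuming a TF 1.x session `sess` and some `fetches`:
#
#   run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
#   run_metadata = tf.RunMetadata()
#   sess.run(fetches, options=run_options, run_metadata=run_metadata)
#   tl = Timeline(run_metadata.step_stats)
#   with open('timeline.json', 'w') as trace_file:
#       trace_file.write(tl.generate_chrome_trace_format())
#
# The resulting JSON can be inspected in Chrome via chrome://tracing.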
|
|
from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource)
from neuralnilm.source import (standardise, discretize, fdiff, power_and_fdiff,
RandomSegments, RandomSegmentsInMemory,
SameLocation)
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import (MixtureDensityLayer, DeConv1DLayer,
SharedWeightsDenseLayer)
from neuralnilm.objectives import (scaled_cost, mdn_nll,
scaled_cost_ignore_inactive, ignore_inactive,
scaled_cost3)
from neuralnilm.plot import MDNPlotter, CentralOutputPlotter, Plotter, RectangularOutputPlotter, StartEndMeanPlotter
from neuralnilm.updates import clipped_nesterov_momentum
from neuralnilm.disaggregate import disaggregate
from neuralnilm.rectangulariser import rectangularise
from lasagne.nonlinearities import sigmoid, rectify, tanh, identity, softmax
from lasagne.objectives import squared_error, binary_crossentropy
from lasagne.init import Uniform, Normal
from lasagne.layers import (DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer,
DimshuffleLayer)
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
import gc
"""
447: first attempt at disaggregation
"""
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
#PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
PATH = "/data/dk3810/figures"
SAVE_PLOT_INTERVAL = 25000
N_SEQ_PER_BATCH = 64
MAX_TARGET_POWER = 300
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
['washer dryer', 'washing machine'],
'hair straighteners',
'television',
'dish washer'
],
max_appliance_powers=[MAX_TARGET_POWER, 2400, 500, 200, 2500],
on_power_thresholds=[5] * 5,
min_on_durations=[60, 1800, 60, 60, 1800],
min_off_durations=[12, 600, 12, 12, 1800],
window=("2013-03-18", "2013-04-18"),
seq_length=512,
output_one_appliance=True,
train_buildings=[1],
validation_buildings=[1],
n_seq_per_batch=N_SEQ_PER_BATCH,
standardise_input=True,
independently_center_inputs=False,
skip_probability=0.75,
skip_probability_for_first_appliance=0.5,
target_is_start_and_end_and_mean=True,
one_target_per_seq=False
)
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
# loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
# loss_function=lambda x, t: mdn_nll(x, t).mean(),
# loss_function=lambda x, t: (mse(x, t) * MASK).mean(),
loss_function=lambda x, t: squared_error(x, t).mean(),
# loss_function=mse,
# loss_function=lambda x, t: binary_crossentropy(x, t).mean(),
# loss_function=partial(scaled_cost, loss_func=mse),
# loss_function=ignore_inactive,
# loss_function=partial(scaled_cost3, ignore_inactive=False),
# updates_func=momentum,
updates_func=clipped_nesterov_momentum,
updates_kwargs={'clip_range': (0, 10)},
learning_rate=1e-2,
learning_rate_changes_by_iteration={
1000: 1e-3,
50000: 1e-4
},
do_save_activations=True,
auto_reshape=False,
# plotter=CentralOutputPlotter
# plotter=Plotter(n_seq_to_plot=32)
plotter=StartEndMeanPlotter(n_seq_to_plot=32, max_target_power=MAX_TARGET_POWER)
)
def exp_a(name):
# conv, conv
global source
source_dict_copy = deepcopy(source_dict)
source_dict_copy.update(dict(
logger=logging.getLogger(name)
))
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
NUM_FILTERS = 16
target_seq_length = source.output_shape_after_processing()[1]
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': NUM_FILTERS,
'filter_size': 4,
'stride': 1,
'nonlinearity': None,
'border_mode': 'valid'
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': NUM_FILTERS,
'filter_size': 4,
'stride': 1,
'nonlinearity': None,
'border_mode': 'valid'
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
# {
# 'type': DropoutLayer,
# 'p': 0.5
# },
{
'type': DenseLayer,
'num_units': 512,
'nonlinearity': rectify
},
# {
# 'type': DropoutLayer,
# 'p': 0.5
# },
{
'type': DenseLayer,
'num_units': 512,
'nonlinearity': rectify
},
# {
# 'type': DropoutLayer,
# 'p': 0.5
# },
{
'type': DenseLayer,
'num_units': 512,
'nonlinearity': rectify
},
# {
# 'type': DropoutLayer,
# 'p': 0.5
# },
{
'type': DenseLayer,
'num_units': target_seq_length,
'nonlinearity': None
}
]
net = Net(**net_dict_copy)
return net
def main():
EXPERIMENTS = list('a')
for experiment in EXPERIMENTS:
full_exp_name = NAME + experiment
func_call = init_experiment(PATH, experiment, full_exp_name)
logger = logging.getLogger(full_exp_name)
try:
net = eval(func_call)
run_experiment(net, epochs=None)
except KeyboardInterrupt:
logger.info("KeyboardInterrupt")
break
except Exception as exception:
logger.exception("Exception")
# raise
finally:
logging.shutdown()
if __name__ == "__main__":
main()
"""
Emacs variables
Local Variables:
compile-command: "cp /home/jack/workspace/python/neuralnilm/scripts/e527.py /mnt/sshfs/imperial/workspace/python/neuralnilm/scripts/"
End:
"""
|
|
#!/usr/bin/env python3
from argparse import ArgumentParser
from io import StringIO
from enum import Enum, auto
import os.path
import sys
import re
class Cell:
    def __init__(self, name, keep=False, port_attrs=None):
        self.name = name
        self.keep = keep
        # Use None as the default to avoid sharing one mutable dict between
        # all Cell instances.
        self.port_attrs = port_attrs if port_attrs is not None else {}
CELLS = [
# Design element types listed in:
# - UG607 (Spartan 3)
# - UG613 (Spartan 3A)
# - UG617 (Spartan 3E)
# - UG615 (Spartan 6)
# - UG619 (Virtex 4)
# - UG621 (Virtex 5)
# - UG623 (Virtex 6)
# - UG953 (Series 7)
# - UG974 (Ultrascale)
# CLB -- RAM/ROM.
# Cell('RAM16X1S', port_attrs={'WCLK': ['clkbuf_sink']}),
# Cell('RAM16X1S_1', port_attrs={'WCLK': ['clkbuf_sink']}),
# Cell('RAM32X1S', port_attrs={'WCLK': ['clkbuf_sink']}),
# Cell('RAM32X1S_1', port_attrs={'WCLK': ['clkbuf_sink']}),
# Cell('RAM64X1S', port_attrs={'WCLK': ['clkbuf_sink']}),
# Cell('RAM64X1S_1', port_attrs={'WCLK': ['clkbuf_sink']}),
# Cell('RAM128X1S', port_attrs={'WCLK': ['clkbuf_sink']}),
# Cell('RAM128X1S_1', port_attrs={'WCLK': ['clkbuf_sink']}),
# Cell('RAM256X1S', port_attrs={'WCLK': ['clkbuf_sink']}),
# Cell('RAM512X1S', port_attrs={'WCLK': ['clkbuf_sink']}),
# Cell('RAM16X2S', port_attrs={'WCLK': ['clkbuf_sink']}),
# Cell('RAM32X2S', port_attrs={'WCLK': ['clkbuf_sink']}),
# Cell('RAM64X2S', port_attrs={'WCLK': ['clkbuf_sink']}),
# Cell('RAM16X4S', port_attrs={'WCLK': ['clkbuf_sink']}),
# Cell('RAM32X4S', port_attrs={'WCLK': ['clkbuf_sink']}),
# Cell('RAM16X8S', port_attrs={'WCLK': ['clkbuf_sink']}),
# Cell('RAM32X8S', port_attrs={'WCLK': ['clkbuf_sink']}),
# Cell('RAM16X1D', port_attrs={'WCLK': ['clkbuf_sink']}),
# Cell('RAM16X1D_1', port_attrs={'WCLK': ['clkbuf_sink']}),
# Cell('RAM32X1D', port_attrs={'WCLK': ['clkbuf_sink']}),
# Cell('RAM32X1D_1', port_attrs={'WCLK': ['clkbuf_sink']}),
# Cell('RAM64X1D', port_attrs={'WCLK': ['clkbuf_sink']}),
# Cell('RAM64X1D_1', port_attrs={'WCLK': ['clkbuf_sink']}),
# Cell('RAM128X1D', port_attrs={'WCLK': ['clkbuf_sink']}),
# Cell('RAM256X1D', port_attrs={'WCLK': ['clkbuf_sink']}),
# Cell('RAM32M', port_attrs={'WCLK': ['clkbuf_sink']}),
# Cell('RAM32M16', port_attrs={'WCLK': ['clkbuf_sink']}),
# Cell('RAM64M', port_attrs={'WCLK': ['clkbuf_sink']}),
# Cell('RAM64M8', port_attrs={'WCLK': ['clkbuf_sink']}),
# Cell('ROM16X1'),
# Cell('ROM32X1'),
# Cell('ROM64X1'),
# Cell('ROM128X1'),
# Cell('ROM256X1'),
# CLB -- registers/latches.
# Virtex 1/2/4/5, Spartan 3.
# Cell('FDCPE', port_attrs={'C': ['clkbuf_sink']}),
# Cell('FDRSE', port_attrs={'C': ['clkbuf_sink']}),
# Cell('LDCPE', port_attrs={'C': ['clkbuf_sink']}),
# Virtex 6, Spartan 6, Series 7, Ultrascale.
# Cell('FDCE'),
# Cell('FDPE'),
# Cell('FDRE'),
# Cell('FDSE'),
# Cell('LDCE'),
# Cell('LDPE'),
# Cell('AND2B1L'),
# Cell('OR2L'),
# CLB -- other.
# Cell('LUT1'),
# Cell('LUT2'),
# Cell('LUT3'),
# Cell('LUT4'),
# Cell('LUT5'),
# Cell('LUT6'),
# Cell('LUT6_2'),
# Cell('MUXF5'),
# Cell('MUXF6'),
# Cell('MUXF7'),
# Cell('MUXF8'),
# Cell('MUXF9'),
# Cell('CARRY4'),
# Cell('CARRY8'),
# Cell('MUXCY'),
# Cell('XORCY'),
# Cell('ORCY'),
# Cell('MULT_AND'),
# Cell('SRL16', port_attrs={'CLK': ['clkbuf_sink']}),
# Cell('SRL16E', port_attrs={'CLK': ['clkbuf_sink']}),
# Cell('SRLC16', port_attrs={'CLK': ['clkbuf_sink']}),
# Cell('SRLC16E', port_attrs={'CLK': ['clkbuf_sink']}),
# Cell('SRLC32E', port_attrs={'CLK': ['clkbuf_sink']}),
# Cell('CFGLUT5', port_attrs={'CLK': ['clkbuf_sink']}),
# Block RAM.
# Virtex.
# TODO: RAMB4_*
# Virtex 2, Spartan 3.
Cell('RAMB16_S1', port_attrs={'CLK': ['clkbuf_sink']}),
Cell('RAMB16_S2', port_attrs={'CLK': ['clkbuf_sink']}),
Cell('RAMB16_S4', port_attrs={'CLK': ['clkbuf_sink']}),
Cell('RAMB16_S9', port_attrs={'CLK': ['clkbuf_sink']}),
Cell('RAMB16_S18', port_attrs={'CLK': ['clkbuf_sink']}),
Cell('RAMB16_S36', port_attrs={'CLK': ['clkbuf_sink']}),
Cell('RAMB16_S1_S1', port_attrs={'CLKA': ['clkbuf_sink'], 'CLKB': ['clkbuf_sink']}),
Cell('RAMB16_S1_S2', port_attrs={'CLKA': ['clkbuf_sink'], 'CLKB': ['clkbuf_sink']}),
Cell('RAMB16_S1_S4', port_attrs={'CLKA': ['clkbuf_sink'], 'CLKB': ['clkbuf_sink']}),
Cell('RAMB16_S1_S9', port_attrs={'CLKA': ['clkbuf_sink'], 'CLKB': ['clkbuf_sink']}),
Cell('RAMB16_S1_S18', port_attrs={'CLKA': ['clkbuf_sink'], 'CLKB': ['clkbuf_sink']}),
Cell('RAMB16_S1_S36', port_attrs={'CLKA': ['clkbuf_sink'], 'CLKB': ['clkbuf_sink']}),
Cell('RAMB16_S2_S2', port_attrs={'CLKA': ['clkbuf_sink'], 'CLKB': ['clkbuf_sink']}),
Cell('RAMB16_S2_S4', port_attrs={'CLKA': ['clkbuf_sink'], 'CLKB': ['clkbuf_sink']}),
Cell('RAMB16_S2_S9', port_attrs={'CLKA': ['clkbuf_sink'], 'CLKB': ['clkbuf_sink']}),
Cell('RAMB16_S2_S18', port_attrs={'CLKA': ['clkbuf_sink'], 'CLKB': ['clkbuf_sink']}),
Cell('RAMB16_S2_S36', port_attrs={'CLKA': ['clkbuf_sink'], 'CLKB': ['clkbuf_sink']}),
Cell('RAMB16_S4_S4', port_attrs={'CLKA': ['clkbuf_sink'], 'CLKB': ['clkbuf_sink']}),
Cell('RAMB16_S4_S9', port_attrs={'CLKA': ['clkbuf_sink'], 'CLKB': ['clkbuf_sink']}),
Cell('RAMB16_S4_S18', port_attrs={'CLKA': ['clkbuf_sink'], 'CLKB': ['clkbuf_sink']}),
Cell('RAMB16_S4_S36', port_attrs={'CLKA': ['clkbuf_sink'], 'CLKB': ['clkbuf_sink']}),
Cell('RAMB16_S9_S9', port_attrs={'CLKA': ['clkbuf_sink'], 'CLKB': ['clkbuf_sink']}),
Cell('RAMB16_S9_S18', port_attrs={'CLKA': ['clkbuf_sink'], 'CLKB': ['clkbuf_sink']}),
Cell('RAMB16_S9_S36', port_attrs={'CLKA': ['clkbuf_sink'], 'CLKB': ['clkbuf_sink']}),
Cell('RAMB16_S18_S18', port_attrs={'CLKA': ['clkbuf_sink'], 'CLKB': ['clkbuf_sink']}),
Cell('RAMB16_S18_S36', port_attrs={'CLKA': ['clkbuf_sink'], 'CLKB': ['clkbuf_sink']}),
Cell('RAMB16_S36_S36', port_attrs={'CLKA': ['clkbuf_sink'], 'CLKB': ['clkbuf_sink']}),
# Spartan 3A (in addition to above).
Cell('RAMB16BWE_S18', port_attrs={'CLK': ['clkbuf_sink']}),
Cell('RAMB16BWE_S36', port_attrs={'CLK': ['clkbuf_sink']}),
Cell('RAMB16BWE_S18_S9', port_attrs={'CLKA': ['clkbuf_sink'], 'CLKB': ['clkbuf_sink']}),
Cell('RAMB16BWE_S18_S18', port_attrs={'CLKA': ['clkbuf_sink'], 'CLKB': ['clkbuf_sink']}),
Cell('RAMB16BWE_S36_S9', port_attrs={'CLKA': ['clkbuf_sink'], 'CLKB': ['clkbuf_sink']}),
Cell('RAMB16BWE_S36_S18', port_attrs={'CLKA': ['clkbuf_sink'], 'CLKB': ['clkbuf_sink']}),
Cell('RAMB16BWE_S36_S36', port_attrs={'CLKA': ['clkbuf_sink'], 'CLKB': ['clkbuf_sink']}),
# Spartan 3A DSP.
Cell('RAMB16BWER', port_attrs={ 'CLKA': ['clkbuf_sink'], 'CLKB': ['clkbuf_sink']}),
# Spartan 6 (in addition to above).
Cell('RAMB8BWER', port_attrs={ 'CLKAWRCLK': ['clkbuf_sink'], 'CLKBRDCLK': ['clkbuf_sink']}),
# Virtex 4.
Cell('FIFO16', port_attrs={'RDCLK': ['clkbuf_sink'], 'WRCLK': ['clkbuf_sink']}),
Cell('RAMB16', port_attrs={'CLKA': ['clkbuf_sink'], 'CLKB': ['clkbuf_sink']}),
Cell('RAMB32_S64_ECC', port_attrs={'RDCLK': ['clkbuf_sink'], 'WRCLK': ['clkbuf_sink']}),
# Virtex 5.
Cell('FIFO18', port_attrs={'RDCLK': ['clkbuf_sink'], 'WRCLK': ['clkbuf_sink']}),
Cell('FIFO18_36', port_attrs={'RDCLK': ['clkbuf_sink'], 'WRCLK': ['clkbuf_sink']}),
Cell('FIFO36', port_attrs={'RDCLK': ['clkbuf_sink'], 'WRCLK': ['clkbuf_sink']}),
Cell('FIFO36_72', port_attrs={'RDCLK': ['clkbuf_sink'], 'WRCLK': ['clkbuf_sink']}),
Cell('RAMB18', port_attrs={'CLKA': ['clkbuf_sink'], 'CLKB': ['clkbuf_sink']}),
Cell('RAMB36', port_attrs={'CLKA': ['clkbuf_sink'], 'CLKB': ['clkbuf_sink']}),
Cell('RAMB18SDP', port_attrs={'RDCLK': ['clkbuf_sink'], 'WRCLK': ['clkbuf_sink']}),
Cell('RAMB36SDP', port_attrs={'RDCLK': ['clkbuf_sink'], 'WRCLK': ['clkbuf_sink']}),
# Virtex 6 / Series 7.
Cell('FIFO18E1', port_attrs={'RDCLK': ['clkbuf_sink'], 'WRCLK': ['clkbuf_sink']}),
Cell('FIFO36E1', port_attrs={'RDCLK': ['clkbuf_sink'], 'WRCLK': ['clkbuf_sink']}),
    #Cell('RAMB18E1', port_attrs={'CLKARDCLK': ['clkbuf_sink'], 'CLKBWRCLK': ['clkbuf_sink']}),
    #Cell('RAMB36E1', port_attrs={'CLKARDCLK': ['clkbuf_sink'], 'CLKBWRCLK': ['clkbuf_sink']}),
# Ultrascale.
Cell('FIFO18E2', port_attrs={'RDCLK': ['clkbuf_sink'], 'WRCLK': ['clkbuf_sink']}),
Cell('FIFO36E2', port_attrs={'RDCLK': ['clkbuf_sink'], 'WRCLK': ['clkbuf_sink']}),
Cell('RAMB18E2', port_attrs={'CLKARDCLK': ['clkbuf_sink'], 'CLKBWRCLK': ['clkbuf_sink']}),
Cell('RAMB36E2', port_attrs={'CLKARDCLK': ['clkbuf_sink'], 'CLKBWRCLK': ['clkbuf_sink']}),
# Ultra RAM.
Cell('URAM288', port_attrs={'CLK': ['clkbuf_sink']}),
Cell('URAM288_BASE', port_attrs={'CLK': ['clkbuf_sink']}),
# Multipliers and DSP.
# Cell('MULT18X18'), # Virtex 2, Spartan 3
# Cell('MULT18X18S', port_attrs={'C': ['clkbuf_sink']}), # Spartan 3
# Cell('MULT18X18SIO', port_attrs={'CLK': ['clkbuf_sink']}), # Spartan 3E
# Cell('DSP48A', port_attrs={'CLK': ['clkbuf_sink']}), # Spartan 3A DSP
# Cell('DSP48A1', port_attrs={'CLK': ['clkbuf_sink']}), # Spartan 6
# Cell('DSP48', port_attrs={'CLK': ['clkbuf_sink']}), # Virtex 4
Cell('DSP48E', port_attrs={'CLK': ['clkbuf_sink']}), # Virtex 5
#Cell('DSP48E1', port_attrs={'CLK': ['clkbuf_sink']}), # Virtex 6 / Series 7
Cell('DSP48E2', port_attrs={'CLK': ['clkbuf_sink']}), # Ultrascale
# I/O logic.
# Virtex 2, Spartan 3.
Cell('IFDDRCPE', port_attrs={'C0': ['clkbuf_sink'], 'C1': ['clkbuf_sink'], 'D': ['iopad_external_pin']}),
Cell('IFDDRRSE', port_attrs={'C0': ['clkbuf_sink'], 'C1': ['clkbuf_sink'], 'D': ['iopad_external_pin']}),
Cell('OFDDRCPE', port_attrs={'C0': ['clkbuf_sink'], 'C1': ['clkbuf_sink'], 'Q': ['iopad_external_pin']}),
Cell('OFDDRRSE', port_attrs={'C0': ['clkbuf_sink'], 'C1': ['clkbuf_sink'], 'Q': ['iopad_external_pin']}),
Cell('OFDDRTCPE', port_attrs={'C0': ['clkbuf_sink'], 'C1': ['clkbuf_sink'], 'O': ['iopad_external_pin']}),
Cell('OFDDRTRSE', port_attrs={'C0': ['clkbuf_sink'], 'C1': ['clkbuf_sink'], 'O': ['iopad_external_pin']}),
# Spartan 3E.
Cell('IDDR2', port_attrs={'C0': ['clkbuf_sink'], 'C1': ['clkbuf_sink']}),
Cell('ODDR2', port_attrs={'C0': ['clkbuf_sink'], 'C1': ['clkbuf_sink']}),
# Virtex 4.
Cell('IDDR', port_attrs={'C': ['clkbuf_sink']}),
Cell('IDDR_2CLK', port_attrs={'C': ['clkbuf_sink'], 'CB': ['clkbuf_sink']}),
Cell('ODDR', port_attrs={'C': ['clkbuf_sink']}),
Cell('IDELAYCTRL', keep=True, port_attrs={'REFCLK': ['clkbuf_sink']}),
Cell('IDELAY', port_attrs={'C': ['clkbuf_sink']}),
Cell('ISERDES', port_attrs={
'CLK': ['clkbuf_sink'],
'OCLK': ['clkbuf_sink'],
'CLKDIV': ['clkbuf_sink'],
}),
Cell('OSERDES', port_attrs={'CLK': ['clkbuf_sink'], 'CLKDIV': ['clkbuf_sink']}),
# Virtex 5.
Cell('IODELAY', port_attrs={'C': ['clkbuf_sink']}),
Cell('ISERDES_NODELAY', port_attrs={
'CLK': ['clkbuf_sink'],
'CLKB': ['clkbuf_sink'],
'OCLK': ['clkbuf_sink'],
'CLKDIV': ['clkbuf_sink'],
}),
# Virtex 6.
Cell('IODELAYE1', port_attrs={'C': ['clkbuf_sink']}),
Cell('ISERDESE1', port_attrs={
'CLK': ['clkbuf_sink'],
'CLKB': ['clkbuf_sink'],
'OCLK': ['clkbuf_sink'],
'CLKDIV': ['clkbuf_sink'],
}),
Cell('OSERDESE1', port_attrs={'CLK': ['clkbuf_sink'], 'CLKDIV': ['clkbuf_sink']}),
# Series 7.
Cell('IDELAYE2', port_attrs={'C': ['clkbuf_sink']}),
Cell('ODELAYE2', port_attrs={'C': ['clkbuf_sink']}),
Cell('ISERDESE2', port_attrs={
'CLK': ['clkbuf_sink'],
'CLKB': ['clkbuf_sink'],
'OCLK': ['clkbuf_sink'],
'OCLKB': ['clkbuf_sink'],
'CLKDIV': ['clkbuf_sink'],
'CLKDIVP': ['clkbuf_sink'],
}),
Cell('OSERDESE2', port_attrs={'CLK': ['clkbuf_sink'], 'CLKDIV': ['clkbuf_sink']}),
Cell('PHASER_IN'),
Cell('PHASER_IN_PHY'),
Cell('PHASER_OUT'),
Cell('PHASER_OUT_PHY'),
Cell('PHASER_REF'),
Cell('PHY_CONTROL'),
# Ultrascale.
Cell('IDDRE1', port_attrs={'C': ['clkbuf_sink'], 'CB': ['clkbuf_sink']}),
Cell('ODDRE1', port_attrs={'C': ['clkbuf_sink']}),
Cell('IDELAYE3', port_attrs={'CLK': ['clkbuf_sink']}),
Cell('ODELAYE3', port_attrs={'CLK': ['clkbuf_sink']}),
Cell('ISERDESE3', port_attrs={
'CLK': ['clkbuf_sink'],
'CLK_B': ['clkbuf_sink'],
'FIFO_RD_CLK': ['clkbuf_sink'],
'CLKDIV': ['clkbuf_sink'],
}),
Cell('OSERDESE3', port_attrs={'CLK': ['clkbuf_sink'], 'CLKDIV': ['clkbuf_sink']}),
Cell('BITSLICE_CONTROL', keep=True),
Cell('RIU_OR'),
Cell('RX_BITSLICE'),
Cell('RXTX_BITSLICE'),
Cell('TX_BITSLICE'),
Cell('TX_BITSLICE_TRI'),
# Spartan 6.
Cell('IODELAY2', port_attrs={'IOCLK0': ['clkbuf_sink'], 'IOCLK1': ['clkbuf_sink'], 'CLK': ['clkbuf_sink']}),
Cell('IODRP2', port_attrs={'IOCLK0': ['clkbuf_sink'], 'IOCLK1': ['clkbuf_sink'], 'CLK': ['clkbuf_sink']}),
Cell('IODRP2_MCB', port_attrs={'IOCLK0': ['clkbuf_sink'], 'IOCLK1': ['clkbuf_sink'], 'CLK': ['clkbuf_sink']}),
Cell('ISERDES2', port_attrs={
'CLK0': ['clkbuf_sink'],
'CLK1': ['clkbuf_sink'],
'CLKDIV': ['clkbuf_sink'],
}),
Cell('OSERDES2', port_attrs={
'CLK0': ['clkbuf_sink'],
'CLK1': ['clkbuf_sink'],
'CLKDIV': ['clkbuf_sink'],
}),
# I/O buffers.
# Input.
# Cell('IBUF', port_attrs={'I': ['iopad_external_pin']}),
Cell('IBUF_DLY_ADJ', port_attrs={'I': ['iopad_external_pin']}),
Cell('IBUF_IBUFDISABLE', port_attrs={'I': ['iopad_external_pin']}),
Cell('IBUF_INTERMDISABLE', port_attrs={'I': ['iopad_external_pin']}),
Cell('IBUF_ANALOG', port_attrs={'I': ['iopad_external_pin']}),
Cell('IBUFE3', port_attrs={'I': ['iopad_external_pin']}),
Cell('IBUFDS', port_attrs={'I': ['iopad_external_pin'], 'IB': ['iopad_external_pin']}),
Cell('IBUFDS_DLY_ADJ', port_attrs={'I': ['iopad_external_pin'], 'IB': ['iopad_external_pin']}),
Cell('IBUFDS_IBUFDISABLE', port_attrs={'I': ['iopad_external_pin'], 'IB': ['iopad_external_pin']}),
Cell('IBUFDS_INTERMDISABLE', port_attrs={'I': ['iopad_external_pin'], 'IB': ['iopad_external_pin']}),
Cell('IBUFDS_DIFF_OUT', port_attrs={'I': ['iopad_external_pin'], 'IB': ['iopad_external_pin']}),
Cell('IBUFDS_DIFF_OUT_IBUFDISABLE', port_attrs={'I': ['iopad_external_pin'], 'IB': ['iopad_external_pin']}),
Cell('IBUFDS_DIFF_OUT_INTERMDISABLE', port_attrs={'I': ['iopad_external_pin'], 'IB': ['iopad_external_pin']}),
Cell('IBUFDSE3', port_attrs={'I': ['iopad_external_pin'], 'IB': ['iopad_external_pin']}),
Cell('IBUFDS_DPHY', port_attrs={'I': ['iopad_external_pin'], 'IB': ['iopad_external_pin']}),
# Cell('IBUFG', port_attrs={'I': ['iopad_external_pin']}),
Cell('IBUFGDS', port_attrs={'I': ['iopad_external_pin'], 'IB': ['iopad_external_pin']}),
Cell('IBUFGDS_DIFF_OUT', port_attrs={'I': ['iopad_external_pin'], 'IB': ['iopad_external_pin']}),
# I/O.
# Cell('IOBUF', port_attrs={'IO': ['iopad_external_pin']}),
Cell('IOBUF_DCIEN', port_attrs={'IO': ['iopad_external_pin']}),
Cell('IOBUF_INTERMDISABLE', port_attrs={'IO': ['iopad_external_pin']}),
Cell('IOBUFE3', port_attrs={'IO': ['iopad_external_pin']}),
Cell('IOBUFDS', port_attrs={'IO': ['iopad_external_pin'], 'IOB': ['iopad_external_pin']}),
Cell('IOBUFDS_DCIEN', port_attrs={'IO': ['iopad_external_pin'], 'IOB': ['iopad_external_pin']}),
Cell('IOBUFDS_INTERMDISABLE', port_attrs={'IO': ['iopad_external_pin'], 'IOB': ['iopad_external_pin']}),
Cell('IOBUFDS_DIFF_OUT', port_attrs={'IO': ['iopad_external_pin'], 'IOB': ['iopad_external_pin']}),
Cell('IOBUFDS_DIFF_OUT_DCIEN', port_attrs={'IO': ['iopad_external_pin'], 'IOB': ['iopad_external_pin']}),
Cell('IOBUFDS_DIFF_OUT_INTERMDISABLE', port_attrs={'IO': ['iopad_external_pin'], 'IOB': ['iopad_external_pin']}),
Cell('IOBUFDSE3', port_attrs={'IO': ['iopad_external_pin'], 'IOB': ['iopad_external_pin']}),
# Output.
# Cell('OBUF', port_attrs={'O': ['iopad_external_pin']}),
Cell('OBUFDS', port_attrs={'O': ['iopad_external_pin'], 'OB': ['iopad_external_pin']}),
Cell('OBUFDS_DPHY', port_attrs={'O': ['iopad_external_pin'], 'OB': ['iopad_external_pin']}),
# Output + tristate.
# Cell('OBUFT', port_attrs={'O': ['iopad_external_pin']}),
Cell('OBUFTDS', port_attrs={'O': ['iopad_external_pin'], 'OB': ['iopad_external_pin']}),
# Pulls.
Cell('KEEPER'),
Cell('PULLDOWN'),
Cell('PULLUP'),
# Misc.
Cell('DCIRESET', keep=True),
Cell('HPIO_VREF'), # Ultrascale
# Clock buffers (global).
# Cell('BUFG', port_attrs={'O': ['clkbuf_driver']}),
Cell('BUFGCE', port_attrs={'O': ['clkbuf_driver']}),
Cell('BUFGCE_1', port_attrs={'O': ['clkbuf_driver']}),
Cell('BUFGMUX', port_attrs={'O': ['clkbuf_driver']}),
Cell('BUFGMUX_1', port_attrs={'O': ['clkbuf_driver']}),
#Cell('BUFGCTRL', port_attrs={'O': ['clkbuf_driver']}),
Cell('BUFGMUX_CTRL', port_attrs={'O': ['clkbuf_driver']}),
Cell('BUFGMUX_VIRTEX4', port_attrs={'O': ['clkbuf_driver']}),
Cell('BUFG_GT', port_attrs={'O': ['clkbuf_driver']}),
Cell('BUFG_GT_SYNC'),
Cell('BUFG_PS', port_attrs={'O': ['clkbuf_driver']}),
Cell('BUFGCE_DIV', port_attrs={'O': ['clkbuf_driver']}),
Cell('BUFH', port_attrs={'O': ['clkbuf_driver']}),
#Cell('BUFHCE', port_attrs={'O': ['clkbuf_driver']}),
# Clock buffers (IO) -- Spartan 6.
Cell('BUFIO2', port_attrs={'IOCLK': ['clkbuf_driver'], 'DIVCLK': ['clkbuf_driver']}),
Cell('BUFIO2_2CLK', port_attrs={'IOCLK': ['clkbuf_driver'], 'DIVCLK': ['clkbuf_driver']}),
Cell('BUFIO2FB', port_attrs={'O': ['clkbuf_driver']}),
Cell('BUFPLL', port_attrs={'IOCLK': ['clkbuf_driver']}),
Cell('BUFPLL_MCB', port_attrs={'IOCLK0': ['clkbuf_driver'], 'IOCLK1': ['clkbuf_driver']}),
# Clock buffers (IO and regional) -- Virtex.
Cell('BUFIO', port_attrs={'O': ['clkbuf_driver']}),
Cell('BUFIODQS', port_attrs={'O': ['clkbuf_driver']}),
Cell('BUFR', port_attrs={'O': ['clkbuf_driver']}),
Cell('BUFMR', port_attrs={'O': ['clkbuf_driver']}),
Cell('BUFMRCE', port_attrs={'O': ['clkbuf_driver']}),
# Clock components.
    # Virtex.
# TODO: CLKDLL
# TODO: CLKDLLE
# TODO: CLKDLLHF
# Virtex 2, Spartan 3.
Cell('DCM'),
# Spartan 3E.
Cell('DCM_SP'),
# Spartan 6 (also uses DCM_SP and PLL_BASE).
Cell('DCM_CLKGEN'),
# Virtex 4/5.
Cell('DCM_ADV'),
Cell('DCM_BASE'),
Cell('DCM_PS'),
# Virtex 4.
Cell('PMCD'),
# Virtex 5.
Cell('PLL_ADV'),
Cell('PLL_BASE'),
# Virtex 6.
Cell('MMCM_ADV'),
Cell('MMCM_BASE'),
# Series 7.
Cell('MMCME2_ADV'),
Cell('MMCME2_BASE'),
Cell('PLLE2_ADV'),
Cell('PLLE2_BASE'),
# Ultrascale.
Cell('MMCME3_ADV'),
Cell('MMCME3_BASE'),
Cell('PLLE3_ADV'),
Cell('PLLE3_BASE'),
# Ultrascale+.
Cell('MMCME4_ADV'),
Cell('MMCME4_BASE'),
Cell('PLLE4_ADV'),
Cell('PLLE4_BASE'),
# Misc stuff.
Cell('BUFT'),
# Series 7 I/O FIFOs.
Cell('IN_FIFO', port_attrs={'RDCLK': ['clkbuf_sink'], 'WRCLK': ['clkbuf_sink']}),
Cell('OUT_FIFO', port_attrs={'RDCLK': ['clkbuf_sink'], 'WRCLK': ['clkbuf_sink']}),
# Ultrascale special synchronizer register.
Cell('HARD_SYNC', port_attrs={'CLK': ['clkbuf_sink']}),
# Singletons.
# Startup.
# TODO: STARTUP_VIRTEX
# TODO: STARTUP_VIRTEX2
Cell('STARTUP_SPARTAN3', keep=True),
Cell('STARTUP_SPARTAN3E', keep=True),
Cell('STARTUP_SPARTAN3A', keep=True),
Cell('STARTUP_SPARTAN6', keep=True),
Cell('STARTUP_VIRTEX4', keep=True),
Cell('STARTUP_VIRTEX5', keep=True),
Cell('STARTUP_VIRTEX6', keep=True),
Cell('STARTUPE2', keep=True), # Series 7
Cell('STARTUPE3', keep=True), # Ultrascale
# Capture trigger.
# TODO: CAPTURE_VIRTEX
# TODO: CAPTURE_VIRTEX2
Cell('CAPTURE_SPARTAN3', keep=True),
Cell('CAPTURE_SPARTAN3A', keep=True),
Cell('CAPTURE_VIRTEX4', keep=True),
Cell('CAPTURE_VIRTEX5', keep=True),
Cell('CAPTURE_VIRTEX6', keep=True),
Cell('CAPTUREE2', keep=True), # Series 7
# Internal Configuration Access Port.
# TODO: ICAP_VIRTEX2
Cell('ICAP_SPARTAN3A', keep=True),
Cell('ICAP_SPARTAN6', keep=True),
Cell('ICAP_VIRTEX4', keep=True),
Cell('ICAP_VIRTEX5', keep=True),
Cell('ICAP_VIRTEX6', keep=True),
Cell('ICAPE2', keep=True), # Series 7
Cell('ICAPE3', keep=True), # Ultrascale
# JTAG.
# TODO: BSCAN_VIRTEX
# TODO: BSCAN_VIRTEX2
Cell('BSCAN_SPARTAN3', keep=True),
Cell('BSCAN_SPARTAN3A', keep=True),
Cell('BSCAN_SPARTAN6', keep=True),
Cell('BSCAN_VIRTEX4', keep=True),
Cell('BSCAN_VIRTEX5', keep=True),
Cell('BSCAN_VIRTEX6', keep=True),
Cell('BSCANE2', keep=True), # Series 7, Ultrascale
# DNA port.
Cell('DNA_PORT'), # Virtex 5/6, Series 7, Spartan 3A/6
Cell('DNA_PORTE2'), # Ultrascale
# Frame ECC.
Cell('FRAME_ECC_VIRTEX4'),
Cell('FRAME_ECC_VIRTEX5'),
Cell('FRAME_ECC_VIRTEX6'),
Cell('FRAME_ECCE2'), # Series 7
Cell('FRAME_ECCE3'), # Ultrascale
# AXSS command access.
Cell('USR_ACCESS_VIRTEX4'),
Cell('USR_ACCESS_VIRTEX5'),
Cell('USR_ACCESS_VIRTEX6'),
Cell('USR_ACCESSE2'), # Series 7, Ultrascale
# Misc.
Cell('POST_CRC_INTERNAL'), # Spartan 6
Cell('SUSPEND_SYNC', keep=True), # Spartan 6
Cell('KEY_CLEAR', keep=True), # Virtex 5
Cell('MASTER_JTAG', keep=True), # Ultrascale
Cell('SPI_ACCESS', keep=True), # Spartan 3AN
Cell('EFUSE_USR'),
# ADC.
Cell('SYSMON'), # Virtex 5/6
Cell('XADC'), # Series 7
Cell('SYSMONE1'), # Ultrascale
Cell('SYSMONE4'), # Ultrascale+
# Gigabit transceivers.
# Spartan 6.
Cell('GTPA1_DUAL'),
# Virtex 2 Pro.
# TODO: GT_*
# TODO: GT10_*
# Virtex 4.
Cell('GT11_CUSTOM'),
Cell('GT11_DUAL'),
Cell('GT11CLK'),
Cell('GT11CLK_MGT'),
# Virtex 5.
Cell('GTP_DUAL'),
Cell('GTX_DUAL'),
Cell('CRC32', port_attrs={'CRCCLK': ['clkbuf_sink']}),
Cell('CRC64', port_attrs={'CRCCLK': ['clkbuf_sink']}),
# Virtex 6.
Cell('GTHE1_QUAD'),
Cell('GTXE1'),
Cell('IBUFDS_GTXE1', port_attrs={'I': ['iopad_external_pin'], 'IB': ['iopad_external_pin']}),
Cell('IBUFDS_GTHE1', port_attrs={'I': ['iopad_external_pin'], 'IB': ['iopad_external_pin']}),
# Series 7.
Cell('GTHE2_CHANNEL'),
Cell('GTHE2_COMMON'),
Cell('GTPE2_CHANNEL'),
Cell('GTPE2_COMMON'),
Cell('GTXE2_CHANNEL'),
Cell('GTXE2_COMMON'),
Cell('IBUFDS_GTE2', port_attrs={'I': ['iopad_external_pin'], 'IB': ['iopad_external_pin']}),
# Ultrascale.
Cell('GTHE3_CHANNEL'),
Cell('GTHE3_COMMON'),
Cell('GTHE4_CHANNEL'),
Cell('GTHE4_COMMON'),
Cell('GTYE3_CHANNEL'),
Cell('GTYE3_COMMON'),
Cell('GTYE4_CHANNEL'),
Cell('GTYE4_COMMON'),
Cell('IBUFDS_GTE3', port_attrs={'I': ['iopad_external_pin'], 'IB': ['iopad_external_pin']}),
Cell('IBUFDS_GTE4', port_attrs={'I': ['iopad_external_pin'], 'IB': ['iopad_external_pin']}),
Cell('OBUFDS_GTE3', port_attrs={'O': ['iopad_external_pin'], 'OB': ['iopad_external_pin']}),
Cell('OBUFDS_GTE3_ADV', port_attrs={'O': ['iopad_external_pin'], 'OB': ['iopad_external_pin']}),
Cell('OBUFDS_GTE4', port_attrs={'O': ['iopad_external_pin'], 'OB': ['iopad_external_pin']}),
Cell('OBUFDS_GTE4_ADV', port_attrs={'O': ['iopad_external_pin'], 'OB': ['iopad_external_pin']}),
# PCIE IP.
Cell('PCIE_A1'), # Spartan 6
Cell('PCIE_EP'), # Virtex 5
Cell('PCIE_2_0'), # Virtex 6
Cell('PCIE_2_1'), # Series 7
Cell('PCIE_3_0'), # Series 7
Cell('PCIE_3_1'), # Ultrascale
Cell('PCIE40E4'), # Ultrascale+
# Ethernet IP.
Cell('EMAC'), # Virtex 4
Cell('TEMAC'), # Virtex 5
Cell('TEMAC_SINGLE'), # Virtex 6
Cell('CMAC'), # Ultrascale
    Cell('CMACE4'), # Ultrascale+
# PowerPC.
# TODO PPC405 (Virtex 2)
Cell('PPC405_ADV'), # Virtex 4
Cell('PPC440'), # Virtex 5
# Misc hard IP.
Cell('MCB'), # Spartan 6 Memory Controller Block
Cell('PS7', keep=True), # The Zynq 7000 ARM Processor System.
Cell('PS8', keep=True), # The Zynq Ultrascale+ ARM Processor System.
Cell('ILKN'), # Ultrascale Interlaken
Cell('ILKNE4'), # Ultrascale+ Interlaken
]
class State(Enum):
OUTSIDE = auto()
IN_MODULE = auto()
IN_OTHER_MODULE = auto()
IN_FUNCTION = auto()
IN_TASK = auto()
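# The extractor below scans each cell's Verilog model with a line-oriented
# state machine: OUTSIDE until the wanted `module` header is found, IN_MODULE
# while collecting its ports and parameters (task/function bodies are skipped
# via IN_TASK and IN_FUNCTION so their locals are not mistaken for ports),
# and IN_OTHER_MODULE while inside an unrelated module in the same file.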
def xtract_cell_decl(cell, dirs, outf):
for dir in dirs:
fname = os.path.join(dir, cell.name + '.v')
try:
with open(fname) as f:
state = State.OUTSIDE
found = False
# Probably the most horrible Verilog "parser" ever written.
module_ports = []
invertible_ports = set()
for l in f:
l = l.partition('//')[0]
l = l.strip()
if l == 'module {}'.format(cell.name) or l.startswith('module {} '.format(cell.name)):
if found:
print('Multiple modules in {}.'.format(fname))
sys.exit(1)
elif state != State.OUTSIDE:
print('Nested modules in {}.'.format(fname))
sys.exit(1)
found = True
state = State.IN_MODULE
if cell.keep:
outf.write('(* keep *)\n')
outf.write('module {} (...);\n'.format(cell.name))
elif l.startswith('module '):
if state != State.OUTSIDE:
print('Nested modules in {}.'.format(fname))
sys.exit(1)
state = State.IN_OTHER_MODULE
elif l.startswith('task '):
if state == State.IN_MODULE:
state = State.IN_TASK
elif l.startswith('function '):
if state == State.IN_MODULE:
state = State.IN_FUNCTION
elif l == 'endtask':
if state == State.IN_TASK:
state = State.IN_MODULE
elif l == 'endfunction':
if state == State.IN_FUNCTION:
state = State.IN_MODULE
elif l == 'endmodule':
if state == State.IN_MODULE:
for kind, rng, port in module_ports:
for attr in cell.port_attrs.get(port, []):
outf.write(' (* {} *)\n'.format(attr))
if port in invertible_ports:
outf.write(' (* invertible_pin = "IS_{}_INVERTED" *)\n'.format(port))
if rng is None:
outf.write(' {} {};\n'.format(kind, port))
else:
outf.write(' {} {} {};\n'.format(kind, rng, port))
outf.write(l + '\n')
outf.write('\n')
elif state != State.IN_OTHER_MODULE:
                            print('endmodule in weird place in {}.'.format(fname))
sys.exit(1)
state = State.OUTSIDE
elif l.startswith(('input ', 'output ', 'inout ')) and state == State.IN_MODULE:
if l.endswith((';', ',')):
l = l[:-1]
if ';' in l:
print('Weird port line in {} [{}].'.format(fname, l))
sys.exit(1)
kind, _, ports = l.partition(' ')
for port in ports.split(','):
port = port.strip()
if port.startswith('['):
rng, port = port.split()
else:
rng = None
module_ports.append((kind, rng, port))
elif l.startswith('parameter ') and state == State.IN_MODULE:
if 'UNPLACED' in l:
continue
if l.endswith((';', ',')):
l = l[:-1]
                        while '  ' in l:
                            l = l.replace('  ', ' ')
if ';' in l:
print('Weird parameter line in {} [{}].'.format(fname, l))
sys.exit(1)
outf.write(' {};\n'.format(l))
match = re.search('IS_([a-zA-Z0-9_]+)_INVERTED', l)
if match:
invertible_ports.add(match[1])
if state != State.OUTSIDE:
print('endmodule not found in {}.'.format(fname))
sys.exit(1)
if not found:
print('Cannot find module {} in {}.'.format(cell.name, fname))
sys.exit(1)
return
except FileNotFoundError:
continue
print('Cannot find {}.'.format(cell.name))
sys.exit(1)
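# For illustration, the skeleton emitted for a cell such as BUFGCE looks
# roughly like this (the exact parameter list depends on the installed Xilinx
# models, and `(* keep *)` is emitted only for cells declared with keep=True):
#
#   module BUFGCE (...);
#       parameter CE_TYPE = "SYNC";
#       parameter IS_CE_INVERTED = 1'b0;
#       (* clkbuf_driver *)
#       output O;
#       (* invertible_pin = "IS_CE_INVERTED" *)
#       input CE;
#       input I;
#   endmodule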
if __name__ == '__main__':
parser = ArgumentParser(description='Extract Xilinx blackbox cell definitions from ISE and Vivado.')
parser.add_argument('vivado_dir', nargs='?', default='/opt/Xilinx/Vivado/2018.1')
parser.add_argument('ise_dir', nargs='?', default='/opt/Xilinx/ISE/14.7')
args = parser.parse_args()
dirs = [
os.path.join(args.vivado_dir, 'data/verilog/src/xeclib'),
os.path.join(args.vivado_dir, 'data/verilog/src/retarget'),
os.path.join(args.ise_dir, 'ISE_DS/ISE/verilog/xeclib/unisims'),
]
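    # Directories are tried in order; for each cell, the first directory that
    # contains a model wins (missing files fall through to the next directory).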
for dir in dirs:
if not os.path.isdir(dir):
print('{} is not a directory'.format(dir))
out = StringIO()
for cell in CELLS:
xtract_cell_decl(cell, dirs, out)
with open('cells_xtra.v', 'w') as f:
f.write('// Created by cells_xtra.py from Xilinx models\n')
f.write('\n')
f.write(out.getvalue())
|
|
"""
Linear Discriminant Analysis (LDA)
"""
# Authors: Clemens Brunner
# Martin Billinger
# Matthieu Perrot
# Mathieu Blondel
# License: BSD 3-Clause
from __future__ import print_function
import warnings
import numpy as np
from scipy import linalg
from .externals.six import string_types
from .base import BaseEstimator, TransformerMixin
from .linear_model.base import LinearClassifierMixin
from .covariance import ledoit_wolf, empirical_covariance, shrunk_covariance
from .utils.multiclass import unique_labels
from .utils import check_array, check_X_y
from .utils.validation import check_is_fitted
from .utils.fixes import bincount
from .preprocessing import StandardScaler
__all__ = ['LDA']
def _cov(X, shrinkage=None):
"""Estimate covariance matrix (using optional shrinkage).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None or 'empirical': no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Returns
-------
s : array, shape (n_features, n_features)
Estimated covariance matrix.
"""
shrinkage = "empirical" if shrinkage is None else shrinkage
if isinstance(shrinkage, string_types):
if shrinkage == 'auto':
sc = StandardScaler() # standardize features
X = sc.fit_transform(X)
s = ledoit_wolf(X)[0]
s = sc.std_[:, np.newaxis] * s * sc.std_[np.newaxis, :] # rescale
elif shrinkage == 'empirical':
s = empirical_covariance(X)
else:
raise ValueError('unknown shrinkage parameter')
elif isinstance(shrinkage, float) or isinstance(shrinkage, int):
if shrinkage < 0 or shrinkage > 1:
raise ValueError('shrinkage parameter must be between 0 and 1')
s = shrunk_covariance(empirical_covariance(X), shrinkage)
else:
        raise TypeError('shrinkage must be of string or float type')
return s
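# Illustrative use of _cov (not part of the original module): for X of shape
# (n_samples, n_features), _cov(X) returns the empirical covariance,
# _cov(X, 'auto') the Ledoit-Wolf shrunk estimate rescaled back to the
# original feature scales, and _cov(X, 0.1) a fixed-shrinkage estimate.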
def _class_means(X, y):
"""Compute class means.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Returns
-------
    means : array-like, shape (n_classes, n_features)
Class means.
"""
means = []
classes = np.unique(y)
for group in classes:
Xg = X[y == group, :]
means.append(Xg.mean(0))
return np.asarray(means)
def _class_cov(X, y, priors=None, shrinkage=None):
"""Compute class covariance matrix.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
priors : array-like, shape (n_classes,)
Class priors.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Returns
-------
cov : array-like, shape (n_features, n_features)
Class covariance matrix.
"""
classes = np.unique(y)
covs = []
for group in classes:
Xg = X[y == group, :]
covs.append(np.atleast_2d(_cov(Xg, shrinkage)))
return np.average(covs, axis=0, weights=priors)
class LDA(BaseEstimator, LinearClassifierMixin, TransformerMixin):
"""Linear Discriminant Analysis (LDA).
A classifier with a linear decision boundary, generated by fitting class
conditional densities to the data and using Bayes' rule.
The model fits a Gaussian density to each class, assuming that all classes
share the same covariance matrix.
The fitted model can also be used to reduce the dimensionality of the input
by projecting it to the most discriminative directions.
Read more in the :ref:`User Guide <lda_qda>`.
Parameters
----------
solver : string, optional
Solver to use, possible values:
- 'svd': Singular value decomposition (default). Does not compute the
covariance matrix, therefore this solver is recommended for
data with a large number of features.
- 'lsqr': Least squares solution, can be combined with shrinkage.
- 'eigen': Eigenvalue decomposition, can be combined with shrinkage.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Note that shrinkage works only with 'lsqr' and 'eigen' solvers.
priors : array, optional, shape (n_classes,)
Class priors.
n_components : int, optional
        Number of components (<= n_classes - 1) for dimensionality reduction.
store_covariance : bool, optional
Additionally compute class covariance matrix (default False).
tol : float, optional
Threshold used for rank estimation in SVD solver.
Attributes
----------
coef_ : array, shape (n_features,) or (n_classes, n_features)
Weight vector(s).
    intercept_ : array, shape (n_classes,)
Intercept term.
covariance_ : array-like, shape (n_features, n_features)
Covariance matrix (shared by all classes).
means_ : array-like, shape (n_classes, n_features)
Class means.
priors_ : array-like, shape (n_classes,)
Class priors (sum to 1).
scalings_ : array-like, shape (rank, n_classes - 1)
Scaling of the features in the space spanned by the class centroids.
xbar_ : array-like, shape (n_features,)
Overall mean.
classes_ : array-like, shape (n_classes,)
Unique class labels.
See also
--------
sklearn.qda.QDA: Quadratic discriminant analysis
Notes
-----
The default solver is 'svd'. It can perform both classification and
transform, and it does not rely on the calculation of the covariance
matrix. This can be an advantage in situations where the number of features
is large. However, the 'svd' solver cannot be used with shrinkage.
The 'lsqr' solver is an efficient algorithm that only works for
classification. It supports shrinkage.
The 'eigen' solver is based on the optimization of the between class
scatter to within class scatter ratio. It can be used for both
classification and transform, and it supports shrinkage. However, the
'eigen' solver needs to compute the covariance matrix, so it might not be
suitable for situations with a high number of features.
Examples
--------
>>> import numpy as np
>>> from sklearn.lda import LDA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = LDA()
>>> clf.fit(X, y)
LDA(n_components=None, priors=None, shrinkage=None, solver='svd',
store_covariance=False, tol=0.0001)
>>> print(clf.predict([[-0.8, -1]]))
[1]
"""
def __init__(self, solver='svd', shrinkage=None, priors=None,
n_components=None, store_covariance=False, tol=1e-4):
self.solver = solver
self.shrinkage = shrinkage
self.priors = priors
self.n_components = n_components
self.store_covariance = store_covariance # used only in svd solver
self.tol = tol # used only in svd solver
def _solve_lsqr(self, X, y, shrinkage):
"""Least squares solver.
The least squares solver computes a straightforward solution of the
optimal decision rule based directly on the discriminant functions. It
can only be used for classification (with optional shrinkage), because
estimation of eigenvectors is not performed. Therefore, dimensionality
reduction with the transform is not supported.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_classes)
Target values.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Notes
-----
This solver is based on [1]_, section 2.6.2, pp. 39-41.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
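        # Closed-form LDA discriminant with the shared covariance Sigma:
        # coef_k = Sigma^{-1} mu_k, computed as a least-squares solve rather
        # than an explicit inverse, and
        # intercept_k = -0.5 * mu_k^T Sigma^{-1} mu_k + log(prior_k).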
self.coef_ = linalg.lstsq(self.covariance_, self.means_.T)[0].T
self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T))
+ np.log(self.priors_))
def _solve_eigen(self, X, y, shrinkage):
"""Eigenvalue solver.
The eigenvalue solver computes the optimal solution of the Rayleigh
coefficient (basically the ratio of between class scatter to within
class scatter). This solver supports both classification and
dimensionality reduction (with optional shrinkage).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage constant.
Notes
-----
This solver is based on [1]_, section 3.8.3, pp. 121-124.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
Sw = self.covariance_ # within scatter
St = _cov(X, shrinkage) # total scatter
Sb = St - Sw # between scatter
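        # Solve the generalized symmetric eigenproblem Sb v = lambda * Sw v;
        # eigenvectors with the largest eigenvalues maximize the
        # between-class / within-class scatter ratio (Rayleigh quotient).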
evals, evecs = linalg.eigh(Sb, Sw)
evecs = evecs[:, np.argsort(evals)[::-1]] # sort eigenvectors
# evecs /= np.linalg.norm(evecs, axis=0) # doesn't work with numpy 1.6
evecs /= np.apply_along_axis(np.linalg.norm, 0, evecs)
self.scalings_ = evecs
self.coef_ = np.dot(self.means_, evecs).dot(evecs.T)
self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T))
+ np.log(self.priors_))
def _solve_svd(self, X, y, store_covariance=False, tol=1.0e-4):
"""SVD solver.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
store_covariance : bool, optional
Additionally compute class covariance matrix (default False).
tol : float, optional
Threshold used for rank estimation.
"""
n_samples, n_features = X.shape
n_classes = len(self.classes_)
self.means_ = _class_means(X, y)
if store_covariance:
self.covariance_ = _class_cov(X, y, self.priors_)
Xc = []
for idx, group in enumerate(self.classes_):
Xg = X[y == group, :]
Xc.append(Xg - self.means_[idx])
self.xbar_ = np.dot(self.priors_, self.means_)
Xc = np.concatenate(Xc, axis=0)
        # 1) within-class (univariate) scaling by the within-class std-dev
std = Xc.std(axis=0)
# avoid division by zero in normalization
std[std == 0] = 1.
fac = 1. / (n_samples - n_classes)
# 2) Within variance scaling
X = np.sqrt(fac) * (Xc / std)
        # SVD of the centered, within-class scaled data
U, S, V = linalg.svd(X, full_matrices=False)
rank = np.sum(S > tol)
if rank < n_features:
warnings.warn("Variables are collinear.")
# Scaling of within covariance is: V' 1/S
scalings = (V[:rank] / std).T / S[:rank]
# 3) Between variance scaling
# Scale weighted centers
X = np.dot(((np.sqrt((n_samples * self.priors_) * fac)) *
(self.means_ - self.xbar_).T).T, scalings)
# Centers are living in a space with n_classes-1 dim (maximum)
# Use SVD to find projection in the space spanned by the
# (n_classes) centers
_, S, V = linalg.svd(X, full_matrices=0)
rank = np.sum(S > tol * S[0])
self.scalings_ = np.dot(scalings, V.T[:, :rank])
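        # Compose the classifier back in the original feature space:
        # coef_ = (means_ - xbar_) @ scalings_ @ scalings_.T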
coef = np.dot(self.means_ - self.xbar_, self.scalings_)
self.intercept_ = (-0.5 * np.sum(coef ** 2, axis=1)
+ np.log(self.priors_))
self.coef_ = np.dot(coef, self.scalings_.T)
self.intercept_ -= np.dot(self.xbar_, self.coef_.T)
def fit(self, X, y, store_covariance=False, tol=1.0e-4):
"""Fit LDA model according to the given training data and parameters.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array, shape (n_samples,)
Target values.
"""
if store_covariance:
warnings.warn("'store_covariance' was moved to the __init__()"
"method in version 0.16 and will be removed from"
"fit() in version 0.18.", DeprecationWarning)
else:
store_covariance = self.store_covariance
if tol != 1.0e-4:
warnings.warn("'tol' was moved to __init__() method in version"
" 0.16 and will be removed from fit() in 0.18",
DeprecationWarning)
self.tol = tol
X, y = check_X_y(X, y)
self.classes_ = unique_labels(y)
if self.priors is None: # estimate priors from sample
_, y_t = np.unique(y, return_inverse=True) # non-negative ints
self.priors_ = bincount(y_t) / float(len(y))
else:
self.priors_ = self.priors
if self.solver == 'svd':
if self.shrinkage is not None:
raise NotImplementedError('shrinkage not supported')
self._solve_svd(X, y, store_covariance=store_covariance, tol=tol)
elif self.solver == 'lsqr':
self._solve_lsqr(X, y, shrinkage=self.shrinkage)
elif self.solver == 'eigen':
self._solve_eigen(X, y, shrinkage=self.shrinkage)
else:
raise ValueError("unknown solver {} (valid solvers are 'svd', "
"'lsqr', and 'eigen').".format(self.solver))
if self.classes_.size == 2: # treat binary case as a special case
self.coef_ = np.array(self.coef_[1, :] - self.coef_[0, :], ndmin=2)
self.intercept_ = np.array(self.intercept_[1] - self.intercept_[0],
ndmin=1)
return self
def transform(self, X):
"""Project data to maximize class separation.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Transformed data.
"""
if self.solver == 'lsqr':
raise NotImplementedError("transform not implemented for 'lsqr' "
"solver (use 'svd' or 'eigen').")
check_is_fitted(self, ['xbar_', 'scalings_'], all_or_any=any)
X = check_array(X)
if self.solver == 'svd':
X_new = np.dot(X - self.xbar_, self.scalings_)
elif self.solver == 'eigen':
X_new = np.dot(X, self.scalings_)
n_components = X.shape[1] if self.n_components is None \
else self.n_components
return X_new[:, :n_components]
def predict_proba(self, X):
"""Estimate probability.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
C : array, shape (n_samples, n_classes)
Estimated probabilities.
"""
prob = self.decision_function(X)
prob *= -1
np.exp(prob, prob)
prob += 1
np.reciprocal(prob, prob)
if len(self.classes_) == 2: # binary case
return np.column_stack([1 - prob, prob])
else:
# OvR normalization, like LibLinear's predict_probability
prob /= prob.sum(axis=1).reshape((prob.shape[0], -1))
return prob
def predict_log_proba(self, X):
"""Estimate log probability.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
C : array, shape (n_samples, n_classes)
Estimated log probabilities.
"""
return np.log(self.predict_proba(X))
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PacketCapturesOperations:
"""PacketCapturesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_02_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _create_initial(
self,
resource_group_name: str,
network_watcher_name: str,
packet_capture_name: str,
parameters: "_models.PacketCapture",
**kwargs: Any
) -> "_models.PacketCaptureResult":
cls = kwargs.pop('cls', None) # type: ClsType["_models.PacketCaptureResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-02-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'PacketCapture')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('PacketCaptureResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}'} # type: ignore
async def begin_create(
self,
resource_group_name: str,
network_watcher_name: str,
packet_capture_name: str,
parameters: "_models.PacketCapture",
**kwargs: Any
) -> AsyncLROPoller["_models.PacketCaptureResult"]:
"""Create and start a packet capture on the specified VM.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param packet_capture_name: The name of the packet capture session.
:type packet_capture_name: str
:param parameters: Parameters that define the create packet capture operation.
:type parameters: ~azure.mgmt.network.v2019_02_01.models.PacketCapture
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either PacketCaptureResult or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_02_01.models.PacketCaptureResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.PacketCaptureResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
packet_capture_name=packet_capture_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('PacketCaptureResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}'} # type: ignore
async def get(
self,
resource_group_name: str,
network_watcher_name: str,
packet_capture_name: str,
**kwargs: Any
) -> "_models.PacketCaptureResult":
"""Gets a packet capture session by name.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param packet_capture_name: The name of the packet capture session.
:type packet_capture_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PacketCaptureResult, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_02_01.models.PacketCaptureResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PacketCaptureResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-02-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('PacketCaptureResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
network_watcher_name: str,
packet_capture_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-02-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
network_watcher_name: str,
packet_capture_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified packet capture session.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param packet_capture_name: The name of the packet capture session.
:type packet_capture_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
packet_capture_name=packet_capture_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}'} # type: ignore
async def _stop_initial(
self,
resource_group_name: str,
network_watcher_name: str,
packet_capture_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-02-01"
accept = "application/json"
# Construct URL
url = self._stop_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_stop_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}/stop'} # type: ignore
async def begin_stop(
self,
resource_group_name: str,
network_watcher_name: str,
packet_capture_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Stops a specified packet capture session.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param packet_capture_name: The name of the packet capture session.
:type packet_capture_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._stop_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
packet_capture_name=packet_capture_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_stop.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}/stop'} # type: ignore
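# Continuation-token sketch (illustrative; `client` as in the earlier
# sketch): a poller's state can be persisted and later rehydrated through
# the continuation_token keyword documented above.
#
#   poller = await client.packet_captures.begin_stop(
#       "my-rg", "my-watcher", "my-capture")
#   token = poller.continuation_token()  # persist this string
#   resumed = await client.packet_captures.begin_stop(
#       "my-rg", "my-watcher", "my-capture", continuation_token=token)
#   await resumed.result()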
async def _get_status_initial(
self,
resource_group_name: str,
network_watcher_name: str,
packet_capture_name: str,
**kwargs: Any
) -> "_models.PacketCaptureQueryStatusResult":
cls = kwargs.pop('cls', None) # type: ClsType["_models.PacketCaptureQueryStatusResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-02-01"
accept = "application/json"
# Construct URL
url = self._get_status_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('PacketCaptureQueryStatusResult', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('PacketCaptureQueryStatusResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_get_status_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}/queryStatus'} # type: ignore
async def begin_get_status(
self,
resource_group_name: str,
network_watcher_name: str,
packet_capture_name: str,
**kwargs: Any
) -> AsyncLROPoller["_models.PacketCaptureQueryStatusResult"]:
"""Query the status of a running packet capture session.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the Network Watcher resource.
:type network_watcher_name: str
:param packet_capture_name: The name given to the packet capture session.
:type packet_capture_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either PacketCaptureQueryStatusResult or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_02_01.models.PacketCaptureQueryStatusResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.PacketCaptureQueryStatusResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._get_status_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
packet_capture_name=packet_capture_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('PacketCaptureQueryStatusResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_get_status.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}/queryStatus'} # type: ignore
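# Status-query sketch (illustrative; attribute names follow the
# PacketCaptureQueryStatusResult model):
#
#   poller = await client.packet_captures.begin_get_status(
#       "my-rg", "my-watcher", "my-capture")
#   status = await poller.result()
#   print(status.packet_capture_status, status.capture_start_time)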
def list(
self,
resource_group_name: str,
network_watcher_name: str,
**kwargs: Any
) -> AsyncIterable["_models.PacketCaptureListResult"]:
"""Lists all packet capture sessions within the specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the Network Watcher resource.
:type network_watcher_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PacketCaptureListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_02_01.models.PacketCaptureListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PacketCaptureListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-02-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('PacketCaptureListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures'} # type: ignore
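# Paging sketch (illustrative): list() returns an AsyncItemPaged that is
# consumed with `async for`; subsequent pages are fetched transparently.
#
#   async for capture in client.packet_captures.list("my-rg", "my-watcher"):
#       print(capture.name)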
# Copyright (c) 2015 FUJITSU LIMITED
# Copyright (c) 2012 EMC Corporation, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import six
import tempfile
from oslo_utils import units
from jacket.storage import exception
from jacket.storage import test
from jacket.storage.volume import configuration as conf
with mock.patch.dict('sys.modules', pywbem=mock.Mock()):
from jacket.storage.volume.drivers.fujitsu import eternus_dx_common as dx_common
from jacket.storage.volume.drivers.fujitsu import eternus_dx_fc as dx_fc
from jacket.storage.volume.drivers.fujitsu import eternus_dx_iscsi as dx_iscsi
CONFIG_FILE_NAME = 'cinder_fujitsu_eternus_dx.xml'
STORAGE_SYSTEM = '172.16.0.2'
CONF = """<?xml version='1.0' encoding='UTF-8'?>
<FUJITSU>
<EternusIP>172.16.0.2</EternusIP>
<EternusPort>5988</EternusPort>
<EternusUser>testuser</EternusUser>
<EternusPassword>testpass</EternusPassword>
<EternusISCSIIP>10.0.0.3</EternusISCSIIP>
<EternusPool>abcd1234_TPP</EternusPool>
<EternusSnapPool>abcd1234_OSVD</EternusSnapPool>
</FUJITSU>"""
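# The fake XML above mirrors the layout of the driver's
# cinder_eternus_config_file: EternusIP/EternusPort address the array's
# SMI-S provider, EternusUser/EternusPassword are its credentials,
# EternusISCSIIP is the iSCSI target portal, and EternusPool/EternusSnapPool
# name the pools used for volumes and snapshots.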
TEST_VOLUME = {
'id': '3d6eeb5d-109b-4435-b891-d01415178490',
'name': 'volume1',
'display_name': 'volume1',
'provider_location': None,
'volume_metadata': [],
'size': 1,
}
TEST_SNAP = {
'id': 'f47a8da3-d9e2-46aa-831f-0ef04158d5a1',
'volume_name': 'volume-3d6eeb5d-109b-4435-b891-d01415178490',
'name': 'snap1',
'display_name': 'test_snapshot',
'volume': TEST_VOLUME,
'volume_id': '3d6eeb5d-109b-4435-b891-d01415178490',
}
TEST_CLONE = {
'name': 'clone1',
'size': 1,
'volume_name': 'vol1',
'id': '391fb914-8a55-4384-a747-588641db3b15',
'project_id': 'project',
'display_name': 'clone1',
'display_description': 'volume created from snapshot',
'volume_metadata': [],
}
ISCSI_INITIATOR = 'iqn.1993-08.org.debian:01:8261afe17e4c'
ISCSI_TARGET_IP = '10.0.0.3'
ISCSI_TARGET_IQN = 'iqn.2000-09.com.fujitsu:storage-system.eternus-dxl:0'
FC_TARGET_WWN = ['500000E0DA000001', '500000E0DA000002']
TEST_WWPN = ['0123456789111111', '0123456789222222']
TEST_CONNECTOR = {'initiator': ISCSI_INITIATOR, 'wwpns': TEST_WWPN}
STOR_CONF_SVC = 'FUJITSU_StorageConfigurationService'
CTRL_CONF_SVC = 'FUJITSU_ControllerConfigurationService'
REPL_SVC = 'FUJITSU_ReplicationService'
STOR_VOL = 'FUJITSU_StorageVolume'
SCSI_PROT_CTR = 'FUJITSU_AffinityGroupController'
STOR_HWID = 'FUJITSU_StorageHardwareID'
STOR_HWID_MNG_SVC = 'FUJITSU_StorageHardwareIDManagementService'
STOR_POOL = 'FUJITSU_RAIDStoragePool'
STOR_POOLS = ['FUJITSU_ThinProvisioningPool', 'FUJITSU_RAIDStoragePool']
AUTH_PRIV = 'FUJITSU_AuthorizedPrivilege'
STOR_SYNC = 'FUJITSU_StorageSynchronized'
PROT_CTRL_UNIT = 'CIM_ProtocolControllerForUnit'
STORAGE_TYPE = 'abcd1234_TPP'
LUNMASKCTRL_IDS = ['AFG0010_CM00CA00P00', 'AFG0011_CM01CA00P00']
MAP_STAT = '0'
VOL_STAT = '0'
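# Module-level state for FakeEternusConnection below: VOL_STAT flips to '1'
# when the fake creates a volume and back to '0' on delete; MAP_STAT does
# the same for ExposePaths/HidePaths mappings.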
FAKE_CAPACITY = 1170368102400
FAKE_LUN_ID1 = '600000E00D2A0000002A011500140000'
FAKE_LUN_NO1 = '0x0014'
FAKE_LUN_ID2 = '600000E00D2A0000002A0115001E0000'
FAKE_LUN_NO2 = '0x001E'
FAKE_SYSTEM_NAME = 'ET603SA4621302115'
FAKE_STATS = {
'vendor_name': 'FUJITSU',
'total_capacity_gb': FAKE_CAPACITY / units.Gi,
'free_capacity_gb': FAKE_CAPACITY / units.Gi,
}
FAKE_KEYBIND1 = {
'CreationClassName': 'FUJITSU_StorageVolume',
'SystemName': STORAGE_SYSTEM,
'DeviceID': FAKE_LUN_ID1,
'SystemCreationClassName': 'FUJITSU_StorageComputerSystem',
}
FAKE_LOCATION1 = {
'classname': 'FUJITSU_StorageVolume',
'keybindings': FAKE_KEYBIND1,
}
FAKE_LUN_META1 = {
'FJ_Pool_Type': 'Thinporvisioning_POOL',
'FJ_Volume_No': FAKE_LUN_NO1,
'FJ_Volume_Name': u'FJosv_0qJ4rpOHgFE8ipcJOMfBmg==',
'FJ_Pool_Name': STORAGE_TYPE,
'FJ_Backend': FAKE_SYSTEM_NAME,
}
FAKE_MODEL_INFO1 = {
'provider_location': six.text_type(FAKE_LOCATION1),
'metadata': FAKE_LUN_META1,
}
FAKE_KEYBIND2 = {
'CreationClassName': 'FUJITSU_StorageVolume',
'SystemName': STORAGE_SYSTEM,
'DeviceID': FAKE_LUN_ID2,
'SystemCreationClassName': 'FUJITSU_StorageComputerSystem',
}
FAKE_LOCATION2 = {
'classname': 'FUJITSU_StorageVolume',
'keybindings': FAKE_KEYBIND2,
}
FAKE_SNAP_INFO = {'provider_location': six.text_type(FAKE_LOCATION2)}
FAKE_LUN_META2 = {
'FJ_Pool_Type': 'Thinporvisioning_POOL',
'FJ_Volume_No': FAKE_LUN_NO1,
'FJ_Volume_Name': u'FJosv_UkCZqMFZW3SU_JzxjHiKfg==',
'FJ_Pool_Name': STORAGE_TYPE,
'FJ_Backend': FAKE_SYSTEM_NAME,
}
FAKE_MODEL_INFO2 = {
'provider_location': six.text_type(FAKE_LOCATION1),
'metadata': FAKE_LUN_META2,
}
class FJ_StorageVolume(dict):
pass
class FJ_StoragePool(dict):
pass
class FJ_AffinityGroupController(dict):
pass
class FakeCIMInstanceName(dict):
def fake_create_eternus_instance_name(self, classname, bindings):
instancename = FakeCIMInstanceName()
for key in bindings:
instancename[key] = bindings[key]
instancename.classname = classname
instancename.namespace = 'root/eternus'
return instancename
class FakeEternusConnection(object):
def InvokeMethod(self, MethodName, Service, ElementName=None, InPool=None,
ElementType=None, TheElement=None, LUNames=None,
Size=None, Type=None, Mode=None, Locality=None,
InitiatorPortIDs=None, TargetPortIDs=None,
DeviceAccesses=None, SyncType=None,
SourceElement=None, TargetElement=None,
Operation=None, CopyType=None,
Synchronization=None, ProtocolControllers=None,
TargetPool=None):
global MAP_STAT, VOL_STAT
if MethodName == 'CreateOrModifyElementFromStoragePool':
VOL_STAT = '1'
rc = 0
vol = self._enum_volumes()
job = {'TheElement': vol[0].path}
elif MethodName == 'ReturnToStoragePool':
VOL_STAT = '0'
rc = 0
job = {}
elif MethodName == 'GetReplicationRelationships':
rc = 0
job = {'Synchronizations': []}
elif MethodName == 'ExposePaths':
MAP_STAT = '1'
rc = 0
job = {}
elif MethodName == 'HidePaths':
MAP_STAT = '0'
rc = 0
job = {}
elif MethodName == 'CreateElementReplica':
rc = 0
snap = self._enum_snapshots()
job = {'TargetElement': snap[0].path}
elif MethodName == 'CreateReplica':
rc = 0
snap = self._enum_snapshots()
job = {'TargetElement': snap[0].path}
elif MethodName == 'ModifyReplicaSynchronization':
rc = 0
job = {}
else:
raise exception.VolumeBackendAPIException(data="invoke method")
return (rc, job)
def EnumerateInstanceNames(self, name):
result = []
if name == 'FUJITSU_StorageVolume':
result = self._enum_volumes()
elif name == 'FUJITSU_StorageConfigurationService':
result = self._enum_confservice()
elif name == 'FUJITSU_ReplicationService':
result = self._enum_repservice()
elif name == 'FUJITSU_ControllerConfigurationService':
result = self._enum_ctrlservice()
elif name == 'FUJITSU_AffinityGroupController':
result = self._enum_afntyservice()
elif name == 'FUJITSU_StorageHardwareIDManagementService':
result = self._enum_sthwidmngsvc()
elif name == 'CIM_ProtocolControllerForUnit':
result = self._ref_unitnames()
elif name == 'CIM_StoragePool':
result = self._enum_pools()
elif name == 'FUJITSU_SCSIProtocolEndpoint':
result = self._enum_scsiport_endpoint()
elif name == 'FUJITSU_IPProtocolEndpoint':
result = self._enum_ipproto_endpoint()
return result
def EnumerateInstances(self, name):
result = None
if name == 'FUJITSU_StorageProduct':
result = self._enum_sysnames()
elif name == 'FUJITSU_RAIDStoragePool':
result = self._enum_pool_details('RAID')
elif name == 'FUJITSU_ThinProvisioningPool':
result = self._enum_pool_details('TPP')
elif name == 'FUJITSU_SCSIProtocolEndpoint':
result = self._enum_scsiport_endpoint()
elif name == 'FUJITSU_iSCSIProtocolEndpoint':
result = self._enum_iscsiprot_endpoint()
elif name == 'FUJITSU_StorageHardwareID':
result = self._enum_sthwid()
elif name == 'CIM_SCSIProtocolEndpoint':
result = self._enum_scsiport_endpoint()
else:
result = None
return result
def GetInstance(self, objectpath, LocalOnly=False):
try:
name = objectpath['CreationClassName']
except KeyError:
name = objectpath.classname
result = None
if name == 'FUJITSU_StorageVolume':
result = self._getinstance_storagevolume(objectpath)
elif name == 'FUJITSU_IPProtocolEndpoint':
result = self._getinstance_ipprotocolendpoint(objectpath)
elif name == 'CIM_ProtocolControllerForUnit':
result = self._getinstance_unit(objectpath)
elif name == 'FUJITSU_AffinityGroupController':
result = self._getinstance_unit(objectpath)
return result
def Associators(self, objectpath, AssocClass=None,
ResultClass='FUJITSU_StorageHardwareID'):
result = None
if ResultClass == 'FUJITSU_StorageHardwareID':
result = self._assoc_hdwid()
elif ResultClass == 'FUJITSU_iSCSIProtocolEndpoint':
result = self._assoc_endpoint(objectpath)
elif ResultClass == 'FUJITSU_StorageVolume':
result = self._assoc_storagevolume(objectpath)
elif ResultClass == 'FUJITSU_AuthorizedPrivilege':
result = self._assoc_authpriv()
else:
result = self._default_assoc(objectpath)
return result
def AssociatorNames(self, objectpath, AssocClass=None,
ResultClass=SCSI_PROT_CTR):
result = None
if ResultClass == SCSI_PROT_CTR:
result = self._assocnames_lunmaskctrl()
elif ResultClass == 'FUJITSU_TCPProtocolEndpoint':
result = self._assocnames_tcp_endpoint()
elif ResultClass == 'FUJITSU_AffinityGroupController':
result = self._assocnames_afngroup()
else:
result = self._default_assocnames(objectpath)
return result
def ReferenceNames(self, objectpath,
ResultClass='CIM_ProtocolControllerForUnit'):
result = []
if ResultClass == 'CIM_ProtocolControllerForUnit':
if MAP_STAT == '1':
result = self._ref_unitnames()
else:
result = []
elif ResultClass == 'FUJITSU_StorageSynchronized':
result = self._ref_storage_sync()
else:
result = self._default_ref(objectpath)
return result
def _ref_unitnames(self):
unitnames = []
unitname = FJ_AffinityGroupController()
dependent = {}
dependent['CreationClassName'] = STOR_VOL
dependent['DeviceID'] = FAKE_LUN_ID1
dependent['SystemName'] = STORAGE_SYSTEM
antecedent = {}
antecedent['CreationClassName'] = SCSI_PROT_CTR
antecedent['DeviceID'] = LUNMASKCTRL_IDS[0]
antecedent['SystemName'] = STORAGE_SYSTEM
unitname['Dependent'] = dependent
unitname['Antecedent'] = antecedent
unitname['CreationClassName'] = PROT_CTRL_UNIT
unitname.path = unitname
unitnames.append(unitname)
unitname2 = FJ_AffinityGroupController()
dependent2 = {}
dependent2['CreationClassName'] = STOR_VOL
dependent2['DeviceID'] = FAKE_LUN_ID1
dependent2['SystemName'] = STORAGE_SYSTEM
antecedent2 = {}
antecedent2['CreationClassName'] = SCSI_PROT_CTR
antecedent2['DeviceID'] = LUNMASKCTRL_IDS[1]
antecedent2['SystemName'] = STORAGE_SYSTEM
unitname2['Dependent'] = dependent2
unitname2['Antecedent'] = antecedent2
unitname2['CreationClassName'] = PROT_CTRL_UNIT
unitname2.path = unitname2
unitnames.append(unitname2)
return unitnames
def _ref_storage_sync(self):
syncnames = []
return syncnames
def _default_ref(self, objectpath):
return objectpath
def _default_assoc(self, objectpath):
return objectpath
def _assocnames_lunmaskctrl(self):
return self._enum_lunmaskctrls()
def _assocnames_tcp_endpoint(self):
return self._enum_tcp_endpoint()
def _assocnames_afngroup(self):
return self._enum_afntyservice()
def _default_assocnames(self, objectpath):
return objectpath
def _assoc_authpriv(self):
authprivs = []
iscsi = {}
iscsi['InstanceID'] = ISCSI_INITIATOR
authprivs.append(iscsi)
fc = {}
fc['InstanceID'] = TEST_WWPN[0]
authprivs.append(fc)
fc1 = {}
fc1['InstanceID'] = TEST_WWPN[1]
authprivs.append(fc1)
return authprivs
def _assoc_endpoint(self, objectpath):
targetlist = []
tgtport1 = {}
tgtport1['CreationClassName'] = 'FUJITSU_IPProtocolEndpoint'
tgtport1['Name'] = ('iqn.2000-09.com.fujitsu:storage-system.'
'eternus-dxl:0123456789,t,0x0009')
targetlist.append(tgtport1)
return targetlist
def _getinstance_unit(self, objectpath):
unit = FJ_AffinityGroupController()
unit.path = None
if MAP_STAT == '0':
return unit
dependent = {}
dependent['CreationClassName'] = STOR_VOL
dependent['DeviceID'] = FAKE_LUN_ID1
dependent['ElementName'] = TEST_VOLUME['name']
dependent['SystemName'] = STORAGE_SYSTEM
antecedent = {}
antecedent['CreationClassName'] = SCSI_PROT_CTR
antecedent['DeviceID'] = LUNMASKCTRL_IDS[0]
antecedent['SystemName'] = STORAGE_SYSTEM
unit['Dependent'] = dependent
unit['Antecedent'] = antecedent
unit['CreationClassName'] = PROT_CTRL_UNIT
unit['DeviceNumber'] = '0'
unit.path = unit
return unit
def _enum_sysnames(self):
sysnamelist = []
sysname = {}
sysname['IdentifyingNumber'] = FAKE_SYSTEM_NAME
sysnamelist.append(sysname)
return sysnamelist
def _enum_confservice(self):
services = []
service = {}
service['Name'] = 'FUJITSU:ETERNUS SMI-S Agent'
service['SystemCreationClassName'] = 'FUJITSU_StorageComputerSystem'
service['SystemName'] = STORAGE_SYSTEM
service['CreationClassName'] = 'FUJITSU_StorageConfigurationService'
services.append(service)
return services
def _enum_ctrlservice(self):
services = []
service = {}
service['SystemName'] = STORAGE_SYSTEM
service['CreationClassName'] = 'FUJITSU_ControllerConfigurationService'
services.append(service)
return services
def _enum_afntyservice(self):
services = []
service = {}
service['SystemName'] = STORAGE_SYSTEM
service['CreationClassName'] = 'FUJITSU_AffinityGroupController'
services.append(service)
return services
def _enum_repservice(self):
services = []
service = {}
service['Name'] = 'FUJITSU:ETERNUS SMI-S Agent'
service['SystemCreationClassName'] = 'FUJITSU_StorageComputerSystem'
service['SystemName'] = STORAGE_SYSTEM
service['CreationClassName'] = 'FUJITSU_ReplicationService'
services.append(service)
return services
def _enum_pools(self):
pools = []
pool = {}
pool['InstanceID'] = 'FUJITSU:RSP0004'
pool['CreationClassName'] = 'FUJITSU_RAIDStoragePool'
pools.append(pool)
pool2 = {}
pool2['InstanceID'] = 'FUJITSU:TPP0004'
pool2['CreationClassName'] = 'FUJITSU_ThinProvisioningPool'
pools.append(pool2)
return pools
def _enum_pool_details(self, pooltype):
pools = []
pool = FJ_StoragePool()
if pooltype == 'RAID':
pool['InstanceID'] = 'FUJITSU:RSP0004'
pool['CreationClassName'] = 'FUJITSU_RAIDStoragePool'
pool['ElementName'] = 'abcd1234_OSVD'
pool['TotalManagedSpace'] = 1170368102400
pool['RemainingManagedSpace'] = 1170368102400
pool.path = pool
pool.path.classname = 'FUJITSU_RAIDStoragePool'
else:
pool = FJ_StoragePool()
pool['InstanceID'] = 'FUJITSU:TPP0004'
pool['CreationClassName'] = 'FUJITSU_ThinProvisioningPool'
pool['ElementName'] = 'abcd1234_TPP'
pool['TotalManagedSpace'] = 1170368102400
pool['RemainingManagedSpace'] = 1170368102400
pool.path = pool
pool.path.classname = 'FUJITSU_ThinProvisioningPool'
pools.append(pool)
return pools
def _enum_volumes(self):
volumes = []
if VOL_STAT == '0':
return volumes
volume = FJ_StorageVolume()
volume['name'] = TEST_VOLUME['name']
volume['CreationClassName'] = 'FUJITSU_StorageVolume'
volume['Name'] = FAKE_LUN_ID1
volume['DeviceID'] = FAKE_LUN_ID1
volume['SystemCreationClassName'] = 'FUJITSU_StorageComputerSystem'
volume['SystemName'] = STORAGE_SYSTEM
volume['ElementName'] = 'FJosv_0qJ4rpOHgFE8ipcJOMfBmg=='
volume['volume_type_id'] = None
volume.path = volume
volume.path.classname = volume['CreationClassName']
name = {}
name['classname'] = 'FUJITSU_StorageVolume'
keys = {}
keys['CreationClassName'] = 'FUJITSU_StorageVolume'
keys['SystemName'] = STORAGE_SYSTEM
keys['DeviceID'] = volume['DeviceID']
keys['SystemCreationClassName'] = 'FUJITSU_StorageComputerSystem'
name['keybindings'] = keys
volume['provider_location'] = str(name)
volumes.append(volume)
snap_vol = FJ_StorageVolume()
snap_vol['name'] = TEST_SNAP['name']
snap_vol['CreationClassName'] = 'FUJITSU_StorageVolume'
snap_vol['Name'] = FAKE_LUN_ID2
snap_vol['DeviceID'] = FAKE_LUN_ID2
snap_vol['SystemCreationClassName'] = 'FUJITSU_StorageComputerSystem'
snap_vol['SystemName'] = STORAGE_SYSTEM
snap_vol['ElementName'] = 'FJosv_OgEZj1mSvKRvIKOExKktlg=='
snap_vol.path = snap_vol
snap_vol.path.classname = snap_vol['CreationClassName']
name2 = {}
name2['classname'] = 'FUJITSU_StorageVolume'
keys2 = {}
keys2['CreationClassName'] = 'FUJITSU_StorageVolume'
keys2['SystemName'] = STORAGE_SYSTEM
keys2['DeviceID'] = snap_vol['DeviceID']
keys2['SystemCreationClassName'] = 'FUJITSU_StorageComputerSystem'
name2['keybindings'] = keys2
snap_vol['provider_location'] = str(name2)
volumes.append(snap_vol)
clone_vol = FJ_StorageVolume()
clone_vol['name'] = TEST_CLONE['name']
clone_vol['CreationClassName'] = 'FUJITSU_StorageVolume'
clone_vol['ElementName'] = TEST_CLONE['name']
clone_vol['DeviceID'] = FAKE_LUN_ID2
clone_vol['SystemName'] = STORAGE_SYSTEM
clone_vol['SystemCreationClassName'] = 'FUJITSU_StorageComputerSystem'
clone_vol.path = clone_vol
clone_vol.path.classname = clone_vol['CreationClassName']
volumes.append(clone_vol)
return volumes
def _enum_snapshots(self):
snapshots = []
snap = FJ_StorageVolume()
snap['CreationClassName'] = 'FUJITSU_StorageVolume'
snap['SystemName'] = STORAGE_SYSTEM
snap['DeviceID'] = FAKE_LUN_ID2
snap['SystemCreationClassName'] = 'FUJITSU_StorageComputerSystem'
snap.path = snap
snap.path.classname = snap['CreationClassName']
snapshots.append(snap)
return snapshots
def _enum_lunmaskctrls(self):
ctrls = []
ctrl = {}
ctrl2 = {}
if MAP_STAT == '1':
ctrl['CreationClassName'] = SCSI_PROT_CTR
ctrl['SystemName'] = STORAGE_SYSTEM
ctrl['DeviceID'] = LUNMASKCTRL_IDS[0]
ctrls.append(ctrl)
ctrl2['CreationClassName'] = SCSI_PROT_CTR
ctrl2['SystemName'] = STORAGE_SYSTEM
ctrl2['DeviceID'] = LUNMASKCTRL_IDS[1]
ctrls.append(ctrl2)
return ctrls
def _enum_scsiport_endpoint(self):
targetlist = []
tgtport1 = {}
tgtport1['Name'] = '1234567890000021'
tgtport1['CreationClassName'] = 'FUJITSU_SCSIProtocolEndpoint'
tgtport1['ConnectionType'] = 2
tgtport1['RAMode'] = 0
targetlist.append(tgtport1)
return targetlist
def _enum_ipproto_endpoint(self):
targetlist = []
tgtport1 = {}
tgtport1['CreationClassName'] = 'FUJITSU_IPProtocolEndpoint'
tgtport1['NAME'] = 'IP_CM01CA00P00_00'
targetlist.append(tgtport1)
return targetlist
def _enum_tcp_endpoint(self):
targetlist = []
tgtport1 = {}
tgtport1['CreationClassName'] = 'FUJITSU_TCPProtocolEndpoint'
tgtport1['NAME'] = 'TCP_CM01CA00P00_00'
targetlist.append(tgtport1)
return targetlist
def _enum_iscsiprot_endpoint(self):
targetlist = []
tgtport1 = {}
tgtport1['Name'] = ('iqn.2000-09.com.fujitsu:storage-system.'
'eternus-dxl:0123456789,t,0x0009')
tgtport1['ConnectionType'] = 7
tgtport1['RAMode'] = 0
targetlist.append(tgtport1)
return targetlist
def _getinstance_storagevolume(self, objpath):
foundinstance = None
instance = FJ_StorageVolume()
volumes = self._enum_volumes()
for volume in volumes:
if volume['DeviceID'] == objpath['DeviceID']:
instance = volume
break
if not instance:
foundinstance = None
else:
foundinstance = instance
return foundinstance
def _getinstance_ipprotocolendpoint(self, objpath):
instance = {}
instance['IPv4Address'] = '10.0.0.3'
return instance
class FJFCDriverTestCase(test.TestCase):
def __init__(self, *args, **kwargs):
super(FJFCDriverTestCase, self).__init__(*args, **kwargs)
def setUp(self):
super(FJFCDriverTestCase, self).setUp()
# Make fake xml-configuration file.
self.config_file = tempfile.NamedTemporaryFile("w+", suffix='.xml')
self.addCleanup(self.config_file.close)
self.config_file.write(CONF)
self.config_file.flush()
# Make fake Object by using mock as configuration object.
self.configuration = mock.Mock(spec=conf.Configuration)
self.configuration.cinder_eternus_config_file = self.config_file.name
self.stubs.Set(dx_common.FJDXCommon, '_get_eternus_connection',
self.fake_eternus_connection)
instancename = FakeCIMInstanceName()
self.stubs.Set(dx_common.FJDXCommon, '_create_eternus_instance_name',
instancename.fake_create_eternus_instance_name)
# Set FC driver to self.driver.
driver = dx_fc.FJDXFCDriver(configuration=self.configuration)
self.driver = driver
def fake_eternus_connection(self):
conn = FakeEternusConnection()
return conn
def test_get_volume_stats(self):
ret = self.driver.get_volume_stats(True)
stats = {'vendor_name': ret['vendor_name'],
'total_capacity_gb': ret['total_capacity_gb'],
'free_capacity_gb': ret['free_capacity_gb']}
self.assertEqual(FAKE_STATS, stats)
def test_create_and_delete_volume(self):
model_info = self.driver.create_volume(TEST_VOLUME)
self.assertEqual(FAKE_MODEL_INFO1, model_info)
self.driver.delete_volume(TEST_VOLUME)
@mock.patch.object(dx_common.FJDXCommon, '_get_mapdata')
def test_map_unmap(self, mock_mapdata):
fake_data = {'target_wwn': FC_TARGET_WWN,
'target_lun': 0}
mock_mapdata.return_value = fake_data
fake_mapdata = dict(fake_data)
fake_mapdata['initiator_target_map'] = {
initiator: FC_TARGET_WWN for initiator in TEST_WWPN
}
fake_mapdata['volume_id'] = TEST_VOLUME['id']
fake_mapdata['target_discovered'] = True
fake_info = {'driver_volume_type': 'fibre_channel',
'data': fake_mapdata}
model_info = self.driver.create_volume(TEST_VOLUME)
self.assertEqual(FAKE_MODEL_INFO1, model_info)
info = self.driver.initialize_connection(TEST_VOLUME,
TEST_CONNECTOR)
self.assertEqual(fake_info, info)
self.driver.terminate_connection(TEST_VOLUME,
TEST_CONNECTOR)
self.driver.delete_volume(TEST_VOLUME)
def test_create_and_delete_snapshot(self):
model_info = self.driver.create_volume(TEST_VOLUME)
self.assertEqual(FAKE_MODEL_INFO1, model_info)
snap_info = self.driver.create_snapshot(TEST_SNAP)
self.assertEqual(FAKE_SNAP_INFO, snap_info)
self.driver.delete_snapshot(TEST_SNAP)
self.driver.delete_volume(TEST_VOLUME)
def test_create_volume_from_snapshot(self):
model_info = self.driver.create_volume(TEST_VOLUME)
self.assertEqual(FAKE_MODEL_INFO1, model_info)
snap_info = self.driver.create_snapshot(TEST_SNAP)
self.assertEqual(FAKE_SNAP_INFO, snap_info)
model_info = self.driver.create_volume_from_snapshot(TEST_CLONE,
TEST_SNAP)
self.assertEqual(FAKE_MODEL_INFO2, model_info)
self.driver.delete_snapshot(TEST_SNAP)
self.driver.delete_volume(TEST_CLONE)
self.driver.delete_volume(TEST_VOLUME)
def test_create_cloned_volume(self):
model_info = self.driver.create_volume(TEST_VOLUME)
self.assertEqual(FAKE_MODEL_INFO1, model_info)
model_info = self.driver.create_cloned_volume(TEST_CLONE, TEST_VOLUME)
self.assertEqual(FAKE_MODEL_INFO2, model_info)
self.driver.delete_volume(TEST_CLONE)
self.driver.delete_volume(TEST_VOLUME)
def test_extend_volume(self):
model_info = self.driver.create_volume(TEST_VOLUME)
self.assertEqual(FAKE_MODEL_INFO1, model_info)
self.driver.extend_volume(TEST_VOLUME, 10)
class FJISCSIDriverTestCase(test.TestCase):
def __init__(self, *args, **kwargs):
super(FJISCSIDriverTestCase, self).__init__(*args, **kwargs)
def setUp(self):
super(FJISCSIDriverTestCase, self).setUp()
# Make fake xml-configuration file.
self.config_file = tempfile.NamedTemporaryFile("w+", suffix='.xml')
self.addCleanup(self.config_file.close)
self.config_file.write(CONF)
self.config_file.flush()
# Make fake Object by using mock as configuration object.
self.configuration = mock.Mock(spec=conf.Configuration)
self.configuration.cinder_eternus_config_file = self.config_file.name
self.stubs.Set(dx_common.FJDXCommon, '_get_eternus_connection',
self.fake_eternus_connection)
instancename = FakeCIMInstanceName()
self.stubs.Set(dx_common.FJDXCommon, '_create_eternus_instance_name',
instancename.fake_create_eternus_instance_name)
self.stubs.Set(dx_common.FJDXCommon, '_get_mapdata_iscsi',
self.fake_get_mapdata)
# Set iscsi driver to self.driver.
driver = dx_iscsi.FJDXISCSIDriver(configuration=self.configuration)
self.driver = driver
def fake_eternus_connection(self):
conn = FakeEternusConnection()
return conn
def fake_get_mapdata(self, vol_instance, connector, target_portlist):
multipath = connector.get('multipath', False)
if multipath:
return {'target_portals': [ISCSI_TARGET_IP],
'target_iqns': [ISCSI_TARGET_IQN],
'target_luns': [0]}
else:
return {'target_portal': ISCSI_TARGET_IP,
'target_iqn': ISCSI_TARGET_IQN,
'target_lun': 0}
def test_get_volume_stats(self):
ret = self.driver.get_volume_stats(True)
stats = {'vendor_name': ret['vendor_name'],
'total_capacity_gb': ret['total_capacity_gb'],
'free_capacity_gb': ret['free_capacity_gb']}
self.assertEqual(FAKE_STATS, stats)
def test_create_and_delete_volume(self):
model_info = self.driver.create_volume(TEST_VOLUME)
self.assertEqual(FAKE_MODEL_INFO1, model_info)
self.driver.delete_volume(TEST_VOLUME)
def test_map_unmap(self):
fake_mapdata = self.fake_get_mapdata(None, {}, None)
fake_mapdata['volume_id'] = TEST_VOLUME['id']
fake_mapdata['target_discovered'] = True
fake_info = {'driver_volume_type': 'iscsi',
'data': fake_mapdata}
model_info = self.driver.create_volume(TEST_VOLUME)
self.assertEqual(FAKE_MODEL_INFO1, model_info)
info = self.driver.initialize_connection(TEST_VOLUME,
TEST_CONNECTOR)
self.assertEqual(fake_info, info)
self.driver.terminate_connection(TEST_VOLUME,
TEST_CONNECTOR)
self.driver.delete_volume(TEST_VOLUME)
def test_create_and_delete_snapshot(self):
model_info = self.driver.create_volume(TEST_VOLUME)
self.assertEqual(FAKE_MODEL_INFO1, model_info)
snap_info = self.driver.create_snapshot(TEST_SNAP)
self.assertEqual(FAKE_SNAP_INFO, snap_info)
self.driver.delete_snapshot(TEST_SNAP)
self.driver.delete_volume(TEST_VOLUME)
def test_create_volume_from_snapshot(self):
model_info = self.driver.create_volume(TEST_VOLUME)
self.assertEqual(FAKE_MODEL_INFO1, model_info)
snap_info = self.driver.create_snapshot(TEST_SNAP)
self.assertEqual(FAKE_SNAP_INFO, snap_info)
model_info = self.driver.create_volume_from_snapshot(TEST_CLONE,
TEST_SNAP)
self.assertEqual(FAKE_MODEL_INFO2, model_info)
self.driver.delete_snapshot(TEST_SNAP)
self.driver.delete_volume(TEST_CLONE)
self.driver.delete_volume(TEST_VOLUME)
def test_create_cloned_volume(self):
model_info = self.driver.create_volume(TEST_VOLUME)
self.assertEqual(FAKE_MODEL_INFO1, model_info)
model_info = self.driver.create_cloned_volume(TEST_CLONE, TEST_VOLUME)
self.assertEqual(FAKE_MODEL_INFO2, model_info)
self.driver.delete_volume(TEST_CLONE)
self.driver.delete_volume(TEST_VOLUME)
def test_extend_volume(self):
model_info = self.driver.create_volume(TEST_VOLUME)
self.assertEqual(FAKE_MODEL_INFO1, model_info)
self.driver.extend_volume(TEST_VOLUME, 10)
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class VirtualMachineScaleSetExtensionsOperations(object):
"""VirtualMachineScaleSetExtensionsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client Api Version. Constant value: "2017-03-30".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2017-03-30"
self.config = config
def _create_or_update_initial(
self, resource_group_name, vm_scale_set_name, vmss_extension_name, extension_parameters, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.create_or_update.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'),
'vmssExtensionName': self._serialize.url("vmss_extension_name", vmss_extension_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(extension_parameters, 'VirtualMachineScaleSetExtension')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VirtualMachineScaleSetExtension', response)
if response.status_code == 201:
deserialized = self._deserialize('VirtualMachineScaleSetExtension', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_or_update(
self, resource_group_name, vm_scale_set_name, vmss_extension_name, extension_parameters, custom_headers=None, raw=False, polling=True, **operation_config):
"""The operation to create or update an extension.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set where the
extension should be created or updated.
:type vm_scale_set_name: str
:param vmss_extension_name: The name of the VM scale set extension.
:type vmss_extension_name: str
:param extension_parameters: Parameters supplied to the Create VM
scale set Extension operation.
:type extension_parameters:
~azure.mgmt.compute.v2017_03_30.models.VirtualMachineScaleSetExtension
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns
VirtualMachineScaleSetExtension or
ClientRawResponse<VirtualMachineScaleSetExtension> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.compute.v2017_03_30.models.VirtualMachineScaleSetExtension]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.compute.v2017_03_30.models.VirtualMachineScaleSetExtension]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
vmss_extension_name=vmss_extension_name,
extension_parameters=extension_parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('VirtualMachineScaleSetExtension', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensions/{vmssExtensionName}'}
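# Usage sketch (illustrative; `compute_client` construction is assumed):
#
#   poller = compute_client.virtual_machine_scale_set_extensions.create_or_update(
#       'my-rg', 'my-vmss', 'my-ext', extension_parameters)
#   extension = poller.result()  # blocks until the LRO completes
#   # non-blocking alternatives: poller.done(), poller.add_done_callback(cb)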
def _delete_initial(
self, resource_group_name, vm_scale_set_name, vmss_extension_name, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.delete.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'),
'vmssExtensionName': self._serialize.url("vmss_extension_name", vmss_extension_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 202, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('OperationStatusResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def delete(
self, resource_group_name, vm_scale_set_name, vmss_extension_name, custom_headers=None, raw=False, polling=True, **operation_config):
"""The operation to delete the extension.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set where the
extension should be deleted.
:type vm_scale_set_name: str
:param vmss_extension_name: The name of the VM scale set extension.
:type vmss_extension_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns OperationStatusResponse
or ClientRawResponse<OperationStatusResponse> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.compute.v2017_03_30.models.OperationStatusResponse]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.compute.v2017_03_30.models.OperationStatusResponse]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
vmss_extension_name=vmss_extension_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('OperationStatusResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensions/{vmssExtensionName}'}
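# Sketch (illustrative): passing polling=False selects NoPolling, so
# result() returns the deserialized initial response without waiting.
#
#   poller = compute_client.virtual_machine_scale_set_extensions.delete(
#       'my-rg', 'my-vmss', 'my-ext', polling=False)
#   initial = poller.result()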
def get(
self, resource_group_name, vm_scale_set_name, vmss_extension_name, expand=None, custom_headers=None, raw=False, **operation_config):
"""The operation to get the extension.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set containing the
extension.
:type vm_scale_set_name: str
:param vmss_extension_name: The name of the VM scale set extension.
:type vmss_extension_name: str
:param expand: The expand expression to apply on the operation.
:type expand: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: VirtualMachineScaleSetExtension or ClientRawResponse if
raw=true
:rtype:
~azure.mgmt.compute.v2017_03_30.models.VirtualMachineScaleSetExtension
or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.get.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'),
'vmssExtensionName': self._serialize.url("vmss_extension_name", vmss_extension_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VirtualMachineScaleSetExtension', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensions/{vmssExtensionName}'}
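# Sketch (illustrative; the expand value is an assumed example): expand is
# forwarded as the $expand query parameter.
#
#   ext = compute_client.virtual_machine_scale_set_extensions.get(
#       'my-rg', 'my-vmss', 'my-ext', expand='instanceView')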
def list(
self, resource_group_name, vm_scale_set_name, custom_headers=None, raw=False, **operation_config):
"""Gets a list of all extensions in a VM scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set containing the
extension.
:type vm_scale_set_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of VirtualMachineScaleSetExtension
:rtype:
~azure.mgmt.compute.v2017_03_30.models.VirtualMachineScaleSetExtensionPaged[~azure.mgmt.compute.v2017_03_30.models.VirtualMachineScaleSetExtension]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.VirtualMachineScaleSetExtensionPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.VirtualMachineScaleSetExtensionPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensions'}
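# Sketch (illustrative): the returned Paged object is a plain iterator that
# follows nextLink transparently.
#
#   for ext in compute_client.virtual_machine_scale_set_extensions.list(
#           'my-rg', 'my-vmss'):
#       print(ext.name)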
'''
* Copyright (c) 2011, University of Kent
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 1. Neither the name of the University of Kent nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* 2. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
* IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED.
*
* 3. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* 4. YOU AGREE THAT THE EXCLUSIONS IN PARAGRAPHS 2 AND 3 ABOVE ARE REASONABLE
* IN THE CIRCUMSTANCES. IN PARTICULAR, YOU ACKNOWLEDGE (1) THAT THIS
* SOFTWARE HAS BEEN MADE AVAILABLE TO YOU FREE OF CHARGE, (2) THAT THIS
* SOFTWARE IS NOT "PRODUCT" QUALITY, BUT HAS BEEN PRODUCED BY A RESEARCH
* GROUP WHO DESIRE TO MAKE THIS SOFTWARE FREELY AVAILABLE TO PEOPLE WHO WISH
* TO USE IT, AND (3) THAT BECAUSE THIS SOFTWARE IS NOT OF "PRODUCT" QUALITY
* IT IS INEVITABLE THAT THERE WILL BE BUGS AND ERRORS, AND POSSIBLY MORE
* SERIOUS FAULTS, IN THIS SOFTWARE.
*
* 5. This license is governed, except to the extent that local laws
* necessarily apply, by the laws of England and Wales.
'''
'''
Created on 1 Feb 2013
@author: Kristy Siu
'''
import logging
import tempfile
import urlparse
import sys
import uuid
sys.path.insert(0, '../')
import dm.xmlsec.binding as xmlsec
xmlsec.initialize()
from os.path import dirname, basename
from lxml.etree import parse, tostring, fromstring, ElementTree
from time import localtime, strftime, gmtime
import urllib
import webbrowser
import urllib2
import zlib
import base64
import webob.dec
import webob.exc
import json
import re
from keystone.common import cms
from keystone import config
from keystone.contrib import mapping
from keystone import catalog
from keystone import exception
LOG = logging.getLogger(__name__)
class RequestIssuingService(object):
def __init__(self):
self.tmplReq = '{"auth":{"passwordCredentials": {"username":"", "password":""}}}'
    def getIdPRequest(self, key, issuer, endpoint):
LOG.info('IssueRequest')
resp = {}
resp['idpRequest'] = self.tmplReq
resp['idpEndpoint'] = endpoint
return valid_Response(resp)
def __call__(self):
return None
class Negotiator(object):
    def __init__(self):
        """Not implemented yet."""
        raise exception.NotImplemented()
    def negotiate(self, data):
        """Not implemented yet."""
        raise exception.NotImplemented()
class CredentialValidator(object):
def __init__(self):
self.org_mapping_api = mapping.controllers.OrgMappingController()
self.mapping_api = mapping.controllers.AttributeMappingController()
def __call__(self):
return None
def validate(self, response, realm_id):
catalog_api = catalog.controllers.EndpointV3()
context = {}
context['is_admin'] = True
context['query_string'] = {}
context['query_string']['service_id'] = realm_id
context['interface'] = 'adminurl'
context['path'] = ""
endpoints = catalog_api.list_endpoints(context)
for e in endpoints['endpoints']:
creds = e["creds"]
if e['interface'] == 'admin':
endpoint = e['url']+'/tokens/'
if e['interface'] == 'public':
post_endpoint = e['url']+'/tokens'
token_id = response['access']['token']['id']
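        # NB: is_ans1_token is keystone.common.cms's historical (misspelled)
        # name for the ASN.1/CMS token check; the spelling is kept here to
        # match that API.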
if not cms.is_ans1_token(token_id):
auth_req = {"auth":{}}
auth_req["auth"]["tenantName"] = "service"
auth_req['auth']['passwordCredentials'] = {"username": creds["user"], "password": creds["pass"]}
auth_token = self.request(post_endpoint, data=auth_req, method="POST")
header = {"X-Auth-Token": auth_token['access']['token']['id']}
validatedResponse = self.request(keystoneEndpoint=endpoint, data=token_id, method="GET", header=header)
else:
cert_file = tempfile.NamedTemporaryFile()
cert_file.write(self.format_certdata(creds["certdata"]))
cert_file.flush()
cacert_file = tempfile.NamedTemporaryFile()
cacert_file.write(self.format_certdata(creds["cacert"]))
cacert_file.flush()
data = json.loads(cms.cms_verify(cms.token_to_cms(token_id),cert_file.name,cacert_file.name))
cert_file.close()
cacert_file.close()
data['access']['token']['user'] = data['access']['user']
data['access']['token']['metadata'] = data['access']['metadata']
validatedResponse = data
validatedAttributes = {}
for r in validatedResponse['access']['user']['roles']:
if validatedAttributes.get('role') is None:
validatedAttributes['role'] = []
validatedAttributes['role'].append(r['name'])
validatedAttributes['project'] = [validatedResponse['access']['token']['tenant']['name']]
username = validatedResponse['access']['user']['name']
expires = validatedResponse['access']['token']['expires']
return username, expires, self.check_issuers(validatedAttributes, realm_id)
    def format_certdata(self, data):
        # Wrap the base64 payload at 64 characters per line, as PEM requires.
        # re.DOTALL must be passed via the flags keyword; as a positional
        # argument it would be interpreted as the substitution count.
        data = re.sub("(.{64})", "\\1\n", data, flags=re.DOTALL)
        return "-----BEGIN CERTIFICATE-----\n"+data+"\n-----END CERTIFICATE-----"
    ## Send a request that will be processed by the V2 Keystone
    def request(self, keystoneEndpoint=None, data=None, method="GET", header=None):
        # Avoid mutable default arguments; copy into a local dict so POST
        # can add headers without mutating the caller's object.
        headers = dict(header or {})
        if method == "GET":
            req = urllib2.Request(keystoneEndpoint + data, headers=headers)
            response = urllib2.urlopen(req)
        elif method == "POST":
            data = json.dumps(data)
            headers['Content-Type'] = 'application/json'
            req = urllib2.Request(keystoneEndpoint, data, headers)
            response = urllib2.urlopen(req)
        return json.loads(response.read())
def check_issuers(self, atts, realm_id):
context = {"is_admin": True}
valid_atts = {}
LOG.debug("User's Attributes are: ")
LOG.debug(atts)
for att in atts:
for val in atts[att]:
org_atts = self.org_mapping_api.list_org_attributes(context)['org_attributes']
LOG.debug("The retrieved Org Atts are:")
LOG.debug(org_atts)
for org_att in org_atts:
if org_att['type'] == att:
if org_att['value'] == val or org_att['value'] is None:
try:
self.org_mapping_api.check_attribute_can_be_issued(context, service_id=realm_id, org_attribute_id=org_att['id'])
if valid_atts.get(att) is None:
valid_atts[att] = [val]
else:
valid_atts[att].append(val)
except exception.NotFound:
pass
return valid_atts
def valid_Response(response):
resp = webob.Response(content_type='application/json')
resp.body = json.dumps(response)
return resp
def inflate(data):
decompress = zlib.decompressobj(
-zlib.MAX_WBITS # see above
)
inflated = decompress.decompress(data)
inflated += decompress.flush()
return inflated
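# Companion sketch for inflate(): SAML HTTP-Redirect payloads are raw DEFLATE
# streams (no zlib header), which is why decompressobj is created with
# -zlib.MAX_WBITS above. The deflate() helper below is an illustrative
# assumption added here; it is not part of the original module.
def deflate(data):
    compress = zlib.compressobj(9, zlib.DEFLATED, -zlib.MAX_WBITS)
    deflated = compress.compress(data)
    deflated += compress.flush()
    return deflated
# Round trip, as a redirect binding would produce it:
#   encoded = base64.b64encode(deflate(xml))
#   assert inflate(base64.b64decode(encoded)) == xml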
|
|
# Copyright 2015 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from proboscis import SkipTest
from troveclient.compat import exceptions
from trove.common.utils import generate_uuid
from trove.common.utils import poll_until
from trove.tests.config import CONFIG
from trove.tests.scenario.helpers.test_helper import DataType
from trove.tests.scenario.runners.test_runners import TestRunner
from trove.tests.util import create_dbaas_client
from trove.tests.util.users import Requirements
class BackupRunner(TestRunner):
def __init__(self):
self.TIMEOUT_BACKUP_CREATE = 60 * 30
self.TIMEOUT_BACKUP_DELETE = 120
super(BackupRunner, self).__init__(sleep_time=20,
timeout=self.TIMEOUT_BACKUP_CREATE)
self.BACKUP_NAME = 'backup_test'
self.BACKUP_DESC = 'test description'
self.backup_host = None
self.backup_info = None
self.backup_count_prior_to_create = 0
self.backup_count_for_instance_prior_to_create = 0
self.incremental_backup_info = None
self.restore_instance_id = 0
self.restore_host = None
self.other_client = None
def run_backup_create_instance_invalid(
self, expected_exception=exceptions.BadRequest,
expected_http_code=400):
invalid_inst_id = 'invalid-inst-id'
self.assert_raises(
expected_exception, expected_http_code,
self.auth_client.backups.create,
self.BACKUP_NAME, invalid_inst_id, self.BACKUP_DESC)
def run_backup_create_instance_not_found(
self, expected_exception=exceptions.NotFound,
expected_http_code=404):
self.assert_raises(
expected_exception, expected_http_code,
self.auth_client.backups.create,
self.BACKUP_NAME, generate_uuid(), self.BACKUP_DESC)
def run_add_data_for_backup(self):
self.backup_host = self.get_instance_host()
self.assert_add_data_for_backup(self.backup_host)
def assert_add_data_for_backup(self, host):
"""In order for this to work, the corresponding datastore
'helper' class should implement the 'add_large_data' method.
"""
self.test_helper.add_data(DataType.large, host)
def run_verify_data_for_backup(self):
self.assert_verify_backup_data(self.backup_host)
def assert_verify_backup_data(self, host):
"""In order for this to work, the corresponding datastore
'helper' class should implement the 'verify_large_data' method.
"""
self.test_helper.verify_data(DataType.large, host)
def run_backup_create(self):
self.assert_backup_create()
def assert_backup_create(self):
# Necessary to test that the count increases.
self.backup_count_prior_to_create = len(
self.auth_client.backups.list())
self.backup_count_for_instance_prior_to_create = len(
self.auth_client.instances.backups(self.instance_info.id))
result = self.auth_client.backups.create(
self.BACKUP_NAME, self.instance_info.id, self.BACKUP_DESC)
self.backup_info = result
self.assert_equal(self.BACKUP_NAME, result.name,
'Unexpected backup name')
self.assert_equal(self.BACKUP_DESC, result.description,
'Unexpected backup description')
self.assert_equal(self.instance_info.id, result.instance_id,
'Unexpected instance ID for backup')
self.assert_equal('NEW', result.status,
'Unexpected status for backup')
instance = self.auth_client.instances.get(
self.instance_info.id)
datastore_version = self.auth_client.datastore_versions.get(
self.instance_info.dbaas_datastore,
self.instance_info.dbaas_datastore_version)
self.assert_equal('BACKUP', instance.status,
'Unexpected instance status')
self.assert_equal(self.instance_info.dbaas_datastore,
result.datastore['type'],
'Unexpected datastore')
self.assert_equal(self.instance_info.dbaas_datastore_version,
result.datastore['version'],
'Unexpected datastore version')
self.assert_equal(datastore_version.id, result.datastore['version_id'],
'Unexpected datastore version id')
def run_restore_instance_from_not_completed_backup(
self, expected_exception=exceptions.Conflict,
expected_http_code=409):
self.assert_raises(
expected_exception, expected_http_code,
self._restore_from_backup, self.backup_info.id)
def run_instance_action_right_after_backup_create(
self, expected_exception=exceptions.UnprocessableEntity,
expected_http_code=422):
self.assert_raises(expected_exception, expected_http_code,
self.auth_client.instances.resize_instance,
self.instance_info.id, 1)
def run_backup_create_another_backup_running(
self, expected_exception=exceptions.UnprocessableEntity,
expected_http_code=422):
self.assert_raises(expected_exception, expected_http_code,
self.auth_client.backups.create,
'backup_test2', self.instance_info.id,
'test description2')
def run_backup_delete_while_backup_running(
self, expected_exception=exceptions.UnprocessableEntity,
expected_http_code=422):
result = self.auth_client.backups.list()
backup = result[0]
self.assert_raises(expected_exception, expected_http_code,
self.auth_client.backups.delete, backup.id)
def run_backup_create_completed(self):
self._verify_backup(self.backup_info.id)
def _verify_backup(self, backup_id):
def _result_is_active():
backup = self.auth_client.backups.get(backup_id)
if backup.status == 'COMPLETED':
return True
else:
                self.assert_not_equal('FAILED', backup.status,
                                      'Backup status should not be FAILED')
return False
poll_until(_result_is_active, time_out=self.TIMEOUT_BACKUP_CREATE)
def run_backup_list(self):
backup_list = self.auth_client.backups.list()
self.assert_backup_list(backup_list,
self.backup_count_prior_to_create + 1)
def assert_backup_list(self, backup_list, expected_count):
self.assert_equal(expected_count, len(backup_list),
'Unexpected number of backups found')
if expected_count:
backup = backup_list[0]
self.assert_equal(self.BACKUP_NAME, backup.name,
'Unexpected backup name')
self.assert_equal(self.BACKUP_DESC, backup.description,
'Unexpected backup description')
self.assert_not_equal(0.0, backup.size, 'Unexpected backup size')
self.assert_equal(self.instance_info.id, backup.instance_id,
'Unexpected instance id')
self.assert_equal('COMPLETED', backup.status,
'Unexpected backup status')
def run_backup_list_filter_datastore(self):
backup_list = self.auth_client.backups.list(
datastore=self.instance_info.dbaas_datastore)
self.assert_backup_list(backup_list,
self.backup_count_prior_to_create + 1)
def run_backup_list_filter_different_datastore(self):
backup_list = self.auth_client.backups.list(
datastore='Test_Datastore_1')
self.assert_backup_list(backup_list, 0)
def run_backup_list_filter_datastore_not_found(
self, expected_exception=exceptions.NotFound,
expected_http_code=404):
self.assert_raises(
expected_exception, expected_http_code,
self.auth_client.backups.list,
datastore='NOT_FOUND')
def run_backup_list_for_instance(self):
backup_list = self.auth_client.instances.backups(
self.instance_info.id)
self.assert_backup_list(backup_list,
self.backup_count_prior_to_create + 1)
def run_backup_get(self):
backup = self.auth_client.backups.get(self.backup_info.id)
self.assert_backup_list([backup], 1)
self.assert_equal(self.instance_info.dbaas_datastore,
backup.datastore['type'],
'Unexpected datastore type')
self.assert_equal(self.instance_info.dbaas_datastore_version,
backup.datastore['version'],
'Unexpected datastore version')
datastore_version = self.auth_client.datastore_versions.get(
self.instance_info.dbaas_datastore,
self.instance_info.dbaas_datastore_version)
self.assert_equal(datastore_version.id, backup.datastore['version_id'])
def run_backup_get_unauthorized_user(
self, expected_exception=exceptions.NotFound,
expected_http_code=404):
self._create_other_client()
self.assert_raises(
expected_exception, None,
self.other_client.backups.get, self.backup_info.id)
# we're using a different client, so we'll check the return code
# on it explicitly, instead of depending on 'assert_raises'
self.assert_client_code(expected_http_code=expected_http_code,
client=self.other_client)
def _create_other_client(self):
if not self.other_client:
requirements = Requirements(is_admin=False)
other_user = CONFIG.users.find_user(
requirements, black_list=[self.instance_info.user.auth_user])
self.other_client = create_dbaas_client(other_user)
def run_restore_from_backup(self):
self.assert_restore_from_backup(self.backup_info.id)
def assert_restore_from_backup(self, backup_ref):
result = self._restore_from_backup(backup_ref)
# TODO(peterstac) - This should probably return code 202
self.assert_client_code(200)
self.assert_equal('BUILD', result.status,
'Unexpected instance status')
self.restore_instance_id = result.id
def _restore_from_backup(self, backup_ref):
restore_point = {'backupRef': backup_ref}
result = self.auth_client.instances.create(
self.instance_info.name + '_restore',
self.instance_info.dbaas_flavor_href,
self.instance_info.volume,
restorePoint=restore_point)
return result
def run_restore_from_backup_completed(
self, expected_states=['BUILD', 'ACTIVE'],
# TODO(peterstac) - This should probably return code 202
expected_http_code=200):
self.assert_restore_from_backup_completed(
self.restore_instance_id, expected_states, expected_http_code)
self.restore_host = self.get_instance_host(self.restore_instance_id)
def assert_restore_from_backup_completed(
self, instance_id, expected_states, expected_http_code):
self.assert_instance_action(instance_id, expected_states,
expected_http_code)
def run_verify_data_in_restored_instance(self):
self.assert_verify_backup_data(self.restore_host)
def run_delete_restored_instance(
self, expected_states=['SHUTDOWN'],
expected_http_code=202):
self.assert_delete_restored_instance(
self.restore_instance_id, expected_states, expected_http_code)
def assert_delete_restored_instance(
self, instance_id, expected_states, expected_http_code):
self.auth_client.instances.delete(instance_id)
self.assert_instance_action(instance_id, expected_states,
expected_http_code)
self.assert_all_gone(instance_id, expected_states[-1])
def run_delete_unknown_backup(
self, expected_exception=exceptions.NotFound,
expected_http_code=404):
self.assert_raises(
expected_exception, expected_http_code,
self.auth_client.backups.delete,
'unknown_backup')
def run_delete_backup_unauthorized_user(
self, expected_exception=exceptions.NotFound,
expected_http_code=404):
self._create_other_client()
self.assert_raises(
expected_exception, None,
self.other_client.backups.delete, self.backup_info.id)
# we're using a different client, so we'll check the return code
# on it explicitly, instead of depending on 'assert_raises'
self.assert_client_code(expected_http_code=expected_http_code,
client=self.other_client)
def run_delete_backup(self, expected_http_code=202):
self.assert_delete_backup(self.backup_info.id, expected_http_code)
def assert_delete_backup(
self, backup_id, expected_http_code):
self.auth_client.backups.delete(backup_id)
self.assert_client_code(expected_http_code)
self._wait_until_backup_is_gone(backup_id)
def _wait_until_backup_is_gone(self, backup_id):
def _backup_is_gone():
try:
self.auth_client.backups.get(backup_id)
return False
except exceptions.NotFound:
return True
poll_until(_backup_is_gone,
time_out=self.TIMEOUT_BACKUP_DELETE)
def run_check_for_incremental_backup(
self, expected_exception=exceptions.NotFound,
expected_http_code=404):
if self.incremental_backup_info is None:
raise SkipTest("Incremental Backup not created")
self.assert_raises(
expected_exception, expected_http_code,
self.auth_client.backups.get,
self.incremental_backup_info.id)
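# Hypothetical helper sketch: the docstrings on assert_add_data_for_backup and
# assert_verify_backup_data above require the datastore helper to expose
# large-data hooks. This class only illustrates that contract; it is not part
# of the test suite, and real helpers live in trove.tests.scenario.helpers.
class _ExampleBackupHelper(object):
    def add_large_data(self, host, *args, **kwargs):
        # Write a sizeable, deterministic data set so the backup captures
        # non-trivial content that can later be verified.
        pass

    def verify_large_data(self, host, *args, **kwargs):
        # Re-read the data written by add_large_data and fail loudly on any
        # mismatch after a restore.
        pass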
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import os
import re
from typing import Dict, Optional, Iterable, Iterator, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.cloud.bigquery_storage_v1.types import storage
from google.cloud.bigquery_storage_v1.types import stream
from google.cloud.bigquery_storage_v1.types import table
from google.protobuf import timestamp_pb2 # type: ignore
from google.rpc import status_pb2 # type: ignore
from .transports.base import BigQueryWriteTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import BigQueryWriteGrpcTransport
from .transports.grpc_asyncio import BigQueryWriteGrpcAsyncIOTransport
class BigQueryWriteClientMeta(type):
"""Metaclass for the BigQueryWrite client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = OrderedDict() # type: Dict[str, Type[BigQueryWriteTransport]]
_transport_registry["grpc"] = BigQueryWriteGrpcTransport
_transport_registry["grpc_asyncio"] = BigQueryWriteGrpcAsyncIOTransport
    def get_transport_class(cls, label: Optional[str] = None,) -> Type[BigQueryWriteTransport]:
"""Returns an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
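# Illustrative note (hedged example): with the registry above,
# get_transport_class("grpc") resolves to BigQueryWriteGrpcTransport, while
# get_transport_class() falls back to the first registered entry, which is
# also the gRPC transport here.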
class BigQueryWriteClient(metaclass=BigQueryWriteClientMeta):
"""BigQuery Write API.
The Write API can be used to write data to BigQuery.
For supplementary information about the Write API, see:
https://cloud.google.com/bigquery/docs/write-api
"""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "bigquerystorage.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
BigQueryWriteClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
BigQueryWriteClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> BigQueryWriteTransport:
"""Returns the transport used by the client instance.
Returns:
BigQueryWriteTransport: The transport used by the client
instance.
"""
return self._transport
@staticmethod
def table_path(project: str, dataset: str, table: str,) -> str:
"""Returns a fully-qualified table string."""
return "projects/{project}/datasets/{dataset}/tables/{table}".format(
project=project, dataset=dataset, table=table,
)
@staticmethod
def parse_table_path(path: str) -> Dict[str, str]:
"""Parses a table path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/datasets/(?P<dataset>.+?)/tables/(?P<table>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def write_stream_path(project: str, dataset: str, table: str, stream: str,) -> str:
"""Returns a fully-qualified write_stream string."""
return "projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}".format(
project=project, dataset=dataset, table=table, stream=stream,
)
@staticmethod
def parse_write_stream_path(path: str) -> Dict[str, str]:
"""Parses a write_stream path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/datasets/(?P<dataset>.+?)/tables/(?P<table>.+?)/streams/(?P<stream>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
@classmethod
def get_mtls_endpoint_and_cert_source(
cls, client_options: Optional[client_options_lib.ClientOptions] = None
):
"""Return the API endpoint and client cert source for mutual TLS.
The client cert source is determined in the following order:
(1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
client cert source is None.
(2) if `client_options.client_cert_source` is provided, use the provided one; if the
default client cert source exists, use the default one; otherwise the client cert
source is None.
The API endpoint is determined in the following order:
        (1) if `client_options.api_endpoint` is provided, use the provided one.
        (2) if `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the
        default mTLS endpoint; if the environment variable is "never", use the default API
        endpoint; otherwise if client cert source exists, use the default mTLS endpoint,
        otherwise use the default API endpoint.
More details can be found at https://google.aip.dev/auth/4114.
Args:
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. Only the `api_endpoint` and `client_cert_source` properties may be used
in this method.
Returns:
Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
client cert source to use.
Raises:
google.auth.exceptions.MutualTLSChannelError: If any errors happen.
"""
if client_options is None:
client_options = client_options_lib.ClientOptions()
use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_client_cert not in ("true", "false"):
raise ValueError(
"Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
)
if use_mtls_endpoint not in ("auto", "never", "always"):
raise MutualTLSChannelError(
"Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
)
# Figure out the client cert source to use.
client_cert_source = None
if use_client_cert == "true":
if client_options.client_cert_source:
client_cert_source = client_options.client_cert_source
elif mtls.has_default_client_cert_source():
client_cert_source = mtls.default_client_cert_source()
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
elif use_mtls_endpoint == "always" or (
use_mtls_endpoint == "auto" and client_cert_source
):
api_endpoint = cls.DEFAULT_MTLS_ENDPOINT
else:
api_endpoint = cls.DEFAULT_ENDPOINT
return api_endpoint, client_cert_source
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, BigQueryWriteTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the big query write client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, BigQueryWriteTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(
client_options
)
api_key_value = getattr(client_options, "api_key", None)
if api_key_value and credentials:
raise ValueError(
"client_options.api_key and credentials are mutually exclusive"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, BigQueryWriteTransport):
# transport is a BigQueryWriteTransport instance.
if credentials or client_options.credentials_file or api_key_value:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
if client_options.scopes:
raise ValueError(
"When providing a transport instance, provide its scopes "
"directly."
)
self._transport = transport
else:
import google.auth._default # type: ignore
if api_key_value and hasattr(
google.auth._default, "get_api_key_credentials"
):
credentials = google.auth._default.get_api_key_credentials(
api_key_value
)
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
credentials_file=client_options.credentials_file,
host=api_endpoint,
scopes=client_options.scopes,
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
always_use_jwt_access=True,
)
def create_write_stream(
self,
request: Union[storage.CreateWriteStreamRequest, dict] = None,
*,
parent: str = None,
write_stream: stream.WriteStream = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> stream.WriteStream:
r"""Creates a write stream to the given table. Additionally, every
table has a special stream named '_default' to which data can be
written. This stream doesn't need to be created using
CreateWriteStream. It is a stream that can be used
simultaneously by any number of clients. Data written to this
stream is considered committed as soon as an acknowledgement is
received.
.. code-block:: python
from google.cloud import bigquery_storage_v1
def sample_create_write_stream():
# Create a client
client = bigquery_storage_v1.BigQueryWriteClient()
# Initialize request argument(s)
request = bigquery_storage_v1.CreateWriteStreamRequest(
parent="parent_value",
)
# Make the request
response = client.create_write_stream(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.bigquery_storage_v1.types.CreateWriteStreamRequest, dict]):
The request object. Request message for
`CreateWriteStream`.
parent (str):
Required. Reference to the table to which the stream
belongs, in the format of
``projects/{project}/datasets/{dataset}/tables/{table}``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
write_stream (google.cloud.bigquery_storage_v1.types.WriteStream):
Required. Stream to be created.
This corresponds to the ``write_stream`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.bigquery_storage_v1.types.WriteStream:
Information about a single stream
that gets data inside the storage
system.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, write_stream])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a storage.CreateWriteStreamRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, storage.CreateWriteStreamRequest):
request = storage.CreateWriteStreamRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if write_stream is not None:
request.write_stream = write_stream
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.create_write_stream]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def append_rows(
self,
requests: Iterator[storage.AppendRowsRequest] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> Iterable[storage.AppendRowsResponse]:
r"""Appends data to the given stream.
If ``offset`` is specified, the ``offset`` is checked against
the end of stream. The server returns ``OUT_OF_RANGE`` in
``AppendRowsResponse`` if an attempt is made to append to an
offset beyond the current end of the stream or
``ALREADY_EXISTS`` if user provides an ``offset`` that has
already been written to. User can retry with adjusted offset
within the same RPC connection. If ``offset`` is not specified,
append happens at the end of the stream.
The response contains an optional offset at which the append
happened. No offset information will be returned for appends to
a default stream.
Responses are received in the same order in which requests are
        sent. There will be one response for each successfully inserted
        request. Responses may optionally embed error information if the
        originating AppendRequest was not successfully processed.
The specifics of when successfully appended data is made visible
to the table are governed by the type of stream:
- For COMMITTED streams (which includes the default stream),
data is visible immediately upon successful append.
- For BUFFERED streams, data is made visible via a subsequent
``FlushRows`` rpc which advances a cursor to a newer offset
in the stream.
- For PENDING streams, data is not made visible until the
stream itself is finalized (via the ``FinalizeWriteStream``
rpc), and the stream is explicitly committed via the
``BatchCommitWriteStreams`` rpc.
Note: For users coding against the gRPC api directly, it may be
necessary to supply the x-goog-request-params system parameter
with ``write_stream=<full_write_stream_name>``.
More information about system parameters:
https://cloud.google.com/apis/docs/system-parameters
.. code-block:: python
from google.cloud import bigquery_storage_v1
def sample_append_rows():
# Create a client
client = bigquery_storage_v1.BigQueryWriteClient()
# Initialize request argument(s)
request = bigquery_storage_v1.AppendRowsRequest(
write_stream="write_stream_value",
)
# This method expects an iterator which contains
# 'bigquery_storage_v1.AppendRowsRequest' objects
# Here we create a generator that yields a single `request` for
# demonstrative purposes.
requests = [request]
def request_generator():
for request in requests:
yield request
# Make the request
stream = client.append_rows(requests=request_generator())
# Handle the response
for response in stream:
print(response)
Args:
requests (Iterator[google.cloud.bigquery_storage_v1.types.AppendRowsRequest]):
The request object iterator. Request message for `AppendRows`.
Due to the nature of AppendRows being a bidirectional
streaming RPC, certain parts of the AppendRowsRequest
need only be specified for the first request sent each
time the gRPC network connection is opened/reopened.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
Iterable[google.cloud.bigquery_storage_v1.types.AppendRowsResponse]:
Response message for AppendRows.
"""
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.append_rows]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (gapic_v1.routing_header.to_grpc_metadata(()),)
# Send the request.
response = rpc(requests, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def get_write_stream(
self,
request: Union[storage.GetWriteStreamRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> stream.WriteStream:
r"""Gets information about a write stream.
.. code-block:: python
from google.cloud import bigquery_storage_v1
def sample_get_write_stream():
# Create a client
client = bigquery_storage_v1.BigQueryWriteClient()
# Initialize request argument(s)
request = bigquery_storage_v1.GetWriteStreamRequest(
name="name_value",
)
# Make the request
response = client.get_write_stream(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.bigquery_storage_v1.types.GetWriteStreamRequest, dict]):
The request object. Request message for
`GetWriteStreamRequest`.
name (str):
Required. Name of the stream to get, in the form of
``projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.bigquery_storage_v1.types.WriteStream:
Information about a single stream
that gets data inside the storage
system.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a storage.GetWriteStreamRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, storage.GetWriteStreamRequest):
request = storage.GetWriteStreamRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_write_stream]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def finalize_write_stream(
self,
request: Union[storage.FinalizeWriteStreamRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> storage.FinalizeWriteStreamResponse:
r"""Finalize a write stream so that no new data can be appended to
the stream. Finalize is not supported on the '_default' stream.
.. code-block:: python
from google.cloud import bigquery_storage_v1
def sample_finalize_write_stream():
# Create a client
client = bigquery_storage_v1.BigQueryWriteClient()
# Initialize request argument(s)
request = bigquery_storage_v1.FinalizeWriteStreamRequest(
name="name_value",
)
# Make the request
response = client.finalize_write_stream(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.bigquery_storage_v1.types.FinalizeWriteStreamRequest, dict]):
The request object. Request message for invoking
`FinalizeWriteStream`.
name (str):
Required. Name of the stream to finalize, in the form of
``projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.bigquery_storage_v1.types.FinalizeWriteStreamResponse:
Response message for FinalizeWriteStream.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a storage.FinalizeWriteStreamRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, storage.FinalizeWriteStreamRequest):
request = storage.FinalizeWriteStreamRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.finalize_write_stream]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def batch_commit_write_streams(
self,
request: Union[storage.BatchCommitWriteStreamsRequest, dict] = None,
*,
parent: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> storage.BatchCommitWriteStreamsResponse:
r"""Atomically commits a group of ``PENDING`` streams that belong to
the same ``parent`` table.
Streams must be finalized before commit and cannot be committed
multiple times. Once a stream is committed, data in the stream
becomes available for read operations.
.. code-block:: python
from google.cloud import bigquery_storage_v1
def sample_batch_commit_write_streams():
# Create a client
client = bigquery_storage_v1.BigQueryWriteClient()
# Initialize request argument(s)
request = bigquery_storage_v1.BatchCommitWriteStreamsRequest(
parent="parent_value",
write_streams=['write_streams_value_1', 'write_streams_value_2'],
)
# Make the request
response = client.batch_commit_write_streams(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.bigquery_storage_v1.types.BatchCommitWriteStreamsRequest, dict]):
The request object. Request message for
`BatchCommitWriteStreams`.
parent (str):
Required. Parent table that all the streams should
belong to, in the form of
``projects/{project}/datasets/{dataset}/tables/{table}``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.bigquery_storage_v1.types.BatchCommitWriteStreamsResponse:
Response message for BatchCommitWriteStreams.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a storage.BatchCommitWriteStreamsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, storage.BatchCommitWriteStreamsRequest):
request = storage.BatchCommitWriteStreamsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.batch_commit_write_streams
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def flush_rows(
self,
request: Union[storage.FlushRowsRequest, dict] = None,
*,
write_stream: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> storage.FlushRowsResponse:
r"""Flushes rows to a BUFFERED stream.
If users are appending rows to BUFFERED stream, flush operation
is required in order for the rows to become available for
reading. A Flush operation flushes up to any previously flushed
offset in a BUFFERED stream, to the offset specified in the
request.
Flush is not supported on the \_default stream, since it is not
BUFFERED.
.. code-block:: python
from google.cloud import bigquery_storage_v1
def sample_flush_rows():
# Create a client
client = bigquery_storage_v1.BigQueryWriteClient()
# Initialize request argument(s)
request = bigquery_storage_v1.FlushRowsRequest(
write_stream="write_stream_value",
)
# Make the request
response = client.flush_rows(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.bigquery_storage_v1.types.FlushRowsRequest, dict]):
The request object. Request message for `FlushRows`.
write_stream (str):
Required. The stream that is the
target of the flush operation.
This corresponds to the ``write_stream`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.bigquery_storage_v1.types.FlushRowsResponse:
                Response message for FlushRows.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([write_stream])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a storage.FlushRowsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, storage.FlushRowsRequest):
request = storage.FlushRowsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if write_stream is not None:
request.write_stream = write_stream
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.flush_rows]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("write_stream", request.write_stream),)
),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
"""Releases underlying transport's resources.
.. warning::
ONLY use as a context manager if the transport is NOT shared
with other clients! Exiting the with block will CLOSE the transport
and may cause errors in other clients!
"""
self.transport.close()
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-bigquery-storage",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("BigQueryWriteClient",)
|
|
"""Test stepping through ObjC method dispatch in various forms."""
from __future__ import print_function
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestObjCStepping(TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line numbers that we will step to in main:
self.main_source = "stepping-tests.m"
self.source_randomMethod_line = line_number(
self.main_source, '// Source randomMethod start line.')
self.sourceBase_randomMethod_line = line_number(
self.main_source, '// SourceBase randomMethod start line.')
self.source_returnsStruct_start_line = line_number(
self.main_source, '// Source returnsStruct start line.')
self.sourceBase_returnsStruct_start_line = line_number(
self.main_source, '// SourceBase returnsStruct start line.')
self.stepped_past_nil_line = line_number(
self.main_source, '// Step over nil should stop here.')
@skipUnlessDarwin
@add_test_categories(['pyapi', 'basic_process'])
def test_with_python_api(self):
"""Test stepping through ObjC method dispatch in various forms."""
self.build()
exe = self.getBuildArtifact("a.out")
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
self.main_source_spec = lldb.SBFileSpec(self.main_source)
breakpoints_to_disable = []
break1 = target.BreakpointCreateBySourceRegex(
"// Set first breakpoint here.", self.main_source_spec)
self.assertTrue(break1, VALID_BREAKPOINT)
breakpoints_to_disable.append(break1)
break2 = target.BreakpointCreateBySourceRegex(
"// Set second breakpoint here.", self.main_source_spec)
self.assertTrue(break2, VALID_BREAKPOINT)
breakpoints_to_disable.append(break2)
break3 = target.BreakpointCreateBySourceRegex(
'// Set third breakpoint here.', self.main_source_spec)
self.assertTrue(break3, VALID_BREAKPOINT)
breakpoints_to_disable.append(break3)
break4 = target.BreakpointCreateBySourceRegex(
'// Set fourth breakpoint here.', self.main_source_spec)
self.assertTrue(break4, VALID_BREAKPOINT)
breakpoints_to_disable.append(break4)
break5 = target.BreakpointCreateBySourceRegex(
'// Set fifth breakpoint here.', self.main_source_spec)
self.assertTrue(break5, VALID_BREAKPOINT)
breakpoints_to_disable.append(break5)
break_returnStruct_call_super = target.BreakpointCreateBySourceRegex(
'// Source returnsStruct call line.', self.main_source_spec)
self.assertTrue(break_returnStruct_call_super, VALID_BREAKPOINT)
breakpoints_to_disable.append(break_returnStruct_call_super)
break_step_nil = target.BreakpointCreateBySourceRegex(
'// Set nil step breakpoint here.', self.main_source_spec)
self.assertTrue(break_step_nil, VALID_BREAKPOINT)
# Now launch the process, and do not stop at entry point.
process = target.LaunchSimple(
None, None, self.get_process_working_directory())
self.assertTrue(process, PROCESS_IS_VALID)
# The stop reason of the thread should be breakpoint.
threads = lldbutil.get_threads_stopped_at_breakpoint(process, break1)
if len(threads) != 1:
self.fail("Failed to stop at breakpoint 1.")
thread = threads[0]
mySource = thread.GetFrameAtIndex(0).FindVariable("mySource")
self.assertTrue(mySource, "Found mySource local variable.")
mySource_isa = mySource.GetChildMemberWithName("isa")
self.assertTrue(mySource_isa, "Found mySource->isa local variable.")
className = mySource_isa.GetSummary()
if self.TraceOn():
print(mySource_isa)
        # Let's delete mySource so we can check that, after stepping, a child
        # variable with no parent persists and remains usable.
del (mySource)
# Now step in, that should leave us in the Source randomMethod:
thread.StepInto()
line_number = thread.GetFrameAtIndex(0).GetLineEntry().GetLine()
self.assertTrue(
line_number == self.source_randomMethod_line,
"Stepped into Source randomMethod.")
# Now step in again, through the super call, and that should leave us
# in the SourceBase randomMethod:
thread.StepInto()
line_number = thread.GetFrameAtIndex(0).GetLineEntry().GetLine()
self.assertTrue(
line_number == self.sourceBase_randomMethod_line,
"Stepped through super into SourceBase randomMethod.")
threads = lldbutil.continue_to_breakpoint(process, break2)
self.assertTrue(
len(threads) == 1,
"Continued to second breakpoint in main.")
# Again, step in twice gets us to a stret method and a stret super
# call:
thread = threads[0]
thread.StepInto()
line_number = thread.GetFrameAtIndex(0).GetLineEntry().GetLine()
self.assertTrue(
line_number == self.source_returnsStruct_start_line,
"Stepped into Source returnsStruct.")
threads = lldbutil.continue_to_breakpoint(
process, break_returnStruct_call_super)
self.assertTrue(
len(threads) == 1,
"Stepped to the call super line in Source returnsStruct.")
thread = threads[0]
thread.StepInto()
line_number = thread.GetFrameAtIndex(0).GetLineEntry().GetLine()
self.assertTrue(
line_number == self.sourceBase_returnsStruct_start_line,
"Stepped through super into SourceBase returnsStruct.")
# Cool now continue to get past the call that initializes the Observer, and then do our steps in again to see that
# we can find our way when we're stepping through a KVO swizzled
# object.
threads = lldbutil.continue_to_breakpoint(process, break3)
self.assertTrue(
len(threads) == 1,
"Continued to third breakpoint in main, our object should now be swizzled.")
newClassName = mySource_isa.GetSummary()
if self.TraceOn():
print("className is %s, newClassName is %s" % (className, newClassName))
print(mySource_isa)
self.assertTrue(
newClassName != className,
"The isa did indeed change, swizzled!")
# Now step in, that should leave us in the Source randomMethod:
thread = threads[0]
thread.StepInto()
line_number = thread.GetFrameAtIndex(0).GetLineEntry().GetLine()
self.assertTrue(
line_number == self.source_randomMethod_line,
"Stepped into Source randomMethod in swizzled object.")
# Now step in again, through the super call, and that should leave us
# in the SourceBase randomMethod:
thread.StepInto()
line_number = thread.GetFrameAtIndex(0).GetLineEntry().GetLine()
self.assertTrue(
line_number == self.sourceBase_randomMethod_line,
"Stepped through super into SourceBase randomMethod in swizzled object.")
threads = lldbutil.continue_to_breakpoint(process, break4)
self.assertTrue(
len(threads) == 1,
"Continued to fourth breakpoint in main.")
thread = threads[0]
# Again, step in twice gets us to a stret method and a stret super
# call:
thread.StepInto()
line_number = thread.GetFrameAtIndex(0).GetLineEntry().GetLine()
self.assertTrue(
line_number == self.source_returnsStruct_start_line,
"Stepped into Source returnsStruct in swizzled object.")
threads = lldbutil.continue_to_breakpoint(
process, break_returnStruct_call_super)
self.assertTrue(
len(threads) == 1,
"Stepped to the call super line in Source returnsStruct - second time.")
thread = threads[0]
thread.StepInto()
line_number = thread.GetFrameAtIndex(0).GetLineEntry().GetLine()
self.assertTrue(
line_number == self.sourceBase_returnsStruct_start_line,
"Stepped through super into SourceBase returnsStruct in swizzled object.")
for bkpt in breakpoints_to_disable:
bkpt.SetEnabled(False)
threads = lldbutil.continue_to_breakpoint(process, break_step_nil)
        self.assertTrue(len(threads) == 1, "Continued to step nil breakpoint.")
        thread = threads[0]
        thread.StepInto()
line_number = thread.GetFrameAtIndex(0).GetLineEntry().GetLine()
self.assertTrue(
line_number == self.stepped_past_nil_line,
"Step in over dispatch to nil stepped over.")
|
|
from __future__ import unicode_literals
import base64
from datetime import datetime, timedelta
import logging
import string
from django.conf import settings
from django.core.exceptions import SuspiciousOperation
from django.utils.crypto import constant_time_compare
from django.utils.crypto import get_random_string
from django.utils.crypto import salted_hmac
from django.utils import timezone
from django.utils.encoding import force_bytes, force_text
from django.utils.module_loading import import_string
from django.contrib.sessions.exceptions import SuspiciousSession
# session_key should not be case sensitive because some backends can store it
# on case insensitive file systems.
VALID_KEY_CHARS = string.ascii_lowercase + string.digits
class CreateError(Exception):
"""
Used internally as a consistent exception type to catch from save (see the
docstring for SessionBase.save() for details).
"""
pass
class SessionBase(object):
"""
Base class for all Session classes.
"""
TEST_COOKIE_NAME = 'testcookie'
TEST_COOKIE_VALUE = 'worked'
def __init__(self, session_key=None):
self._session_key = session_key
self.accessed = False
self.modified = False
self.serializer = import_string(settings.SESSION_SERIALIZER)
def __contains__(self, key):
return key in self._session
def __getitem__(self, key):
return self._session[key]
def __setitem__(self, key, value):
self._session[key] = value
self.modified = True
def __delitem__(self, key):
del self._session[key]
self.modified = True
def get(self, key, default=None):
return self._session.get(key, default)
def pop(self, key, *args):
self.modified = self.modified or key in self._session
return self._session.pop(key, *args)
def setdefault(self, key, value):
if key in self._session:
return self._session[key]
else:
self.modified = True
self._session[key] = value
return value
def set_test_cookie(self):
self[self.TEST_COOKIE_NAME] = self.TEST_COOKIE_VALUE
def test_cookie_worked(self):
return self.get(self.TEST_COOKIE_NAME) == self.TEST_COOKIE_VALUE
def delete_test_cookie(self):
del self[self.TEST_COOKIE_NAME]
def _hash(self, value):
key_salt = "django.contrib.sessions" + self.__class__.__name__
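        # e.g. "django.contrib.sessions" + "SessionStore" for the database backend.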
return salted_hmac(key_salt, value).hexdigest()
def encode(self, session_dict):
"Returns the given session dictionary serialized and encoded as a string."
serialized = self.serializer().dumps(session_dict)
hash = self._hash(serialized)
return base64.b64encode(hash.encode() + b":" + serialized).decode('ascii')
def decode(self, session_data):
encoded_data = base64.b64decode(force_bytes(session_data))
try:
# could produce ValueError if there is no ':'
hash, serialized = encoded_data.split(b':', 1)
expected_hash = self._hash(serialized)
if not constant_time_compare(hash.decode(), expected_hash):
raise SuspiciousSession("Session data corrupted")
else:
return self.serializer().loads(serialized)
except Exception as e:
# ValueError, SuspiciousOperation, unpickling exceptions. If any of
# these happen, just return an empty dictionary (an empty session).
if isinstance(e, SuspiciousOperation):
logger = logging.getLogger('django.security.%s' %
e.__class__.__name__)
logger.warning(force_text(e))
return {}
def update(self, dict_):
self._session.update(dict_)
self.modified = True
def has_key(self, key):
return key in self._session
def keys(self):
return self._session.keys()
def values(self):
return self._session.values()
def items(self):
return self._session.items()
def iterkeys(self):
return self._session.iterkeys()
def itervalues(self):
return self._session.itervalues()
def iteritems(self):
return self._session.iteritems()
def clear(self):
# To avoid unnecessary persistent storage accesses, we set up the
# internals directly (loading data wastes time, since we are going to
# set it to an empty dict anyway).
self._session_cache = {}
self.accessed = True
self.modified = True
def is_empty(self):
"Returns True when there is no session_key and the session is empty"
try:
return not bool(self._session_key) and not self._session_cache
except AttributeError:
return True
def _get_new_session_key(self):
"Returns session key that isn't being used."
while True:
session_key = get_random_string(32, VALID_KEY_CHARS)
if not self.exists(session_key):
break
return session_key
def _get_or_create_session_key(self):
if self._session_key is None:
self._session_key = self._get_new_session_key()
return self._session_key
def _get_session_key(self):
return self._session_key
session_key = property(_get_session_key)
def _get_session(self, no_load=False):
"""
Lazily loads session from storage (unless "no_load" is True, when only
an empty dict is stored) and stores it in the current instance.
"""
self.accessed = True
try:
return self._session_cache
except AttributeError:
if self.session_key is None or no_load:
self._session_cache = {}
else:
self._session_cache = self.load()
return self._session_cache
_session = property(_get_session)
def get_expiry_age(self, **kwargs):
"""Get the number of seconds until the session expires.
Optionally, this function accepts `modification` and `expiry` keyword
arguments specifying the modification and expiry of the session.
"""
try:
modification = kwargs['modification']
except KeyError:
modification = timezone.now()
# Make the difference between "expiry=None passed in kwargs" and
# "expiry not passed in kwargs", in order to guarantee not to trigger
# self.load() when expiry is provided.
try:
expiry = kwargs['expiry']
except KeyError:
expiry = self.get('_session_expiry')
if not expiry: # Checks both None and 0 cases
return settings.SESSION_COOKIE_AGE
if not isinstance(expiry, datetime):
return expiry
delta = expiry - modification
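        # e.g. an expiry 2 days and 30 seconds after modification yields
        # 2 * 86400 + 30 == 172830 seconds.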
return delta.days * 86400 + delta.seconds
def get_expiry_date(self, **kwargs):
"""Get session the expiry date (as a datetime object).
Optionally, this function accepts `modification` and `expiry` keyword
arguments specifying the modification and expiry of the session.
"""
try:
modification = kwargs['modification']
except KeyError:
modification = timezone.now()
# Same comment as in get_expiry_age
try:
expiry = kwargs['expiry']
except KeyError:
expiry = self.get('_session_expiry')
if isinstance(expiry, datetime):
return expiry
if not expiry: # Checks both None and 0 cases
expiry = settings.SESSION_COOKIE_AGE
return modification + timedelta(seconds=expiry)
def set_expiry(self, value):
"""
Sets a custom expiration for the session. ``value`` can be an integer,
a Python ``datetime`` or ``timedelta`` object or ``None``.
If ``value`` is an integer, the session will expire after that many
seconds of inactivity. If set to ``0`` then the session will expire on
browser close.
If ``value`` is a ``datetime`` or ``timedelta`` object, the session
will expire at that specific future time.
If ``value`` is ``None``, the session uses the global session expiry
policy.
"""
if value is None:
# Remove any custom expiration for this session.
try:
del self['_session_expiry']
except KeyError:
pass
return
if isinstance(value, timedelta):
value = timezone.now() + value
self['_session_expiry'] = value
def get_expire_at_browser_close(self):
"""
Returns ``True`` if the session is set to expire when the browser
closes, and ``False`` if there's an expiry date. Use
``get_expiry_date()`` or ``get_expiry_age()`` to find the actual expiry
date/age, if there is one.
"""
if self.get('_session_expiry') is None:
return settings.SESSION_EXPIRE_AT_BROWSER_CLOSE
return self.get('_session_expiry') == 0
def flush(self):
"""
Removes the current session data from the database and regenerates the
key.
"""
self.clear()
self.delete()
self._session_key = None
def cycle_key(self):
"""
Creates a new session key, whilst retaining the current session data.
"""
data = self._session_cache
key = self.session_key
self.create()
self._session_cache = data
self.delete(key)
# Methods that child classes must implement.
def exists(self, session_key):
"""
Returns True if the given session_key already exists.
"""
raise NotImplementedError('subclasses of SessionBase must provide an exists() method')
def create(self):
"""
Creates a new session instance. Guaranteed to create a new object with
a unique key and will have saved the result once (with empty data)
before the method returns.
"""
raise NotImplementedError('subclasses of SessionBase must provide a create() method')
def save(self, must_create=False):
"""
Saves the session data. If 'must_create' is True, a new session object
is created (otherwise a CreateError exception is raised). Otherwise,
save() can update an existing object with the same key.
"""
raise NotImplementedError('subclasses of SessionBase must provide a save() method')
def delete(self, session_key=None):
"""
Deletes the session data under this key. If the key is None, the
current session key value is used.
"""
raise NotImplementedError('subclasses of SessionBase must provide a delete() method')
def load(self):
"""
Loads the session data and returns a dictionary.
"""
raise NotImplementedError('subclasses of SessionBase must provide a load() method')
@classmethod
def clear_expired(cls):
"""
Remove expired sessions from the session store.
If this operation isn't possible on a given backend, it should raise
NotImplementedError. If it isn't necessary, because the backend has
a built-in expiration mechanism, it should be a no-op.
"""
raise NotImplementedError('This backend does not support clear_expired().')
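# --- Hedged usage sketch; not part of Django itself ---
# Demonstrates the "hash:payload" scheme implemented by encode()/decode()
# above with a throwaway in-memory backend. The settings.configure()
# values below are illustrative assumptions only.
if __name__ == '__main__':
    if not settings.configured:
        settings.configure(
            SECRET_KEY='demo-secret-key',
            SESSION_SERIALIZER='django.contrib.sessions.serializers.JSONSerializer',
        )

    class DemoSession(SessionBase):
        """Minimal backend: just enough for encode()/decode() round-trips."""
        def exists(self, session_key):
            return False
        def create(self):
            self._session_key = self._get_new_session_key()
        def save(self, must_create=False):
            pass
        def delete(self, session_key=None):
            pass
        def load(self):
            return {}

    session = DemoSession()
    blob = session.encode({'user_id': 42})
    assert session.decode(blob) == {'user_id': 42}
    # A tampered payload fails the constant-time HMAC check and decodes to {}.
    assert session.decode(blob[:-4] + 'AAAA') == {}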
|
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from colorama import Fore
from spotify_ripper.utils import *
import os
import time
import spotify
import requests
import re
class WebAPI(object):
def __init__(self, args, ripper):
self.args = args
self.ripper = ripper
self.cache = {}
def cache_result(self, uri, result):
self.cache[uri] = result
def get_cached_result(self, uri):
return self.cache.get(uri)
def request_json(self, url, msg):
print(Fore.GREEN + "Attempting to retrieve " + msg +
" from Spotify's Web API" + Fore.RESET)
print(Fore.CYAN + url + Fore.RESET)
req = requests.get(url)
if req.status_code == 200:
return req.json()
else:
print(Fore.YELLOW + "URL returned non-200 HTTP code: " +
str(req.status_code) + Fore.RESET)
return None
def api_url(self, url_path):
return 'https://api.spotify.com/v1/' + url_path
def charts_url(self, url_path):
return 'https://spotifycharts.com/api/' + url_path
# excludes 'appears on' albums for artist
def get_albums_with_filter(self, uri):
args = self.args
album_type = ('&album_type=' + args.artist_album_type[0]) \
if args.artist_album_type is not None else ""
market = ('&market=' + args.artist_album_market[0]) \
if args.artist_album_market is not None else ""
        def get_albums_json(offset):
            url = self.api_url(
                'artists/' + uri_tokens[2] +
                '/albums?limit=50&offset=' + str(offset) +
                album_type + market)
return self.request_json(url, "albums")
# check for cached result
cached_result = self.get_cached_result(uri)
if cached_result is not None:
return cached_result
# extract artist id from uri
uri_tokens = uri.split(':')
if len(uri_tokens) != 3:
return []
# it is possible we won't get all the albums on the first request
offset = 0
album_uris = []
total = None
while total is None or offset < total:
try:
                # rate limit if not first request
                if total is not None:
                    time.sleep(1.0)
albums = get_albums_json(offset)
if albums is None:
break
# extract album URIs
album_uris += [album['uri'] for album in albums['items']]
offset = len(album_uris)
if total is None:
total = albums['total']
            except KeyError:
                break
print(str(len(album_uris)) + " albums found")
self.cache_result(uri, album_uris)
return album_uris
def get_artists_on_album(self, uri):
def get_album_json(album_id):
url = self.api_url('albums/' + album_id)
return self.request_json(url, "album")
# check for cached result
cached_result = self.get_cached_result(uri)
if cached_result is not None:
return cached_result
# extract album id from uri
uri_tokens = uri.split(':')
if len(uri_tokens) != 3:
return None
album = get_album_json(uri_tokens[2])
if album is None:
return None
result = [artist['name'] for artist in album['artists']]
self.cache_result(uri, result)
return result
# genre_type can be "artist" or "album"
def get_genres(self, genre_type, track):
def get_genre_json(spotify_id):
url = self.api_url(genre_type + 's/' + spotify_id)
return self.request_json(url, "genres")
# extract album id from uri
item = track.artists[0] if genre_type == "artist" else track.album
uri = item.link.uri
# check for cached result
cached_result = self.get_cached_result(uri)
if cached_result is not None:
return cached_result
uri_tokens = uri.split(':')
if len(uri_tokens) != 3:
return None
json_obj = get_genre_json(uri_tokens[2])
if json_obj is None:
return None
result = json_obj["genres"]
self.cache_result(uri, result)
return result
# doesn't seem to be officially supported by Spotify
def get_charts(self, uri):
def get_tracks_json(metrics, region, time_window, from_date):
limit = "50" if metrics == "viral" else "200"
url = self.charts_url(
"?limit=" + limit + "&country=" + region +
"&recurrence=" + time_window + "&date=" + from_date +
"&type=" + metrics)
return self.request_json(url, region + " " + metrics + " charts")
# check for cached result
cached_result = self.get_cached_result(uri)
if cached_result is not None:
return cached_result
# spotify:charts:metric:region:time_window:date
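        # e.g. spotify:charts:regional:us:weekly:latest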
uri_tokens = uri.split(':')
if len(uri_tokens) != 6:
return None
# some sanity checking
valid_metrics = {"regional", "viral"}
valid_regions = {"us", "gb", "ad", "ar", "at", "au", "be", "bg", "bo",
"br", "ca", "ch", "cl", "co", "cr", "cy", "cz", "de",
"dk", "do", "ec", "ee", "es", "fi", "fr", "gr", "gt",
"hk", "hn", "hu", "id", "ie", "is", "it", "lt", "lu",
"lv", "mt", "mx", "my", "ni", "nl", "no", "nz", "pa",
"pe", "ph", "pl", "pt", "py", "se", "sg", "sk", "sv",
"tr", "tw", "uy", "global"}
valid_windows = {"daily", "weekly"}
def sanity_check(val, valid_set):
if val not in valid_set:
print(Fore.YELLOW +
"Not a valid Spotify charts URI parameter: " +
val + Fore.RESET)
print("Valid parameter options are: [" +
", ".join(valid_set)) + "]"
return False
return True
def sanity_check_date(val):
if re.match(r"^\d{4}-\d{2}-\d{2}$", val) is None and \
val != "latest":
print(Fore.YELLOW +
"Not a valid Spotify charts URI parameter: " +
val + Fore.RESET)
print("Valid parameter options are: ['latest', a date "
"(e.g. 2016-01-21)]")
return False
return True
check_results = sanity_check(uri_tokens[2], valid_metrics) and \
sanity_check(uri_tokens[3], valid_regions) and \
sanity_check(uri_tokens[4], valid_windows) and \
sanity_check_date(uri_tokens[5])
if not check_results:
print("Generally, a charts URI follow the pattern "
"spotify:charts:metric:region:time_window:date")
return None
json_obj = get_tracks_json(uri_tokens[2], uri_tokens[3],
uri_tokens[4], uri_tokens[5])
if json_obj is None:
return None
self.cache_result(uri, json_obj)
return json_obj
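# --- Hedged sketch; not part of spotify_ripper itself ---
# Illustrates the offset/total paging pattern used by
# get_albums_with_filter() above, with a fake page fetcher standing in
# for the Web API.
if __name__ == '__main__':
    PAGES = {
        0: {'items': list(range(50)), 'total': 120},
        50: {'items': list(range(50)), 'total': 120},
        100: {'items': list(range(20)), 'total': 120},
    }

    def fetch_page(offset):
        # Stand-in for request_json(); returns None past the last page.
        return PAGES.get(offset)

    offset, total, items = 0, None, []
    while total is None or offset < total:
        page = fetch_page(offset)
        if page is None:
            break
        items += page['items']
        offset = len(items)
        if total is None:
            total = page['total']
    assert len(items) == 120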
|
|
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 16 15:40:29 2016
@author: okada
$Id: signature.py 205 2017-08-08 06:25:59Z aokada $
"""
########### js template
js_header = """(function() {
sig_data = {};
"""
js_footer = """
})();
Object.freeze(sig_data);
"""
js_dataset = """
sig_data.tooltip_format = {{
signature_title:{signature_title},
signature_partial:{signature_partial},
mutation_title:{mutation_title},
mutation_partial:{mutation_partial},
}};
sig_data.signatures = [{signatures}];
sig_data.sig_colors = [{colors}];
sig_data.dataset_sig = [{dataset_sig}];
sig_data.dataset_sig_max = {dataset_sig_max};
sig_data.route_id = [{route_id}];
sig_data.substitution = [{substruction}];
// [ID, signature, value]
sig_data.mutations = [{mutations}];
sig_data.mutation_count = [{mutation_count}];
sig_data.Ids = [{Ids}];
"""
js_substruction_template = "{{name: '{name}', color: '{color}', route: [{route}],}},"
########### HTML template
html_integral_template = """<table>
<tr>
<td style="vertical-align: top;" ><div style="float: left;" id="div_rate"></div></td>
<td style="vertical-align: top;><!-- legend --> <div style="float: left;" id='div_rate_legend_html'></div><div style="float: left;" id='div_rate_legend_svg'></div></td>
</tr>
<tr>
<td style="vertical-align: top;><div style="float: left;" id="div_integral"></div></td>
<td style="vertical-align: top;><!-- legend --> <div style="float: left;" id='div_integral_legend_html'></div><div style="float: left;" id='div_integral_legend_svg'></div></td>
</tr>
<tr>
<td colspan=2 style="padding-top: 20px;">
<p>View mode: <select id="chart_mode"></select></p>
<p>Sort by: <select id="chart_sort"></select></p>
</td>
</tr>
</table>
"""
########### functions
def output_html(params, config):
dataset = convert_tojs(params, config)
    if dataset is not None and dataset != {}:
create_html(dataset, params, config)
return dataset
def convert_tojs(params, config):
import os
import json
import math
import itertools
import paplot.subcode.tools as tools
import paplot.convert as convert
import paplot.color as color
# data read
try:
json_data = json.load(open(params["data"]))
    except Exception as e:
        print("failed to open data %s, %s" % (params["data"], str(e)))
return None
key_ids = tools.config_getstr(config, "result_format_signature", "key_id")
key_signature = tools.config_getstr(config, "result_format_signature", "key_signature")
key_mutations = tools.config_getstr(config, "result_format_signature", "key_mutation")
key_mutation_count = tools.config_getstr(config, "result_format_signature", "key_mutation_count")
sig_num = len(json_data[key_signature])
if sig_num == 0:
print ("no data %s" % params["data"])
return {}
# signature names
signature_list = []
for s in range(sig_num):
signature_list.append("Signature %d" % (s+1))
# each signature colors
sig_color_list = color.create_color_array(sig_num, color.r_set2)
# use background?
if tools.config_getboolean(config, "result_format_signature", "background"):
signature_list.append("Background ")
sig_color_list.append(color.r_set2_gray)
# axis-y max
sig_y_max = tools.config_getint(config, "signature", "signature_y_max")
if (sig_y_max < 0):
for sig in json_data[key_signature]:
for sub in sig:
m = max(sub)
if sig_y_max < m:
sig_y_max = m
# route list
sub_num = len(json_data[key_signature][0][0])
log = math.log(sub_num, 4)
if log % 1 > 0:
print ("substitution's list length is invalid (%d, not number 4^N)" % sub_num)
return None
route_id = []
route_list = []
for p in itertools.product(("A","C","G","T"), repeat = int(log)):
route_id.append("".join(p))
route_list.append(p)
# substruction
sub_di = [
{"name":"C > A", "ref":"C", "color":tools.config_getstr(config, "signature", "alt_color_CtoA")},
{"name":"C > G", "ref":"C", "color":tools.config_getstr(config, "signature", "alt_color_CtoG")},
{"name":"C > T", "ref":"C", "color":tools.config_getstr(config, "signature", "alt_color_CtoT")},
{"name":"T > A", "ref":"T", "color":tools.config_getstr(config, "signature", "alt_color_TtoA")},
{"name":"T > C", "ref":"T", "color":tools.config_getstr(config, "signature", "alt_color_TtoC")},
{"name":"T > G", "ref":"T", "color":tools.config_getstr(config, "signature", "alt_color_TtoG")},
]
substruction = ""
for sub in sub_di:
route = []
for r in route_list:
route.append("p".join(r[0:int(log/2)]) + "p" + sub["ref"] + "p" + "p".join(r[int(log/2):]))
substruction += js_substruction_template.format(name = sub["name"], color = sub["color"], route = convert.list_to_text(route))
# Id list
id_txt = ""
if key_ids in json_data:
id_txt = convert.list_to_text(json_data[key_ids])
# mutations
mutations_txt = ""
if key_mutations in json_data:
for m in json_data[key_mutations]:
mutations_txt += "[%d,%d,%f]," % (m[0],m[1],m[2])
# signature
dataset_sig = ""
for sig in json_data[key_signature]:
tmp = ""
for sub in sig:
tmp += "[" + ",".join(map(str, sub)) + "],"
dataset_sig += ("[" + tmp + "],")
mutation_count_txt = ""
if (key_mutation_count != "") and (key_mutation_count in json_data.keys()):
for v in json_data[key_mutation_count]:
mutation_count_txt += "%d," % v
# output
sig_num_sift = 0
if tools.config_getboolean(config, "result_format_signature", "background"):
sig_num_sift = 1
ellipsis = "%s%d" % (params["ellipsis"], (sig_num + sig_num_sift))
js_file = "data_%s.js" % ellipsis
html_file = "graph_%s.html" % ellipsis
keys_di = {"sig":"", "route":"", "id":""}
f = open(params["dir"] + "/" + js_file, "w")
f.write(js_header \
+ js_dataset.format(Ids = id_txt, \
signatures = convert.list_to_text(signature_list), \
colors = convert.list_to_text(sig_color_list), \
dataset_sig_max = sig_y_max, \
mutations = mutations_txt, \
dataset_sig = dataset_sig, \
route_id = convert.list_to_text(route_id), \
substruction = substruction, \
signature_title = convert.pyformat_to_jstooltip_text(keys_di, config, "signature", "", "tooltip_format_signature_title"), \
signature_partial = convert.pyformat_to_jstooltip_text(keys_di, config, "signature", "", "tooltip_format_signature_partial"), \
mutation_title = convert.pyformat_to_jstooltip_text(keys_di, config, "signature", "", "tooltip_format_mutation_title"), \
mutation_partial = convert.pyformat_to_jstooltip_text(keys_di, config, "signature", "", "tooltip_format_mutation_partial"), \
mutation_count = mutation_count_txt, \
)
)
f_template = open(os.path.dirname(os.path.abspath(__file__)) + "/templates/data_signature.js")
js_function = f_template.read()
f_template.close()
f.write(js_function)
f.write(js_footer)
f.close()
integral = True
if key_ids == "" or key_mutations == "" or key_mutation_count == "":
integral = False
return {"sig_num": sig_num,
"js": js_file,
"html": html_file,
"intergral": integral,
}
def create_html(dataset, params, config):
import os
import paplot.subcode.tools as tools
import paplot.prep as prep
html_div_template = "<div style='float: left;' id='div_pm{id}'></div>\n"
html_add_template = "sig_draw.add_div('div_pm{id}');\n"
div_text = ""
add_text = ""
for i in range(dataset["sig_num"]):
div_text += html_div_template.format(id = i)
add_text += html_add_template.format(id = i)
integral_text = ""
if dataset["intergral"] == True:
integral_text = html_integral_template
f_template = open(os.path.dirname(os.path.abspath(__file__)) + "/templates/graph_signature.html")
html_template = f_template.read()
f_template.close()
sig_num_sift = 0
if tools.config_getboolean(config, "result_format_signature", "background"):
sig_num_sift = 1
f_html = open(params["dir"] + "/" + dataset["html"], "w")
f_html.write(
html_template.format(project = params["project"],
title = "%s(#sig %d)" % (params["title"], dataset["sig_num"] + sig_num_sift),
data_js = dataset["js"],
version = prep.version_text(),
date = tools.now_string(),
divs = div_text,
add_divs = add_text,
integral = integral_text,
style = "../style/%s" % os.path.basename(tools.config_getpath(config, "style", "path", "default.js")),
))
f_html.close()
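# --- Hedged sketch; not part of paplot itself ---
# Shows how convert_tojs() derives the mutation-context labels: with 16
# contexts per substitution class (a 96-channel signature), log4(16) == 2
# flanking bases, and itertools.product() yields "AA" through "TT".
if __name__ == '__main__':
    import itertools
    import math
    sub_num = 16
    log = math.log(sub_num, 4)  # 2.0 -> two flanking bases
    route_id = ["".join(p) for p in
                itertools.product(("A", "C", "G", "T"), repeat=int(round(log)))]
    assert route_id[0] == "AA" and route_id[-1] == "TT"
    assert len(route_id) == sub_num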
|
|
'''
@summary: Handles all notifications for api, including alerts from metron.
Note: the username and password need to be hidden / parsed from an encrypted file.
@author: devopsec
'''
import sys
import traceback
from api import app, time_funcs
from api.parse_json import *
from api.decorators import async
from api.sql.models import user_data
from api.company.endpoint import companyUtils
from flask import jsonify, request, json, render_template
from flask_mail import Mail, Message
from flask_restful import Resource, reqparse
from starbase import Connection
import requests, base64, subprocess, os
# email server config #
app.config['MAIL_SERVER'] = 'smtp.gmail.com'
app.config['MAIL_PORT'] = 465
app.config['MAIL_USE_TLS'] = False
app.config['MAIL_USE_SSL'] = True
app.config['MAIL_USERNAME'] = 'threatdetectionservice@gmail.com'
app.config['MAIL_PASSWORD'] = 'flyball2011'
app.config['MAIL_ASCII_ATTACHMENTS'] = False
app.config['MAIL_DEBUG'] = True
app.config['MAIL_DEFAULT_SENDER'] = 'threatdetectionservice@gmail.com'
app.config['MAIL_DEFAULT_ADMIN'] = ['threatdetectionservice@gmail.com']
# create mail object #
mail = Mail(app)
# define hbase vars #
metronHBaseRestURL = "http://10.10.10.154"
metronHbaseRestPort = 9082
metronHBaseTable = "enrichment"
metronHBaseCF = "assets"
apiBaseRestUrl = "http://0.0.0.0:7777"
apiCompanyEndpoint = "/api/company/"
assetQueryURL = metronHBaseRestURL + ":" + str(metronHbaseRestPort) + "/" + metronHBaseTable
# abstraction of email and sms functions
@async
def send_async_email(app, msg):
''' sends mail asynchronously '''
with app.app_context():
mail.send(msg)
def send_email(recipients, text_body, html_body=None, subject="Threat Notification Service",
sender="threatdetectionservice@gmail.com", threat_data=None):
''' recipients and text_body params are required '''
# add threat_data if provided
    if threat_data is not None:
        text_body += threat_data
    msg = Message(recipients=recipients, subject=subject, body=text_body,
                  html=html_body, sender=sender)
    if html_body is not None:
pass
# TODO finish html template for emails
# msg.html = render_template('email.html', threat_level=threat_level, incident_type=incident_type,
# incident_source=incident_source, incident_time=incident_time,
# attack_method=attack_method, source=source, destination=destination)
else:
pass
# msg.html = html_body
send_async_email(app, msg)
@async
def send_sms(to, msg, frm="12485042987", threat_data=None):
    ''' to and msg params are required; frm defaults to the service number '''
CWD = os.getcwd()
script_path = os.path.join(CWD, 'notification', 'flowroute', 'send_sms.py')
# DEBUG
print("path " + script_path)
# TODO make path cross-platform compatible
python2_env = {"PYTHONPATH": "/usr/bin/python2.7"}
    if threat_data is not None:
        msg += threat_data
    # convert the 'to' arg into its string representation for the cmd line
# toStrList = []
# for number in to:
# toStrList.append(str(number))
cmd = "python2.7 {0} -t {1} -f {2} -m '{3}'".format(script_path, to, frm, msg)
print(cmd)
subprocess.run(cmd, env=python2_env, shell=True)
#out_str = subprocess.check_output(cmd, shell=True)
# DEBUG
# print(cmd)
# print(out_str)
class manageNotifications(Resource):
    ''' Handles processing of notifications with the following functions:
Post threat notification into an email or sms message and alert users
Process threat-intel and conditionally alert user as threat notification '''
threat_data = None # class variable
contact_info = [] # class variable
def post(self):
''' process a notification '''
MASS_ALERT_FLAG = False
URL = request.url
# process alert #
if URL.find("api/notifications/alert") > 0:
try:
parser = reqparse.RequestParser()
parser.add_argument('threat_intel', type=dict, location='json')
args = parser.parse_args()
if args['threat_intel']['_source']['is_alert'] == "true":
# gather necessary info from threat_intel
manageNotifications.threat_data = {
"index": args['threat_intel']['_index'],
"score": args['threat_intel']['_score'],
"threat_level": args['threat_intel']['_source']['threat.triage.level'],
"source": args['threat_intel']['_source']['source.type'],
"ip_src_addr": args['threat_intel']['_source']['ip_src_addr'],
"ip_dst_addr": args['threat_intel']['_source']['ip_dst_addr'],
"url": args['threat_intel']['_source']['url'],
"time": time_funcs.convert_epoch_ts(args['threat_intel']['_source']['timestamp'])
}
# TODO enrich threat-intel in metron with source company name
# TODO check threat_data to find where alert is from (what company)
company = "ALL"
company = "Flyball-Labs"
if company == "ALL": # for alerting all companies, in event of a data breach
MASS_ALERT_FLAG = True
response = companyUtils.get_all_poc_list()
else: # alert a single company
response = companyUtils.get_company_poc_list(company)
# gather contact info from company & get notification settings for each poc
                    if MASS_ALERT_FLAG:
if response['response'] == 200:
all_poc_list = response['all_company_poc']
for co in all_poc_list:
for poc in co['poc']:
user = user_data.query.filter_by(username=poc).first()
manageNotifications.contact_info.append({
"name": user.firstname,
"phone": user.phone_number,
"email": user.email,
"alert_type": user.notification['alert_type'],
"notification_type": user.notification['notification_type']
})
else: # could not get poc list
return jsonify(
response = 400,
message = "Could not obtain POC list"
)
else:
if response['response'] == 200:
poc_list = response['poc']
for poc in poc_list:
user = user_data.query.filter_by(username=poc).first()
manageNotifications.contact_info.append({
"name": user.firstname,
"phone": user.phone_number,
"email": user.email,
"alert_type": user.notification['alert_type'],
"notification_type": user.notification['notification_type']
})
else: # could not get poc list
return jsonify(
response = 400,
message = "Could not obtain POC list"
)
# iterate through contact info and send message if score >= user setting
for contact in manageNotifications.contact_info:
if manageNotifications.threat_data['score'] >= contact['alert_type']:
if contact['notification_type'] == "email":
send_email(recipients=[contact['email']],
text_body="Hello " + contact['name'] + ",\n\nThere was a threat detected on your network at " +
manageNotifications.threat_data['time'] + "\nA summary of the details are provided below.\n" +
"For more information, login to your account, and view the ThreatDetection Service Dashboard.\n",
threat_data=json_encode(manageNotifications.threat_data))
elif contact['notification_type'] == "sms":
send_sms(to=contact['phone'],
msg="Hello " + contact['name'] + ",\n\nThere was a threat detected on your network at " +
manageNotifications.threat_data['time'] + "\nA summary of the details are provided below.\n" +
"For more information, login to your account, and view the ThreatDetection Service Dashboard.\n",
threat_data=json_encode(manageNotifications.threat_data))
return jsonify(
response = 200,
message = "Alert parsing successful"
)
except Exception as e:
# DEBUG only (security risk : TMI)
print("Unexpected error:", sys.exc_info()[0]) # sys info
print(type(e)) # the exception instance
print(e.args) # arguments stored in .args
print(e) # the actual error
traceback.print_tb(e.__traceback__) # print stack trace
# send email #
if URL.find("api/notifications/email") > 0:
try:
parser = reqparse.RequestParser()
parser.add_argument('recipients', type=list, location='json')
parser.add_argument('subject', type=str, location='json')
parser.add_argument('text_body', type=str, location='json')
parser.add_argument('html_body', type=str, location='json')
parser.add_argument('sender', type=str, location='json')
args = parser.parse_args()
# DEBUG
# return jsonify(
# subject = args['subject'],
# recipients = args['recipients'],
# text_body = args['text_body'],
# html_body = args['html_body'],
# sender = args['sender']
# )
                send_email(recipients=args['recipients'], text_body=args['text_body'],
                           html_body=args['html_body'], subject=args['subject'],
                           sender=args['sender'])
return jsonify(
response = 200,
message = 'Email delivery success'
)
except Exception as e:
return {'error' : str(e)} # DEBUG only (security risk : TMI)
#send sms
elif URL.find("api/notifications/sms") > 0:
try:
parser = reqparse.RequestParser()
parser.add_argument('to', type=int, location='json')
parser.add_argument('frm', type=int, location='json')
parser.add_argument('msg', type=str, location='json')
args = parser.parse_args()
# DEBUG
# return jsonify(
# to = json_decode(args['to']),
# frm = args['frm'],
# msg = args['msg']
# )
send_sms(to=args['to'], frm=args['frm'], msg=args['msg'])
return jsonify(
response = 200,
message = 'SMS delivery success'
)
except Exception as e:
return {'error' : str(e)} # DEBUG only (security risk : TMI)
def get(self, threat_id):
''' get threat-intel data for threat notification '''
assetFullQueryURL = assetQueryURL + "/" + threat_id
print(assetFullQueryURL)
try:
response = requests.get(assetFullQueryURL, headers={"Accept" : "application/json"})
jData = response.json()
        except Exception:
            return "Server Down"
decodedList = []
for row in jData['Row']:
# Decode the key into ascii #
#rowKey = base64.b64decode(row['key']).decode('ascii')
dColumn = {}
for cell in row['Cell']:
columnname = base64.b64decode(cell['column']).decode('ascii')
value = base64.b64decode(cell['$']).decode('ascii')
dColumn[columnname] = value
            decodedList.append(dColumn)
return jsonify(threat_intel=decodedList)
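# --- Hedged usage sketch; not part of the API itself ---
# Example request body for the /api/notifications/email route handled by
# manageNotifications.post() above. The host/port mirror apiBaseRestUrl;
# the recipient address is an assumption.
if __name__ == '__main__':
    payload = {
        'recipients': ['ops@example.com'],
        'subject': 'Test alert',
        'text_body': 'Hello from the notification service.',
        'html_body': None,
        'sender': 'threatdetectionservice@gmail.com',
    }
    resp = requests.post(apiBaseRestUrl + '/api/notifications/email',
                         json=payload)
    print(resp.json())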
|
|
import pytest
from jsonmodels import models, fields, errors
def test_model1():
class Person(models.Base):
name = fields.StringField()
surname = fields.StringField()
age = fields.IntField()
alan = Person()
alan.name = 'Alan'
alan.surname = 'Wake'
alan.age = 34
def test_required():
class Person(models.Base):
name = fields.StringField(required=True)
surname = fields.StringField()
age = fields.IntField()
alan = Person()
with pytest.raises(errors.ValidationError):
alan.validate()
alan.name = 'Chuck'
alan.validate()
def test_type_validation():
class Person(models.Base):
name = fields.StringField()
age = fields.IntField()
alan = Person()
alan.age = 42
def test_base_field_should_not_be_usable():
class Person(models.Base):
name = fields.BaseField()
alan = Person()
with pytest.raises(errors.ValidationError):
alan.name = 'some name'
with pytest.raises(errors.ValidationError):
alan.name = 2345
def test_value_replacements():
class Person(models.Base):
name = fields.StringField()
age = fields.IntField()
cash = fields.FloatField()
children = fields.ListField()
alan = Person()
assert alan.name is None
assert alan.age is None
assert alan.cash is None
assert isinstance(alan.children, list)
def test_list_field():
class Car(models.Base):
wheels = fields.ListField()
viper = Car()
viper.wheels.append('some')
viper.wheels.append('not necessarily')
viper.wheels.append('proper')
viper.wheels.append('wheels')
def test_list_field_types():
class Wheel(models.Base):
pass
class Wheel2(models.Base):
pass
class Car(models.Base):
wheels = fields.ListField(items_types=[Wheel])
viper = Car()
viper.wheels.append(Wheel())
viper.wheels.append(Wheel())
with pytest.raises(errors.ValidationError):
        viper.wheels.append(Wheel2())
def test_list_field_types_when_assigning():
class Wheel(models.Base):
pass
class Wheel2(models.Base):
pass
class Car(models.Base):
wheels = fields.ListField(items_types=[Wheel])
viper = Car()
viper.wheels.append(Wheel())
viper.wheels[0] = Wheel()
with pytest.raises(errors.ValidationError):
        viper.wheels[1] = Wheel2()
def test_list_field_for_subtypes():
class Car(models.Base):
pass
class Viper(Car):
pass
class Lamborghini(Car):
pass
class Garage1(models.Base):
cars = fields.ListField(items_types=[Car])
garage = Garage1()
garage.cars.append(Car())
garage.cars.append(Viper())
garage.cars.append(Lamborghini())
class Garage2(models.Base):
cars = fields.ListField(items_types=[Viper, Lamborghini])
garage = Garage2()
garage.cars.append(Viper())
garage.cars.append(Lamborghini())
with pytest.raises(errors.ValidationError):
garage.cars.append(Car())
def test_list_validation():
class Garage(models.Base):
cars = fields.ListField()
garage = Garage()
with pytest.raises(errors.ValidationError):
garage.cars = 'some string'
def test_embedded_model():
class Secondary(models.Base):
data = fields.IntField()
class Primary(models.Base):
name = fields.StringField()
secondary = fields.EmbeddedField(Secondary)
entity = Primary()
assert entity.secondary is None
entity.name = 'chuck'
entity.secondary = Secondary()
entity.secondary.data = 42
with pytest.raises(errors.ValidationError):
entity.secondary = 'something different'
entity.secondary = None
def test_embedded_required_validation():
class Secondary(models.Base):
data = fields.IntField(required=True)
class Primary(models.Base):
name = fields.StringField()
secondary = fields.EmbeddedField(Secondary)
entity = Primary()
sec = Secondary()
sec.data = 33
entity.secondary = sec
with pytest.raises(errors.ValidationError):
entity.secondary.data = None
entity.secondary = None
class Primary(models.Base):
name = fields.StringField()
secondary = fields.EmbeddedField(Secondary, required=True)
entity = Primary()
sec = Secondary()
sec.data = 33
entity.secondary = sec
with pytest.raises(errors.ValidationError):
entity.secondary.data = None
def test_embedded_inheritance():
class Car(models.Base):
pass
class Viper(Car):
pass
class Lamborghini(Car):
pass
class ParkingPlace(models.Base):
location = fields.StringField()
car = fields.EmbeddedField([Viper, Lamborghini])
place = ParkingPlace()
place.car = Viper()
place.car = Lamborghini()
with pytest.raises(errors.ValidationError):
place.car = Car()
class ParkingPlace(models.Base):
location = fields.StringField()
car = fields.EmbeddedField(Car)
place = ParkingPlace()
place.car = Viper()
place.car = Lamborghini()
place.car = Car()
def test_iterable():
class Person(models.Base):
name = fields.StringField()
surname = fields.StringField()
age = fields.IntField()
cash = fields.FloatField()
alan = Person()
alan.name = 'Alan'
alan.surname = 'Wake'
alan.age = 24
alan.cash = 2445.45
pattern = {
'name': 'Alan',
'surname': 'Wake',
'age': 24,
'cash': 2445.45,
}
result = {}
for name, field in alan:
result[name] = field.__get__(alan)
assert pattern == result
def test_get_field():
name_field = fields.StringField()
surname_field = fields.StringField()
age_field = fields.IntField()
class Person(models.Base):
name = name_field
surname = surname_field
age = age_field
alan = Person()
assert alan.get_field('name') is name_field
assert alan.get_field('surname') is surname_field
assert alan.get_field('age') is age_field
def test_repr():
class Person(models.Base):
name = fields.StringField()
surname = fields.StringField()
age = fields.IntField()
chuck = Person()
assert chuck.__repr__() == 'Person()'
assert chuck.__str__() == 'Person object'
class Person2(models.Base):
name = fields.StringField()
surname = fields.StringField()
age = fields.IntField()
def __str__(self):
return self.name
chuck = Person2()
assert chuck.__repr__() == 'Person2()'
chuck.name = 'Chuck'
assert chuck.__repr__() == "Person2(name='Chuck')"
assert chuck.__str__() == 'Chuck'
chuck.name = 'Testa'
chuck.age = 42
assert chuck.__repr__() == "Person2(age=42, name='Testa')"
assert chuck.__str__() == 'Testa'
def test_list_field_with_non_model_types():
class Person(models.Base):
names = fields.ListField(str)
surname = fields.StringField()
person = Person(surname='Norris')
person.names.append('Chuck')
person.names.append('Testa')
def test_help_text():
class Person(models.Base):
name = fields.StringField(help_text='Name of person.')
age = fields.IntField(help_text='Age of person.')
person = Person()
assert person.get_field('name').help_text == 'Name of person.'
assert person.get_field('age').help_text == 'Age of person.'
def test_types():
class Person(object):
pass
class Person2(object):
pass
allowed_types = (Person,)
field = fields.EmbeddedField(allowed_types)
assert allowed_types == field.types
allowed_types = (Person, Person2)
field = fields.EmbeddedField(allowed_types)
assert allowed_types == field.types
def test_items_types():
class Person(object):
pass
class Person2(object):
pass
allowed_types = (Person,)
field = fields.ListField(allowed_types)
assert allowed_types == field.items_types
allowed_types = (Person, Person2)
field = fields.ListField(allowed_types)
assert allowed_types == field.items_types
field = fields.ListField()
assert tuple() == field.items_types
def test_required_embedded_field():
class Secondary(models.Base):
data = fields.IntField()
class Primary(models.Base):
name = fields.StringField()
secondary = fields.EmbeddedField(Secondary, required=True)
entity = Primary()
with pytest.raises(errors.ValidationError):
entity.validate()
entity.secondary = Secondary()
entity.validate()
class Primary(models.Base):
name = fields.StringField()
secondary = fields.EmbeddedField(Secondary, required=False)
entity = Primary()
entity.validate()
entity.secondary = None
entity.validate()
def test_assignation_of_list_of_models():
class Wheel(models.Base):
pass
class Car(models.Base):
wheels = fields.ListField(items_types=[Wheel])
viper = Car()
viper.wheels = None
viper.wheels = [Wheel()]
def test_equality_of_different_types():
class A(models.Base):
pass
class B(A):
pass
class C(models.Base):
pass
assert A() == A()
assert A() != B()
assert B() != A()
assert A() != C()
def test_equality_of_simple_models():
class Person(models.Base):
name = fields.StringField()
age = fields.IntField()
p1 = Person(name='Jack')
p2 = Person(name='Jack')
assert p1 == p2
assert p2 == p1
p3 = Person(name='Jack', age=100)
assert p1 != p3
assert p3 != p1
p4 = Person(name='Jill')
assert p1 != p4
assert p4 != p1
def test_equality_embedded_objects():
class Person(models.Base):
name = fields.StringField()
class Company(models.Base):
chairman = fields.EmbeddedField(Person)
c1 = Company(chairman=Person(name='Pete'))
c2 = Company(chairman=Person(name='Pete'))
assert c1 == c2
assert c2 == c1
c3 = Company(chairman=Person(name='Joshua'))
assert c1 != c3
assert c3 != c1
def test_equality_list_fields():
class Wheel(models.Base):
pressure = fields.FloatField()
class Car(models.Base):
wheels = fields.ListField(items_types=[Wheel])
car = Car(
wheels=[
Wheel(pressure=1),
Wheel(pressure=2),
Wheel(pressure=3),
Wheel(pressure=4),
],
)
another_car = Car(
wheels=[
Wheel(pressure=1),
Wheel(pressure=2),
Wheel(pressure=3),
Wheel(pressure=4),
],
)
assert car == another_car
different_car = Car(
wheels=[
Wheel(pressure=4),
Wheel(pressure=3),
Wheel(pressure=2),
Wheel(pressure=1),
],
)
assert car != different_car
def test_equality_missing_required_field():
class Model(models.Base):
name = fields.StringField(required=True)
age = fields.IntField()
assert Model(age=1) == Model(age=1)
assert Model(age=1) != Model(age=2)
assert Model(name='William', age=1) != Model(age=1)
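def test_validation_workflow_sketch():
    # Hedged sketch, not one of the library's own tests: summarizes the
    # workflow exercised above -- assignment-time type checks plus an
    # explicit validate() pass for required fields.
    class Person(models.Base):
        name = fields.StringField(required=True)
        age = fields.IntField()

    person = Person(age=30)
    with pytest.raises(errors.ValidationError):
        person.validate()  # required 'name' is missing
    with pytest.raises(errors.ValidationError):
        person.age = 'not an int'  # wrong type rejected at assignment
    person.name = 'Chuck'
    person.validate()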
|
|
# -*- coding: utf-8 -*-
"""
Tests for Fiscal Year and Fiscal Quarter offset classes
"""
from datetime import datetime
from dateutil.relativedelta import relativedelta
import pytest
import pandas.util.testing as tm
from pandas import Timestamp
from pandas.tseries.frequencies import get_offset
from pandas._libs.tslibs.frequencies import INVALID_FREQ_ERR_MSG
from pandas.tseries.offsets import FY5253Quarter, FY5253
from .common import assert_offset_equal, assert_onOffset
from .test_offsets import Base, WeekDay
def makeFY5253LastOfMonthQuarter(*args, **kwds):
return FY5253Quarter(*args, variation="last", **kwds)
def makeFY5253NearestEndMonthQuarter(*args, **kwds):
return FY5253Quarter(*args, variation="nearest", **kwds)
def makeFY5253NearestEndMonth(*args, **kwds):
return FY5253(*args, variation="nearest", **kwds)
def makeFY5253LastOfMonth(*args, **kwds):
return FY5253(*args, variation="last", **kwds)
def test_get_offset_name():
assert (makeFY5253LastOfMonthQuarter(
weekday=1, startingMonth=3,
qtr_with_extra_week=4).freqstr == "REQ-L-MAR-TUE-4")
assert (makeFY5253NearestEndMonthQuarter(
weekday=1, startingMonth=3,
qtr_with_extra_week=3).freqstr == "REQ-N-MAR-TUE-3")
def test_get_offset():
with tm.assert_raises_regex(ValueError, INVALID_FREQ_ERR_MSG):
get_offset('gibberish')
with tm.assert_raises_regex(ValueError, INVALID_FREQ_ERR_MSG):
get_offset('QS-JAN-B')
pairs = [
("RE-N-DEC-MON",
makeFY5253NearestEndMonth(weekday=0, startingMonth=12)),
("RE-L-DEC-TUE",
makeFY5253LastOfMonth(weekday=1, startingMonth=12)),
("REQ-L-MAR-TUE-4",
makeFY5253LastOfMonthQuarter(weekday=1,
startingMonth=3,
qtr_with_extra_week=4)),
("REQ-L-DEC-MON-3",
makeFY5253LastOfMonthQuarter(weekday=0,
startingMonth=12,
qtr_with_extra_week=3)),
("REQ-N-DEC-MON-3",
makeFY5253NearestEndMonthQuarter(weekday=0,
startingMonth=12,
qtr_with_extra_week=3))]
for name, expected in pairs:
offset = get_offset(name)
assert offset == expected, ("Expected %r to yield %r (actual: %r)" %
(name, expected, offset))
class TestFY5253LastOfMonth(Base):
offset_lom_sat_aug = makeFY5253LastOfMonth(1, startingMonth=8,
weekday=WeekDay.SAT)
offset_lom_sat_sep = makeFY5253LastOfMonth(1, startingMonth=9,
weekday=WeekDay.SAT)
on_offset_cases = [
# From Wikipedia (see:
# http://en.wikipedia.org/wiki/4%E2%80%934%E2%80%935_calendar#Last_Saturday_of_the_month_at_fiscal_year_end)
(offset_lom_sat_aug, datetime(2006, 8, 26), True),
(offset_lom_sat_aug, datetime(2007, 8, 25), True),
(offset_lom_sat_aug, datetime(2008, 8, 30), True),
(offset_lom_sat_aug, datetime(2009, 8, 29), True),
(offset_lom_sat_aug, datetime(2010, 8, 28), True),
(offset_lom_sat_aug, datetime(2011, 8, 27), True),
(offset_lom_sat_aug, datetime(2012, 8, 25), True),
(offset_lom_sat_aug, datetime(2013, 8, 31), True),
(offset_lom_sat_aug, datetime(2014, 8, 30), True),
(offset_lom_sat_aug, datetime(2015, 8, 29), True),
(offset_lom_sat_aug, datetime(2016, 8, 27), True),
(offset_lom_sat_aug, datetime(2017, 8, 26), True),
(offset_lom_sat_aug, datetime(2018, 8, 25), True),
(offset_lom_sat_aug, datetime(2019, 8, 31), True),
(offset_lom_sat_aug, datetime(2006, 8, 27), False),
(offset_lom_sat_aug, datetime(2007, 8, 28), False),
(offset_lom_sat_aug, datetime(2008, 8, 31), False),
(offset_lom_sat_aug, datetime(2009, 8, 30), False),
(offset_lom_sat_aug, datetime(2010, 8, 29), False),
(offset_lom_sat_aug, datetime(2011, 8, 28), False),
(offset_lom_sat_aug, datetime(2006, 8, 25), False),
(offset_lom_sat_aug, datetime(2007, 8, 24), False),
(offset_lom_sat_aug, datetime(2008, 8, 29), False),
(offset_lom_sat_aug, datetime(2009, 8, 28), False),
(offset_lom_sat_aug, datetime(2010, 8, 27), False),
(offset_lom_sat_aug, datetime(2011, 8, 26), False),
(offset_lom_sat_aug, datetime(2019, 8, 30), False),
# From GMCR (see for example:
# http://yahoo.brand.edgar-online.com/Default.aspx?
# companyid=3184&formtypeID=7)
(offset_lom_sat_sep, datetime(2010, 9, 25), True),
(offset_lom_sat_sep, datetime(2011, 9, 24), True),
(offset_lom_sat_sep, datetime(2012, 9, 29), True)]
@pytest.mark.parametrize('case', on_offset_cases)
def test_onOffset(self, case):
offset, dt, expected = case
assert_onOffset(offset, dt, expected)
def test_apply(self):
offset_lom_aug_sat = makeFY5253LastOfMonth(startingMonth=8,
weekday=WeekDay.SAT)
offset_lom_aug_sat_1 = makeFY5253LastOfMonth(n=1, startingMonth=8,
weekday=WeekDay.SAT)
date_seq_lom_aug_sat = [datetime(2006, 8, 26), datetime(2007, 8, 25),
datetime(2008, 8, 30), datetime(2009, 8, 29),
datetime(2010, 8, 28), datetime(2011, 8, 27),
datetime(2012, 8, 25), datetime(2013, 8, 31),
datetime(2014, 8, 30), datetime(2015, 8, 29),
datetime(2016, 8, 27)]
tests = [
(offset_lom_aug_sat, date_seq_lom_aug_sat),
(offset_lom_aug_sat_1, date_seq_lom_aug_sat),
(offset_lom_aug_sat, [
datetime(2006, 8, 25)] + date_seq_lom_aug_sat),
(offset_lom_aug_sat_1, [
datetime(2006, 8, 27)] + date_seq_lom_aug_sat[1:]),
(makeFY5253LastOfMonth(n=-1, startingMonth=8,
weekday=WeekDay.SAT),
list(reversed(date_seq_lom_aug_sat))),
]
for test in tests:
offset, data = test
current = data[0]
for datum in data[1:]:
current = current + offset
assert current == datum
class TestFY5253NearestEndMonth(Base):
def test_get_year_end(self):
assert (makeFY5253NearestEndMonth(
startingMonth=8, weekday=WeekDay.SAT).get_year_end(
datetime(2013, 1, 1)) == datetime(2013, 8, 31))
assert (makeFY5253NearestEndMonth(
startingMonth=8, weekday=WeekDay.SUN).get_year_end(
datetime(2013, 1, 1)) == datetime(2013, 9, 1))
assert (makeFY5253NearestEndMonth(
startingMonth=8, weekday=WeekDay.FRI).get_year_end(
datetime(2013, 1, 1)) == datetime(2013, 8, 30))
offset_n = FY5253(weekday=WeekDay.TUE, startingMonth=12,
variation="nearest")
assert (offset_n.get_year_end(datetime(2012, 1, 1)) ==
datetime(2013, 1, 1))
assert (offset_n.get_year_end(datetime(2012, 1, 10)) ==
datetime(2013, 1, 1))
assert (offset_n.get_year_end(datetime(2013, 1, 1)) ==
datetime(2013, 12, 31))
assert (offset_n.get_year_end(datetime(2013, 1, 2)) ==
datetime(2013, 12, 31))
assert (offset_n.get_year_end(datetime(2013, 1, 3)) ==
datetime(2013, 12, 31))
assert (offset_n.get_year_end(datetime(2013, 1, 10)) ==
datetime(2013, 12, 31))
JNJ = FY5253(n=1, startingMonth=12, weekday=6, variation="nearest")
assert (JNJ.get_year_end(datetime(2006, 1, 1)) ==
datetime(2006, 12, 31))
offset_lom_aug_sat = makeFY5253NearestEndMonth(1, startingMonth=8,
weekday=WeekDay.SAT)
offset_lom_aug_thu = makeFY5253NearestEndMonth(1, startingMonth=8,
weekday=WeekDay.THU)
offset_n = FY5253(weekday=WeekDay.TUE, startingMonth=12,
variation="nearest")
on_offset_cases = [
# From Wikipedia (see:
# http://en.wikipedia.org/wiki/4%E2%80%934%E2%80%935_calendar
# #Saturday_nearest_the_end_of_month)
# 2006-09-02 2006 September 2
# 2007-09-01 2007 September 1
# 2008-08-30 2008 August 30 (leap year)
# 2009-08-29 2009 August 29
# 2010-08-28 2010 August 28
# 2011-09-03 2011 September 3
# 2012-09-01 2012 September 1 (leap year)
# 2013-08-31 2013 August 31
# 2014-08-30 2014 August 30
# 2015-08-29 2015 August 29
# 2016-09-03 2016 September 3 (leap year)
# 2017-09-02 2017 September 2
# 2018-09-01 2018 September 1
# 2019-08-31 2019 August 31
(offset_lom_aug_sat, datetime(2006, 9, 2), True),
(offset_lom_aug_sat, datetime(2007, 9, 1), True),
(offset_lom_aug_sat, datetime(2008, 8, 30), True),
(offset_lom_aug_sat, datetime(2009, 8, 29), True),
(offset_lom_aug_sat, datetime(2010, 8, 28), True),
(offset_lom_aug_sat, datetime(2011, 9, 3), True),
(offset_lom_aug_sat, datetime(2016, 9, 3), True),
(offset_lom_aug_sat, datetime(2017, 9, 2), True),
(offset_lom_aug_sat, datetime(2018, 9, 1), True),
(offset_lom_aug_sat, datetime(2019, 8, 31), True),
(offset_lom_aug_sat, datetime(2006, 8, 27), False),
(offset_lom_aug_sat, datetime(2007, 8, 28), False),
(offset_lom_aug_sat, datetime(2008, 8, 31), False),
(offset_lom_aug_sat, datetime(2009, 8, 30), False),
(offset_lom_aug_sat, datetime(2010, 8, 29), False),
(offset_lom_aug_sat, datetime(2011, 8, 28), False),
(offset_lom_aug_sat, datetime(2006, 8, 25), False),
(offset_lom_aug_sat, datetime(2007, 8, 24), False),
(offset_lom_aug_sat, datetime(2008, 8, 29), False),
(offset_lom_aug_sat, datetime(2009, 8, 28), False),
(offset_lom_aug_sat, datetime(2010, 8, 27), False),
(offset_lom_aug_sat, datetime(2011, 8, 26), False),
(offset_lom_aug_sat, datetime(2019, 8, 30), False),
# From Micron, see:
# http://google.brand.edgar-online.com/?sym=MU&formtypeID=7
(offset_lom_aug_thu, datetime(2012, 8, 30), True),
(offset_lom_aug_thu, datetime(2011, 9, 1), True),
(offset_n, datetime(2012, 12, 31), False),
(offset_n, datetime(2013, 1, 1), True),
(offset_n, datetime(2013, 1, 2), False)]
@pytest.mark.parametrize('case', on_offset_cases)
def test_onOffset(self, case):
offset, dt, expected = case
assert_onOffset(offset, dt, expected)
def test_apply(self):
date_seq_nem_8_sat = [datetime(2006, 9, 2), datetime(2007, 9, 1),
datetime(2008, 8, 30), datetime(2009, 8, 29),
datetime(2010, 8, 28), datetime(2011, 9, 3)]
JNJ = [datetime(2005, 1, 2), datetime(2006, 1, 1),
datetime(2006, 12, 31), datetime(2007, 12, 30),
datetime(2008, 12, 28), datetime(2010, 1, 3),
datetime(2011, 1, 2), datetime(2012, 1, 1),
datetime(2012, 12, 30)]
DEC_SAT = FY5253(n=-1, startingMonth=12, weekday=5,
variation="nearest")
tests = [
(makeFY5253NearestEndMonth(startingMonth=8,
weekday=WeekDay.SAT),
date_seq_nem_8_sat),
(makeFY5253NearestEndMonth(n=1, startingMonth=8,
weekday=WeekDay.SAT),
date_seq_nem_8_sat),
(makeFY5253NearestEndMonth(startingMonth=8, weekday=WeekDay.SAT),
[datetime(2006, 9, 1)] + date_seq_nem_8_sat),
(makeFY5253NearestEndMonth(n=1, startingMonth=8,
weekday=WeekDay.SAT),
[datetime(2006, 9, 3)] + date_seq_nem_8_sat[1:]),
(makeFY5253NearestEndMonth(n=-1, startingMonth=8,
weekday=WeekDay.SAT),
list(reversed(date_seq_nem_8_sat))),
(makeFY5253NearestEndMonth(n=1, startingMonth=12,
weekday=WeekDay.SUN), JNJ),
(makeFY5253NearestEndMonth(n=-1, startingMonth=12,
weekday=WeekDay.SUN),
list(reversed(JNJ))),
(makeFY5253NearestEndMonth(n=1, startingMonth=12,
weekday=WeekDay.SUN),
[datetime(2005, 1, 2), datetime(2006, 1, 1)]),
(makeFY5253NearestEndMonth(n=1, startingMonth=12,
weekday=WeekDay.SUN),
[datetime(2006, 1, 2), datetime(2006, 12, 31)]),
(DEC_SAT, [datetime(2013, 1, 15), datetime(2012, 12, 29)])
]
for test in tests:
offset, data = test
current = data[0]
for datum in data[1:]:
current = current + offset
assert current == datum
class TestFY5253LastOfMonthQuarter(Base):
def test_isAnchored(self):
assert makeFY5253LastOfMonthQuarter(
startingMonth=1, weekday=WeekDay.SAT,
qtr_with_extra_week=4).isAnchored()
assert makeFY5253LastOfMonthQuarter(
weekday=WeekDay.SAT, startingMonth=3,
qtr_with_extra_week=4).isAnchored()
assert not makeFY5253LastOfMonthQuarter(
2, startingMonth=1, weekday=WeekDay.SAT,
qtr_with_extra_week=4).isAnchored()
def test_equality(self):
assert (makeFY5253LastOfMonthQuarter(
startingMonth=1, weekday=WeekDay.SAT,
qtr_with_extra_week=4) == makeFY5253LastOfMonthQuarter(
startingMonth=1, weekday=WeekDay.SAT, qtr_with_extra_week=4))
assert (makeFY5253LastOfMonthQuarter(
startingMonth=1, weekday=WeekDay.SAT,
qtr_with_extra_week=4) != makeFY5253LastOfMonthQuarter(
startingMonth=1, weekday=WeekDay.SUN, qtr_with_extra_week=4))
assert (makeFY5253LastOfMonthQuarter(
startingMonth=1, weekday=WeekDay.SAT,
qtr_with_extra_week=4) != makeFY5253LastOfMonthQuarter(
startingMonth=2, weekday=WeekDay.SAT, qtr_with_extra_week=4))
def test_offset(self):
offset = makeFY5253LastOfMonthQuarter(1, startingMonth=9,
weekday=WeekDay.SAT,
qtr_with_extra_week=4)
offset2 = makeFY5253LastOfMonthQuarter(2, startingMonth=9,
weekday=WeekDay.SAT,
qtr_with_extra_week=4)
offset4 = makeFY5253LastOfMonthQuarter(4, startingMonth=9,
weekday=WeekDay.SAT,
qtr_with_extra_week=4)
offset_neg1 = makeFY5253LastOfMonthQuarter(-1, startingMonth=9,
weekday=WeekDay.SAT,
qtr_with_extra_week=4)
offset_neg2 = makeFY5253LastOfMonthQuarter(-2, startingMonth=9,
weekday=WeekDay.SAT,
qtr_with_extra_week=4)
GMCR = [datetime(2010, 3, 27), datetime(2010, 6, 26),
datetime(2010, 9, 25), datetime(2010, 12, 25),
datetime(2011, 3, 26), datetime(2011, 6, 25),
datetime(2011, 9, 24), datetime(2011, 12, 24),
datetime(2012, 3, 24), datetime(2012, 6, 23),
datetime(2012, 9, 29), datetime(2012, 12, 29),
datetime(2013, 3, 30), datetime(2013, 6, 29)]
assert_offset_equal(offset, base=GMCR[0], expected=GMCR[1])
assert_offset_equal(offset, base=GMCR[0] + relativedelta(days=-1),
expected=GMCR[0])
assert_offset_equal(offset, base=GMCR[1], expected=GMCR[2])
assert_offset_equal(offset2, base=GMCR[0], expected=GMCR[2])
assert_offset_equal(offset4, base=GMCR[0], expected=GMCR[4])
assert_offset_equal(offset_neg1, base=GMCR[-1], expected=GMCR[-2])
assert_offset_equal(offset_neg1,
base=GMCR[-1] + relativedelta(days=+1),
expected=GMCR[-1])
assert_offset_equal(offset_neg2, base=GMCR[-1], expected=GMCR[-3])
date = GMCR[0] + relativedelta(days=-1)
for expected in GMCR:
assert_offset_equal(offset, date, expected)
date = date + offset
date = GMCR[-1] + relativedelta(days=+1)
for expected in reversed(GMCR):
assert_offset_equal(offset_neg1, date, expected)
date = date + offset_neg1
lomq_aug_sat_4 = makeFY5253LastOfMonthQuarter(1, startingMonth=8,
weekday=WeekDay.SAT,
qtr_with_extra_week=4)
lomq_sep_sat_4 = makeFY5253LastOfMonthQuarter(1, startingMonth=9,
weekday=WeekDay.SAT,
qtr_with_extra_week=4)
on_offset_cases = [
# From Wikipedia
(lomq_aug_sat_4, datetime(2006, 8, 26), True),
(lomq_aug_sat_4, datetime(2007, 8, 25), True),
(lomq_aug_sat_4, datetime(2008, 8, 30), True),
(lomq_aug_sat_4, datetime(2009, 8, 29), True),
(lomq_aug_sat_4, datetime(2010, 8, 28), True),
(lomq_aug_sat_4, datetime(2011, 8, 27), True),
(lomq_aug_sat_4, datetime(2019, 8, 31), True),
(lomq_aug_sat_4, datetime(2006, 8, 27), False),
(lomq_aug_sat_4, datetime(2007, 8, 28), False),
(lomq_aug_sat_4, datetime(2008, 8, 31), False),
(lomq_aug_sat_4, datetime(2009, 8, 30), False),
(lomq_aug_sat_4, datetime(2010, 8, 29), False),
(lomq_aug_sat_4, datetime(2011, 8, 28), False),
(lomq_aug_sat_4, datetime(2006, 8, 25), False),
(lomq_aug_sat_4, datetime(2007, 8, 24), False),
(lomq_aug_sat_4, datetime(2008, 8, 29), False),
(lomq_aug_sat_4, datetime(2009, 8, 28), False),
(lomq_aug_sat_4, datetime(2010, 8, 27), False),
(lomq_aug_sat_4, datetime(2011, 8, 26), False),
(lomq_aug_sat_4, datetime(2019, 8, 30), False),
# From GMCR
(lomq_sep_sat_4, datetime(2010, 9, 25), True),
(lomq_sep_sat_4, datetime(2011, 9, 24), True),
(lomq_sep_sat_4, datetime(2012, 9, 29), True),
(lomq_sep_sat_4, datetime(2013, 6, 29), True),
(lomq_sep_sat_4, datetime(2012, 6, 23), True),
(lomq_sep_sat_4, datetime(2012, 6, 30), False),
(lomq_sep_sat_4, datetime(2013, 3, 30), True),
(lomq_sep_sat_4, datetime(2012, 3, 24), True),
(lomq_sep_sat_4, datetime(2012, 12, 29), True),
(lomq_sep_sat_4, datetime(2011, 12, 24), True),
# INTC (extra week in Q1)
# See: http://www.intc.com/releasedetail.cfm?ReleaseID=542844
(makeFY5253LastOfMonthQuarter(1, startingMonth=12,
weekday=WeekDay.SAT,
qtr_with_extra_week=1),
datetime(2011, 4, 2), True),
# see: http://google.brand.edgar-online.com/?sym=INTC&formtypeID=7
(makeFY5253LastOfMonthQuarter(1, startingMonth=12,
weekday=WeekDay.SAT,
qtr_with_extra_week=1),
datetime(2012, 12, 29), True),
(makeFY5253LastOfMonthQuarter(1, startingMonth=12,
weekday=WeekDay.SAT,
qtr_with_extra_week=1),
datetime(2011, 12, 31), True),
(makeFY5253LastOfMonthQuarter(1, startingMonth=12,
weekday=WeekDay.SAT,
qtr_with_extra_week=1),
datetime(2010, 12, 25), True)]
@pytest.mark.parametrize('case', on_offset_cases)
def test_onOffset(self, case):
offset, dt, expected = case
assert_onOffset(offset, dt, expected)
def test_year_has_extra_week(self):
# End of long Q1
assert makeFY5253LastOfMonthQuarter(
1, startingMonth=12, weekday=WeekDay.SAT,
qtr_with_extra_week=1).year_has_extra_week(datetime(2011, 4, 2))
# Start of long Q1
assert makeFY5253LastOfMonthQuarter(
1, startingMonth=12, weekday=WeekDay.SAT,
qtr_with_extra_week=1).year_has_extra_week(datetime(2010, 12, 26))
# End of year before year with long Q1
assert not makeFY5253LastOfMonthQuarter(
1, startingMonth=12, weekday=WeekDay.SAT,
qtr_with_extra_week=1).year_has_extra_week(datetime(2010, 12, 25))
for year in [x
for x in range(1994, 2011 + 1)
if x not in [2011, 2005, 2000, 1994]]:
assert not makeFY5253LastOfMonthQuarter(
1, startingMonth=12, weekday=WeekDay.SAT,
qtr_with_extra_week=1).year_has_extra_week(
datetime(year, 4, 2))
# Other long years
assert makeFY5253LastOfMonthQuarter(
1, startingMonth=12, weekday=WeekDay.SAT,
qtr_with_extra_week=1).year_has_extra_week(datetime(2005, 4, 2))
assert makeFY5253LastOfMonthQuarter(
1, startingMonth=12, weekday=WeekDay.SAT,
qtr_with_extra_week=1).year_has_extra_week(datetime(2000, 4, 2))
assert makeFY5253LastOfMonthQuarter(
1, startingMonth=12, weekday=WeekDay.SAT,
qtr_with_extra_week=1).year_has_extra_week(datetime(1994, 4, 2))
def test_get_weeks(self):
sat_dec_1 = makeFY5253LastOfMonthQuarter(1, startingMonth=12,
weekday=WeekDay.SAT,
qtr_with_extra_week=1)
sat_dec_4 = makeFY5253LastOfMonthQuarter(1, startingMonth=12,
weekday=WeekDay.SAT,
qtr_with_extra_week=4)
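        # get_weeks returns the number of weeks in each quarter of the fiscal
        # year containing the given date; the 53rd week, when present, falls
        # in qtr_with_extra_week.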
assert sat_dec_1.get_weeks(datetime(2011, 4, 2)) == [14, 13, 13, 13]
assert sat_dec_4.get_weeks(datetime(2011, 4, 2)) == [13, 13, 13, 14]
assert sat_dec_1.get_weeks(datetime(2010, 12, 25)) == [13, 13, 13, 13]
class TestFY5253NearestEndMonthQuarter(Base):
offset_nem_sat_aug_4 = makeFY5253NearestEndMonthQuarter(
1, startingMonth=8, weekday=WeekDay.SAT,
qtr_with_extra_week=4)
offset_nem_thu_aug_4 = makeFY5253NearestEndMonthQuarter(
1, startingMonth=8, weekday=WeekDay.THU,
qtr_with_extra_week=4)
offset_n = FY5253(weekday=WeekDay.TUE, startingMonth=12,
variation="nearest")
on_offset_cases = [
# From Wikipedia
(offset_nem_sat_aug_4, datetime(2006, 9, 2), True),
(offset_nem_sat_aug_4, datetime(2007, 9, 1), True),
(offset_nem_sat_aug_4, datetime(2008, 8, 30), True),
(offset_nem_sat_aug_4, datetime(2009, 8, 29), True),
(offset_nem_sat_aug_4, datetime(2010, 8, 28), True),
(offset_nem_sat_aug_4, datetime(2011, 9, 3), True),
(offset_nem_sat_aug_4, datetime(2016, 9, 3), True),
(offset_nem_sat_aug_4, datetime(2017, 9, 2), True),
(offset_nem_sat_aug_4, datetime(2018, 9, 1), True),
(offset_nem_sat_aug_4, datetime(2019, 8, 31), True),
(offset_nem_sat_aug_4, datetime(2006, 8, 27), False),
(offset_nem_sat_aug_4, datetime(2007, 8, 28), False),
(offset_nem_sat_aug_4, datetime(2008, 8, 31), False),
(offset_nem_sat_aug_4, datetime(2009, 8, 30), False),
(offset_nem_sat_aug_4, datetime(2010, 8, 29), False),
(offset_nem_sat_aug_4, datetime(2011, 8, 28), False),
(offset_nem_sat_aug_4, datetime(2006, 8, 25), False),
(offset_nem_sat_aug_4, datetime(2007, 8, 24), False),
(offset_nem_sat_aug_4, datetime(2008, 8, 29), False),
(offset_nem_sat_aug_4, datetime(2009, 8, 28), False),
(offset_nem_sat_aug_4, datetime(2010, 8, 27), False),
(offset_nem_sat_aug_4, datetime(2011, 8, 26), False),
(offset_nem_sat_aug_4, datetime(2019, 8, 30), False),
# From Micron, see:
# http://google.brand.edgar-online.com/?sym=MU&formtypeID=7
(offset_nem_thu_aug_4, datetime(2012, 8, 30), True),
(offset_nem_thu_aug_4, datetime(2011, 9, 1), True),
# See: http://google.brand.edgar-online.com/?sym=MU&formtypeID=13
(offset_nem_thu_aug_4, datetime(2013, 5, 30), True),
(offset_nem_thu_aug_4, datetime(2013, 2, 28), True),
(offset_nem_thu_aug_4, datetime(2012, 11, 29), True),
(offset_nem_thu_aug_4, datetime(2012, 5, 31), True),
(offset_nem_thu_aug_4, datetime(2007, 3, 1), True),
(offset_nem_thu_aug_4, datetime(1994, 3, 3), True),
(offset_n, datetime(2012, 12, 31), False),
(offset_n, datetime(2013, 1, 1), True),
(offset_n, datetime(2013, 1, 2), False)]
@pytest.mark.parametrize('case', on_offset_cases)
def test_onOffset(self, case):
offset, dt, expected = case
assert_onOffset(offset, dt, expected)
def test_offset(self):
offset = makeFY5253NearestEndMonthQuarter(1, startingMonth=8,
weekday=WeekDay.THU,
qtr_with_extra_week=4)
MU = [datetime(2012, 5, 31),
datetime(2012, 8, 30), datetime(2012, 11, 29),
datetime(2013, 2, 28), datetime(2013, 5, 30)]
date = MU[0] + relativedelta(days=-1)
for expected in MU:
assert_offset_equal(offset, date, expected)
date = date + offset
assert_offset_equal(offset,
datetime(2012, 5, 31),
datetime(2012, 8, 30))
assert_offset_equal(offset,
datetime(2012, 5, 30),
datetime(2012, 5, 31))
offset2 = FY5253Quarter(weekday=5, startingMonth=12, variation="last",
qtr_with_extra_week=4)
assert_offset_equal(offset2,
datetime(2013, 1, 15),
datetime(2013, 3, 30))
def test_bunched_yearends():
# GH#14774 cases with two fiscal year-ends in the same calendar-year
fy = FY5253(n=1, weekday=5, startingMonth=12, variation='nearest')
dt = Timestamp('2004-01-01')
assert fy.rollback(dt) == Timestamp('2002-12-28')
assert (-fy).apply(dt) == Timestamp('2002-12-28')
assert dt - fy == Timestamp('2002-12-28')
assert fy.rollforward(dt) == Timestamp('2004-01-03')
assert fy.apply(dt) == Timestamp('2004-01-03')
assert fy + dt == Timestamp('2004-01-03')
assert dt + fy == Timestamp('2004-01-03')
# Same thing, but starting from a Timestamp in the previous year.
dt = Timestamp('2003-12-31')
assert fy.rollback(dt) == Timestamp('2002-12-28')
assert (-fy).apply(dt) == Timestamp('2002-12-28')
assert dt - fy == Timestamp('2002-12-28')
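# The tests below check that the onOffset fast path agrees with the slow
# roundtrip definition ``(ts + offset) - offset == ts`` for timezone-aware
# timestamps that are not normalized to midnight.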
def test_fy5253_last_onoffset():
# GH#18877 dates on the year-end but not normalized to midnight
offset = FY5253(n=-5, startingMonth=5, variation="last", weekday=0)
ts = Timestamp('1984-05-28 06:29:43.955911354+0200',
tz='Europe/San_Marino')
fast = offset.onOffset(ts)
slow = (ts + offset) - offset == ts
assert fast == slow
def test_fy5253_nearest_onoffset():
# GH#18877 dates on the year-end but not normalized to midnight
offset = FY5253(n=3, startingMonth=7, variation="nearest", weekday=2)
ts = Timestamp('2032-07-28 00:12:59.035729419+0000', tz='Africa/Dakar')
fast = offset.onOffset(ts)
slow = (ts + offset) - offset == ts
assert fast == slow
def test_fy5253qtr_onoffset_nearest():
# GH#19036
ts = Timestamp('1985-09-02 23:57:46.232550356-0300',
tz='Atlantic/Bermuda')
offset = FY5253Quarter(n=3, qtr_with_extra_week=1, startingMonth=2,
variation="nearest", weekday=0)
fast = offset.onOffset(ts)
slow = (ts + offset) - offset == ts
assert fast == slow
def test_fy5253qtr_onoffset_last():
# GH#19036
offset = FY5253Quarter(n=-2, qtr_with_extra_week=1,
startingMonth=7, variation="last", weekday=2)
ts = Timestamp('2011-01-26 19:03:40.331096129+0200',
tz='Africa/Windhoek')
slow = (ts + offset) - offset == ts
fast = offset.onOffset(ts)
assert fast == slow
# -*- coding: utf-8 -*-
# Copyright (C) 2010, 2011, 2012 Sebastian Wiesner <lunaryorn@gmail.com>
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation; either version 2.1 of the License, or (at your
# option) any later version.
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
# You should have received a copy of the GNU Lesser General Public License
# along with this library; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
_libudev
========
Wrapper types for libudev.  Use the ``libudev`` attribute to access
libudev functions.
.. moduleauthor:: Sebastian Wiesner <lunaryorn@gmail.com>
"""
from __future__ import (print_function, division, unicode_literals,
absolute_import)
import os
import errno
from ctypes import (CDLL, Structure, POINTER, get_errno,
c_char, c_char_p, c_int, c_ulonglong)
from ctypes.util import find_library
class udev(Structure):
"""
Dummy for ``udev`` structure.
"""
pass
udev_p = POINTER(udev)
class udev_enumerate(Structure):
"""
Dummy for ``udev_enumerate`` structure.
"""
udev_enumerate_p = POINTER(udev_enumerate)
class udev_list_entry(Structure):
"""
Dummy for ``udev_list_entry`` structure.
"""
udev_list_entry_p = POINTER(udev_list_entry)
class udev_device(Structure):
"""
Dummy for ``udev_device`` structure.
"""
udev_device_p = POINTER(udev_device)
class udev_monitor(Structure):
"""
    Dummy for ``udev_monitor`` structure.
"""
udev_monitor_p = POINTER(udev_monitor)
dev_t = c_ulonglong
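#: ctypes signatures for the wrapped libudev functions, keyed by prefix.
#: Each entry maps a function name suffix to a ``([argument types], return
#: type)`` pair; the full symbol name is ``'<prefix>_<suffix>'``, e.g.
#: ``udev_device_get_syspath``.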
SIGNATURES = {
# context
'udev': dict(
new=([], udev_p),
unref=([udev_p], None),
ref=([udev_p], udev_p),
get_sys_path=([udev_p], c_char_p),
get_dev_path=([udev_p], c_char_p),
get_run_path=([udev_p], c_char_p),
get_log_priority=([udev_p], c_int),
set_log_priority=([udev_p, c_int], None)),
# enumeration
'udev_enumerate': dict(
new=([udev_p], udev_enumerate_p),
ref=([udev_enumerate_p], udev_enumerate_p),
unref=([udev_enumerate_p], None),
add_match_subsystem=([udev_enumerate_p, c_char_p], c_int),
add_nomatch_subsystem=([udev_enumerate_p, c_char_p], c_int),
add_match_property=([udev_enumerate_p, c_char_p, c_char_p], c_int),
add_match_sysattr=([udev_enumerate_p, c_char_p, c_char_p], c_int),
add_nomatch_sysattr=([udev_enumerate_p, c_char_p, c_char_p], c_int),
add_match_tag=([udev_enumerate_p, c_char_p], c_int),
add_match_sysname=([udev_enumerate_p, c_char_p], c_int),
add_match_parent=([udev_enumerate_p, udev_device_p], c_int),
add_match_is_initialized=([udev_enumerate_p], c_int),
scan_devices=([udev_enumerate_p], c_int),
get_list_entry=([udev_enumerate_p], udev_list_entry_p)),
# list entries
'udev_list_entry': dict(
get_next=([udev_list_entry_p], udev_list_entry_p),
get_name=([udev_list_entry_p], c_char_p),
get_value=([udev_list_entry_p], c_char_p)),
# devices
'udev_device': dict(
ref=([udev_device_p], udev_device_p),
unref=([udev_device_p], None),
new_from_syspath=([udev_p, c_char_p], udev_device_p),
new_from_subsystem_sysname=([udev_p, c_char_p, c_char_p],
udev_device_p),
new_from_devnum=([udev_p, c_char, dev_t], udev_device_p),
new_from_environment=([udev_p], udev_device_p),
get_parent=([udev_device_p], udev_device_p),
get_parent_with_subsystem_devtype=([udev_device_p, c_char_p, c_char_p],
udev_device_p),
get_devpath=([udev_device_p], c_char_p),
get_subsystem=([udev_device_p], c_char_p),
get_syspath=([udev_device_p], c_char_p),
get_sysnum=([udev_device_p], c_char_p),
get_sysname=([udev_device_p], c_char_p),
get_driver=([udev_device_p], c_char_p),
get_devtype=([udev_device_p], c_char_p),
get_devnode=([udev_device_p], c_char_p),
get_property_value=([udev_device_p, c_char_p], c_char_p),
get_sysattr_value=([udev_device_p, c_char_p], c_char_p),
get_devnum=([udev_device_p], dev_t),
get_action=([udev_device_p], c_char_p),
get_seqnum=([udev_device_p], c_ulonglong),
get_is_initialized=([udev_device_p], c_int),
get_usec_since_initialized=([udev_device_p], c_ulonglong),
get_devlinks_list_entry=([udev_device_p], udev_list_entry_p),
get_tags_list_entry=([udev_device_p], udev_list_entry_p),
get_properties_list_entry=([udev_device_p], udev_list_entry_p),
get_sysattr_list_entry=([udev_device_p], udev_list_entry_p),
has_tag=([udev_device_p, c_char_p], c_int)),
# monitoring
'udev_monitor': dict(
ref=([udev_monitor_p], udev_monitor_p),
unref=([udev_monitor_p], None),
new_from_netlink=([udev_p, c_char_p], udev_monitor_p),
enable_receiving=([udev_monitor_p], c_int),
set_receive_buffer_size=([udev_monitor_p, c_int], c_int),
get_fd=([udev_monitor_p], c_int),
receive_device=([udev_monitor_p], udev_device_p),
filter_add_match_subsystem_devtype=(
[udev_monitor_p, c_char_p, c_char_p], c_int),
filter_add_match_tag=([udev_monitor_p, c_char_p], c_int),
filter_update=([udev_monitor_p], c_int),
filter_remove=([udev_monitor_p], c_int))
}
ERRNO_EXCEPTIONS = {
errno.ENOMEM: MemoryError,
errno.EOVERFLOW: OverflowError,
errno.EINVAL: ValueError
}
def exception_from_errno(errno):
"""
Create an exception from ``errno``.
``errno`` is an integral error number.
Return an exception object appropriate to ``errno``.
"""
exception = ERRNO_EXCEPTIONS.get(errno)
if exception is not None:
return exception()
else:
return EnvironmentError(errno, os.strerror(errno))
def check_negative_errorcode(result, func, *args):
"""
    Error checker for udev functions that return negative error codes.
    If ``result`` is smaller than ``0``, it is interpreted as a negative
    error code, and an appropriate exception is raised:
- ``-ENOMEM`` raises a :exc:`~exceptions.MemoryError`
- ``-EOVERFLOW`` raises a :exc:`~exceptions.OverflowError`
- all other error codes raise :exc:`~exceptions.EnvironmentError`
    If ``result`` is greater than or equal to ``0``, it is returned unchanged.
"""
if result < 0:
# udev returns the *negative* errno code at this point
errno = -result
raise exception_from_errno(errno)
else:
return result
def check_errno(result, func, *args):
"""
Error checker to check the system ``errno`` as returned by
:func:`ctypes.get_errno()`.
    If ``result`` is not ``0`` and the system ``errno`` is set, an exception
    matching that errno is raised.  Otherwise nothing happens.
"""
if result != 0:
errno = get_errno()
if errno != 0:
raise exception_from_errno(errno)
return result
def check_errno_on_null_pointer(result, func, *args):
"""
Error checker to check the system ``errno`` as returned by
:func:`ctypes.get_errno()`.
    If ``result`` is a null pointer and the system ``errno`` is set, an
    exception matching that errno is raised.  Otherwise nothing happens.
"""
if not result:
errno = get_errno()
if errno != 0:
raise exception_from_errno(errno)
return result
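#: Maps fully-qualified libudev function names to the error checker installed
#: as the ctypes ``errcheck`` hook for that function in load_udev_library().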
ERROR_CHECKERS = dict(
udev_enumerate_add_match_parent=check_negative_errorcode,
udev_enumerate_add_match_subsystem=check_negative_errorcode,
udev_enumerate_add_nomatch_subsystem=check_negative_errorcode,
udev_enumerate_add_match_property=check_negative_errorcode,
udev_enumerate_add_match_sysattr=check_negative_errorcode,
udev_enumerate_add_nomatch_sysattr=check_negative_errorcode,
udev_enumerate_add_match_tag=check_negative_errorcode,
udev_enumerate_add_match_sysname=check_negative_errorcode,
udev_enumerate_add_match_is_initialized=check_negative_errorcode,
udev_monitor_set_receive_buffer_size=check_errno,
    # The libudev docs say enable_receiving returns a negative errno, but
    # tests show this is not reliable, so query the real error code instead
udev_monitor_enable_receiving=check_errno,
udev_monitor_receive_device=check_errno_on_null_pointer,
udev_monitor_filter_add_match_subsystem_devtype=check_negative_errorcode,
udev_monitor_filter_add_match_tag=check_negative_errorcode,
udev_monitor_filter_update=check_errno,
udev_monitor_filter_remove=check_errno,
)
def load_udev_library():
"""
Load the ``udev`` library and return a :class:`ctypes.CDLL` object for
it. The library has errno handling enabled.
Important functions are given proper signatures and return types to
support type checking and argument conversion.
Raise :exc:`~exceptions.ImportError`, if the udev library was not found.
"""
if 'PYUDEV_UDEV_LIBRARY_NAME' in os.environ:
udev_library_name = os.environ['PYUDEV_UDEV_LIBRARY_NAME']
else:
udev_library_name = find_library('udev')
if not udev_library_name:
raise ImportError('No library named udev, consider setting PYUDEV_UDEV_LIBRARY_NAME')
libudev = CDLL(udev_library_name, use_errno=True)
# context function signature
for namespace, members in SIGNATURES.items():
for funcname in members:
fullname = '{0}_{1}'.format(namespace, funcname)
func = getattr(libudev, fullname, None)
if func:
argtypes, restype = members[funcname]
func.argtypes = argtypes
func.restype = restype
errorchecker = ERROR_CHECKERS.get(fullname)
if errorchecker:
func.errcheck = errorchecker
return libudev
libudev = load_udev_library()
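# A minimal usage sketch (assuming libudev is installed on the system); the
# calls follow the SIGNATURES table above:
#
#     context = libudev.udev_new()
#     sys_path = libudev.udev_get_sys_path(context)   # e.g. b'/sys'
#     libudev.udev_unref(context)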
from core.himesis import Himesis, HimesisPostConditionPattern
import cPickle as pickle
from uuid import UUID
class HMoveOneInputRepeatedDirectMatchDiffRulesRHS(HimesisPostConditionPattern):
def __init__(self):
"""
Creates the himesis graph representing the AToM3 model HMoveOneInputRepeatedDirectMatchDiffRulesRHS.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HMoveOneInputRepeatedDirectMatchDiffRulesRHS, self).__init__(name='HMoveOneInputRepeatedDirectMatchDiffRulesRHS', num_nodes=4, edges=[])
# Add the edges
self.add_edges([(0, 1), (3, 0)])
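        # Node roles: 0 is the MT_post__directLink_S association, 1 and 2 are
        # the pivot MetaModelElement_S nodes (element1/element2), and 3 is the
        # source MetaModelElement_S; edges (0, 1) and (3, 0) wire node 3 to
        # node 1 through the link.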
# Set the graph attributes
self["mm__"] = pickle.loads("""(lp1
S'MT_post__GM2AUTOSAR_MM'
p2
aS'MoTifRule'
p3
a.""")
self["MT_action__"] = """#===============================================================================
# This code is executed after the rule has been applied.
# You can access a node labelled n matched by this rule by: PostNode('n').
# To access attribute x of node n, use: PostNode('n')['x'].
#===============================================================================
pass
"""
self["name"] = """"""
self["GUID__"] = UUID('d124b8d6-890f-4e4d-b150-15fd31e717c9')
# Set the node attributes
self.vs[0]["MT_post__associationType"] = """
#===============================================================================
# You can access the value of the current node's attribute value by: attr_value.
# If the current node shall be created you MUST initialize it here!
# You can access a node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# Note that the attribute values are those before the match is rewritten.
# The order in which this code is executed depends on the label value
# of the encapsulating node.
# The given action must return the new value of the attribute.
#===============================================================================
return attr_value
"""
self.vs[0]["MT_label__"] = """10"""
self.vs[0]["mm__"] = """MT_post__directLink_S"""
self.vs[0]["GUID__"] = UUID('c0f56362-7e12-4590-9756-33db23950818')
self.vs[1]["MT_pivotOut__"] = """element1"""
self.vs[1]["MT_post__cardinality"] = """
#===============================================================================
# You can access the value of the current node's attribute value by: attr_value.
# If the current node shall be created you MUST initialize it here!
# You can access a node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# Note that the attribute values are those before the match is rewritten.
# The order in which this code is executed depends on the label value
# of the encapsulating node.
# The given action must return the new value of the attribute.
#===============================================================================
return attr_value
"""
self.vs[1]["MT_label__"] = """3"""
self.vs[1]["MT_post__name"] = """
#===============================================================================
# You can access the value of the current node's attribute value by: attr_value.
# If the current node shall be created you MUST initialize it here!
# You can access a node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# Note that the attribute values are those before the match is rewritten.
# The order in which this code is executed depends on the label value
# of the encapsulating node.
# The given action must return the new value of the attribute.
#===============================================================================
return attr_value
"""
self.vs[1]["mm__"] = """MT_post__MetaModelElement_S"""
self.vs[1]["MT_post__classtype"] = """
#===============================================================================
# You can access the value of the current node's attribute value by: attr_value.
# If the current node shall be created you MUST initialize it here!
# You can access a node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# Note that the attribute values are those before the match is rewritten.
# The order in which this code is executed depends on the label value
# of the encapsulating node.
# The given action must return the new value of the attribute.
#===============================================================================
return attr_value
"""
self.vs[1]["GUID__"] = UUID('3d31656d-fcab-4319-b429-f93376763b40')
self.vs[2]["MT_pivotOut__"] = """element2"""
self.vs[2]["MT_post__cardinality"] = """
#===============================================================================
# You can access the value of the current node's attribute value by: attr_value.
# If the current node shall be created you MUST initialize it here!
# You can access a node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# Note that the attribute values are those before the match is rewritten.
# The order in which this code is executed depends on the label value
# of the encapsulating node.
# The given action must return the new value of the attribute.
#===============================================================================
return attr_value
"""
self.vs[2]["MT_label__"] = """4"""
self.vs[2]["MT_post__name"] = """
#===============================================================================
# You can access the value of the current node's attribute value by: attr_value.
# If the current node shall be created you MUST initialize it here!
# You can access a node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# Note that the attribute values are those before the match is rewritten.
# The order in which this code is executed depends on the label value
# of the encapsulating node.
# The given action must return the new value of the attribute.
#===============================================================================
return attr_value
"""
self.vs[2]["mm__"] = """MT_post__MetaModelElement_S"""
self.vs[2]["MT_post__classtype"] = """
#===============================================================================
# You can access the value of the current node's attribute value by: attr_value.
# If the current node shall be created you MUST initialize it here!
# You can access a node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# Note that the attribute values are those before the match is rewritten.
# The order in which this code is executed depends on the label value
# of the encapsulating node.
# The given action must return the new value of the attribute.
#===============================================================================
return attr_value
"""
self.vs[2]["GUID__"] = UUID('e67bdb93-4acd-466f-8034-c44cda0ae8d0')
self.vs[3]["MT_post__cardinality"] = """
#===============================================================================
# You can access the value of the current node's attribute value by: attr_value.
# If the current node shall be created you MUST initialize it here!
# You can access a node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# Note that the attribute values are those before the match is rewritten.
# The order in which this code is executed depends on the label value
# of the encapsulating node.
# The given action must return the new value of the attribute.
#===============================================================================
return attr_value
"""
self.vs[3]["MT_label__"] = """5"""
self.vs[3]["MT_post__name"] = """
#===============================================================================
# You can access the value of the current node's attribute value by: attr_value.
# If the current node shall be created you MUST initialize it here!
# You can access a node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# Note that the attribute values are those before the match is rewritten.
# The order in which this code is executed depends on the label value
# of the encapsulating node.
# The given action must return the new value of the attribute.
#===============================================================================
return attr_value
"""
self.vs[3]["mm__"] = """MT_post__MetaModelElement_S"""
self.vs[3]["MT_post__classtype"] = """
#===============================================================================
# You can access the value of the current node's attribute value by: attr_value.
# If the current node shall be created you MUST initialize it here!
# You can access a node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# Note that the attribute values are those before the match is rewritten.
# The order in which this code is executed depends on the label value
# of the encapsulating node.
# The given action must return the new value of the attribute.
#===============================================================================
return attr_value
"""
self.vs[3]["GUID__"] = UUID('a629f835-159b-4149-ad11-a40743687ab7')
from HMoveOneInputRepeatedDirectMatchDiffRulesLHS import HMoveOneInputRepeatedDirectMatchDiffRulesLHS
self.pre = HMoveOneInputRepeatedDirectMatchDiffRulesLHS()
def action(self, PostNode, graph):
"""
Executable constraint code.
        @param PostNode: Function taking an integer as parameter
                         and returning the node corresponding to that label.
"""
#===============================================================================
# This code is executed after the rule has been applied.
# You can access a node labelled n matched by this rule by: PostNode('n').
# To access attribute x of node n, use: PostNode('n')['x'].
#===============================================================================
pass
def execute(self, packet, match):
"""
        Transforms the current match of the packet according to the rule
        HMoveOneInputRepeatedDirectMatchDiffRulesRHS.
Pivots are also assigned, if any.
@param packet: The input packet.
@param match: The match to rewrite.
"""
graph = packet.graph
# Build a dictionary {label: node index} mapping each label of the pattern to a node in the graph to rewrite.
# Because of the uniqueness property of labels in a rule, we can store all LHS labels
# and subsequently add the labels corresponding to the nodes to be created.
labels = match.copy()
#===============================================================================
# Update attribute values
#===============================================================================
#===============================================================================
# Create new nodes
#===============================================================================
#===============================================================================
# Create new edges
#===============================================================================
#===============================================================================
# Set the output pivots
#===============================================================================
# MetaModelElement_S3
packet.global_pivots['element1'] = graph.vs[labels['3']][Himesis.Constants.GUID]
# MetaModelElement_S4
packet.global_pivots['element2'] = graph.vs[labels['4']][Himesis.Constants.GUID]
#===============================================================================
# Perform the post-action
#===============================================================================
try:
self.action(lambda i: graph.vs[labels[i]], graph)
except Exception, e:
raise Exception('An error has occurred while applying the post-action', e)
#===============================================================================
# Finally, delete nodes (this will automatically delete the adjacent edges)
#===============================================================================
# MT_pre__directLink_S9
graph.delete_nodes([labels["9"]])
import syntax_tree
import symbol_table
import code_library
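# Size of one machine word in bytes; every scalar value occupies one
# 64-bit word on the stack and in memory.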
INTEGER_SIZE = 8
class Lazy_generator(object):
def generate(self, tree, table):
self.tree = tree
self.table = table
self.reset_library()
self.handle = -1
self.in_procedure = False
self.procedures = {}
self.read_only_declarations = []
self.code = ['\t.globl\tmain\n']
self.code.append('\t.text')
self.code.append('main:\n')
if tree:
self.generate_instructions(tree.instructions)
        self.code.append('__end_program:')
        # exit(0): syscall 60 takes the exit status in %rdi
        self.code.append('\t\txorq\t%rdi, %rdi')
        self.code.append('\t\tmovq\t$60, %rax')
        self.code.append('\t\tsyscall\n')
printed = []
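        # Emit the procedure bodies collected while generating instructions;
        # generating one body may discover calls to further procedures, so
        # drain the worklist until it is empty.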
while self.procedures:
to_print = self.procedures
self.procedures = {}
for procedure in to_print.values():
                if procedure not in printed:
self.generate_procedure(procedure)
printed.append(procedure)
self.link_library()
self.generate_variables()
if self.read_only_declarations:
self.code.append('\n\t.section .rodata')
for declaration in self.read_only_declarations:
self.code.append(declaration)
return self.code
def reset_library(self):
self.div_by_zero = False
self.mod_by_zero = False
self.bad_index = False
self.write_output = False
self.read_input = False
def new_handle(self):
self.handle += 1
return self.handle
def generate_instructions(self, instructions):
for instruction in instructions.instructions:
if type(instruction.child) is syntax_tree.Assign:
self.generate_assign(instruction.child)
elif type(instruction.child) is syntax_tree.If:
self.generate_if(instruction.child)
elif type(instruction.child) is syntax_tree.Repeat:
self.generate_repeat(instruction.child)
elif type(instruction.child) is syntax_tree.Read:
self.generate_read(instruction.child)
elif type(instruction.child) is syntax_tree.Call:
self.generate_call(instruction.child)
else: # Write
self.generate_write(instruction.child)
def generate_assign(self, assign):
self.code.append("__assign_at_{}:".format(assign.line))
self.generate_location_evaluator(assign.location)
count = assign.location.type_object.get_size() / INTEGER_SIZE
        if count == 1:
self.generate_expression_evaluator(assign.expression)
else:
self.generate_location_evaluator(assign.expression.child)
self.code.append('\t\tpopq\t%rcx')
self.code.append('\t\tpopq\t%rax')
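        # %rcx now holds the source value (or source address for multi-word
        # values) and %rax the destination address; multi-word assignments
        # are copied with an inline word-by-word loop.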
if count > 1:
self.code.append("\t\tmovq\t${}, %rdi".format(count))
handle = self.new_handle()
self.code.append("_loop_{}_:".format(handle))
self.code.append('\t\tmovq\t(%rcx), %rsi')
self.code.append('\t\tmovq\t%rsi, (%rax)')
self.code.append('\t\taddq\t$8, %rcx')
self.code.append('\t\taddq\t$8, %rax')
self.code.append('\t\tdecq\t%rdi')
self.code.append("\t\tjnz\t\t_loop_{}_".format(handle))
else:
self.code.append('\t\tmovq\t%rcx, (%rax)')
def generate_if(self, if_statement):
self.code.append("__if_at_{}:".format(if_statement.line))
self.generate_condition_evaluator(if_statement.condition)
handle = self.handle
self.code.append("_true_{}_:".format(handle))
self.generate_instructions(if_statement.instructions_true)
if if_statement.instructions_false:
self.code.append("\t\tjmp\t\t_end_{}_".format(handle))
self.code.append("_false_{}_:".format(handle))
if if_statement.instructions_false:
self.generate_instructions(if_statement.instructions_false)
self.code.append("_end_{}_:".format(handle))
def generate_repeat(self, repeat):
label = "__repeat_at_{}".format(repeat.line)
self.code.append(label + ':')
self.generate_condition_evaluator(repeat.condition)
handle = self.handle
self.code.append("_false_{}_:".format(handle))
self.generate_instructions(repeat.instructions)
self.code.append("\t\tjmp\t\t{}".format(label))
self.code.append("_true_{}_:".format(handle))
def generate_read(self, read):
self.code.append("__read_at_{}:".format(read.line))
self.code.append('\t\tpushq\t%rbx')
self.generate_location_evaluator(read.location)
self.code.append('\t\tpopq\t%rbx')
self.code.append('\t\tcall\t__read')
self.code.append('\t\tmovq\t%rax, (%rbx)')
self.code.append('\t\tpopq\t%rbx')
self.read_input = True
def generate_call(self, call):
if call.definition.instructions or call.definition.return_expression:
if call.actuals:
for actual in call.actuals:
if (type(actual.child) is syntax_tree.Location and
actual.type_object.get_size() > INTEGER_SIZE):
self.generate_location_evaluator(actual.child)
else:
self.generate_expression_evaluator(actual)
self.code.append("\t\tcall\t__{}__".format(call.definition.name))
            if call.definition.name not in self.procedures:
self.procedures[call.definition.name] = call.definition
if call.definition.formals:
n = len(call.definition.formals)
self.code.append("\t\taddq\t${}, %rsp".format(n * INTEGER_SIZE))
def generate_write(self, write):
self.code.append("__write_at_{}:".format(write.line))
self.generate_expression_evaluator(write.expression)
self.code.append('\t\tpopq\t%rdi')
self.code.append('\t\tcall\t__write_stdout')
self.write_output = True
def generate_procedure(self, procedure):
self.code.append("__{}__:".format(procedure.name))
self.code.append('\t\tpushq\t%rbp')
self.code.append('\t\tmovq\t%rsp, %rbp')
self.in_procedure = True
self.formals = procedure.formals
self.local_variables = []
offset = 0
for variable in procedure.scope.symbols:
            if not self.formals or variable not in self.formals:
self.local_variables.append(variable)
offset += procedure.scope.symbols[variable].get_size()
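        # Reserve one zero-initialised stack word per word of local storage,
        # save the caller's %rbx, then point %rbx at the base of the local
        # block (locals are addressed as offsets from %rbx).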
        times = offset / INTEGER_SIZE
for i in range(times):
self.code.append('\t\tpushq\t$0')
self.code.append('\t\tpushq\t%rbx')
self.code.append('\t\tleaq\t8(%rsp), %rbx')
if procedure.instructions:
self.generate_instructions(procedure.instructions)
if procedure.return_expression:
self.generate_expression_evaluator(procedure.return_expression)
self.code.append('\t\tpopq\t%rax')
self.in_procedure = False
self.code.append('\t\tpopq\t%rbx')
self.code.append('\t\tmovq\t%rbp, %rsp')
self.code.append('\t\tpopq\t%rbp')
self.code.append('\t\tret')
def generate_variables(self):
symbols = self.table.scopes[1].symbols
printed_data = False
for name, type_object in symbols.iteritems():
if not type(type_object) in [symbol_table.Variable, symbol_table.Array, symbol_table.Record,
symbol_table.Integer]:
continue
if not printed_data:
self.code.append('\n\t.data')
printed_data = True
self.code.append("{}_:\t\t.space {}".format(name, type_object.get_size()))
def generate_condition_evaluator(self, condition):
self.generate_expression_evaluator(condition.expression_left)
        if type(condition.expression_right.child) is not syntax_tree.Number:
self.generate_expression_evaluator(condition.expression_right)
self.code.append('\t\tpopq\t%rcx')
self.code.append('\t\tpopq\t%rax')
self.code.append('\t\tcmpq\t%rcx, %rax')
else:
self.code.append('\t\tpopq\t%rax')
value = condition.expression_right.child.table_entry.value
self.code.append("\t\tcmpq\t${}, %rax".format(value))
self.new_handle()
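        # Every relation jumps to _true_<handle>_ or _false_<handle>_; the
        # caller (generate_if / generate_repeat) emits those labels around
        # its instruction blocks.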
if condition.relation == '=':
self.code.append("\t\tje\t\t_true_{}_".format(self.handle))
self.code.append("\t\tjmp\t\t_false_{}_".format(self.handle))
elif condition.relation == '#':
self.code.append("\t\tjne\t\t_true_{}_".format(self.handle))
self.code.append("\t\tjmp\t\t_false_{}_".format(self.handle))
elif condition.relation == '<':
self.code.append("\t\tjl\t\t_true_{}_".format(self.handle))
self.code.append("\t\tjmp\t\t_false_{}_".format(self.handle))
elif condition.relation == '>':
self.code.append("\t\tjg\t\t_true_{}_".format(self.handle))
self.code.append("\t\tjmp\t\t_false_{}_".format(self.handle))
elif condition.relation == '<=':
self.code.append("\t\tjle\t\t_true_{}_".format(self.handle))
self.code.append("\t\tjmp\t\t_false_{}_".format(self.handle))
else: # >=
self.code.append("\t\tjge\t\t_true_{}_".format(self.handle))
self.code.append("\t\tjmp\t\t_false_{}_".format(self.handle))
def generate_location_evaluator(self, location):
if type(location.child) is syntax_tree.Field:
field = location.child
self.generate_location_evaluator(field.location)
offset = field.location.type_object.get_offset(field.variable.name)
if offset:
self.code.append('\t\tpopq\t%rax')
self.code.append("\t\taddq\t${}, %rax".format(offset))
self.code.append('\t\tpushq\t%rax')
elif type(location.child) is syntax_tree.Index:
index = location.child
self.generate_location_evaluator(index.location)
            if type(index.expression.child) is not syntax_tree.Number:
self.generate_expression_evaluator(index.expression)
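                # Runtime bounds check: a negative index or one at or beyond
                # the array size jumps to the shared __error_bad_index
                # handler with the source line and offending value.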
self.code.append('\t\tpopq\t%rcx')
self.code.append('\t\tpopq\t%rax')
self.code.append('\t\tcmpq\t$0, %rcx')
self.new_handle()
self.code.append("\t\tjl\t\t_error_{}_".format(self.handle))
self.code.append("\t\tcmpq\t${}, %rcx".format(index.location.type_object.size))
self.code.append("\t\tjl\t\t_no_error_{}_".format(self.handle))
self.code.append("_error_{}_:".format(self.handle))
self.code.append("\t\tmovq\t${}, %rdi".format(index.expression.line))
self.code.append('\t\tmovq\t%rcx, %rsi')
self.code.append('\t\tjmp\t\t__error_bad_index')
self.code.append("_no_error_{}_:".format(self.handle))
self.code.append("\t\timulq\t${}, %rcx".format(index.type_object.get_size()))
self.code.append('\t\taddq\t%rcx, %rax')
self.code.append('\t\tpushq\t%rax')
self.bad_index = True
else:
value = index.expression.child.table_entry.value
offset = index.location.type_object.get_offset(value)
if offset:
self.code.append('\t\tpopq\t%rax')
self.code.append("\t\taddq\t${}, %rax".format(offset))
self.code.append('\t\tpushq\t%rax')
elif not self.in_procedure:
self.code.append("\t\tmovq\t${}_, %rax".format(location.child.name))
self.code.append('\t\tpushq\t%rax')
else:
variable = location.child
if variable.name in self.local_variables:
offset = self.local_variables.index(variable.name) * INTEGER_SIZE
if not offset:
self.code.append('\t\tpushq\t%rbx')
else:
self.code.append("\t\tleaq\t{}(%rbx), %rax".format(offset))
self.code.append('\t\tpushq\t%rax')
else:
offset = -1
n = 2
if self.formals:
for formal in self.formals[::-1]:
if formal == variable.name:
offset = INTEGER_SIZE * n
break
n += 1
                if offset != -1:
if location.type_object.get_size() > INTEGER_SIZE:
self.code.append("\t\tmovq\t{}(%rbp), %rax".format(offset))
else:
self.code.append("\t\tleaq\t{}(%rbp), %rax".format(offset))
else:
self.code.append("\t\tmovq\t${}_, %rax".format(variable.name))
self.code.append('\t\tpushq\t%rax')
def generate_expression_evaluator(self, expression):
if type(expression.child) is syntax_tree.Number:
self.code.append("\t\tpushq\t${}".format(expression.child.table_entry.value))
elif type(expression.child) is syntax_tree.Location:
self.generate_location_evaluator(expression.child)
self.code.append('\t\tpopq\t%rax')
self.code.append('\t\tmovq\t(%rax), %rcx')
self.code.append('\t\tpushq\t%rcx')
elif type(expression.child) is syntax_tree.Binary:
if expression.child.operator == '+':
self.generate_addition_like_evaluator(expression.child, 'addq')
elif expression.child.operator == '-':
self.generate_addition_like_evaluator(expression.child, 'subq')
elif expression.child.operator == '*':
self.generate_addition_like_evaluator(expression.child, 'imulq')
elif expression.child.operator == 'DIV':
self.generate_division_evaluator(expression.child, '__error_div_by_zero', '%rax')
else: # MOD
self.generate_division_evaluator(expression.child, '__error_mod_by_zero', '%rdx')
elif type(expression.child) is syntax_tree.Call:
self.generate_call(expression.child)
self.code.append('\t\tpushq\t%rax')
def generate_addition_like_evaluator(self, binary, operation):
self.generate_expression_evaluator(binary.expression_left)
        if type(binary.expression_right.child) is not syntax_tree.Number:
self.generate_expression_evaluator(binary.expression_right)
self.code.append('\t\tpopq\t%rcx')
self.code.append('\t\tpopq\t%rax')
self.code.append("\t\t{}\t%rcx, %rax".format(operation))
else:
self.code.append('\t\tpopq\t%rax')
value = binary.expression_right.child.table_entry.value
self.code.append("\t\t{}\t${}, %rax".format(operation, value))
self.code.append('\t\tpushq\t%rax')
def generate_division_evaluator(self, binary, error_function, return_register):
self.generate_expression_evaluator(binary.expression_left)
self.generate_expression_evaluator(binary.expression_right)
self.code.append('\t\tpopq\t%rcx')
self.code.append('\t\tpopq\t%rax')
self.new_handle()
        if type(binary.expression_right.child) is not syntax_tree.Number:
self.code.append('\t\tcmpq\t$0, %rcx')
self.code.append("\t\tjne\t\t_no_error_{}_".format(self.handle))
self.code.append("\t\tmovq\t${}, %rdi".format(binary.line))
self.code.append("\t\tjmp\t\t{}".format(error_function))
if error_function == '__error_div_by_zero':
self.div_by_zero = True
if error_function == '__error_mod_by_zero':
self.mod_by_zero = True
self.code.append("_no_error_{}_:".format(self.handle))
self.code.append('\t\tmovq\t%rax, %rdx')
self.code.append('\t\tsarq\t$63, %rdx')
self.code.append('\t\tidivq\t%rcx')
self.code.append("\t\tpushq\t{}".format(return_register))
def link_library(self):
evaluated_to_zero = False
stderr_printing = False
write_code = False
if self.div_by_zero:
self.code.append(code_library.error_div_by_zero_code)
decl = '_div_error:\t\t.ascii "error: The right side of the DIV expression on line "'
self.read_only_declarations.append(decl)
evaluated_to_zero = True
stderr_printing = True
if self.mod_by_zero:
self.code.append(code_library.error_mod_by_zero_code)
decl = '_mod_error:\t\t.ascii "error: The right size of the MOD expression on line "'
self.read_only_declarations.append(decl)
evaluated_to_zero = True
stderr_printing = True
if evaluated_to_zero:
self.read_only_declarations.append('_zero_end:\t\t.ascii ") evaluated to zero\\n"')
if self.bad_index:
self.code.append(code_library.error_bad_index_code)
decl = '_index_range:\t.ascii "error: Index out of range: the expression on line "'
self.read_only_declarations.append(decl)
self.read_only_declarations.append('_evaluated_to:\t.ascii " evaluated to "')
stderr_printing = True
if self.write_output:
self.code.append(code_library.write_stdout_code)
write_code = True
if stderr_printing:
self.code.append(code_library.write_stderr_code)
write_code = True
if write_code:
self.code.append(code_library.write_code)
if self.read_input:
self.code.append(code_library.read_code)
self.code.append(code_library.error_bad_input_code)
decl = '_bad_input:\t\t.ascii "error: The input was not an integer\\n"'
self.read_only_declarations.append(decl)
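# A minimal driver sketch (hypothetical names; assumes a front end producing
# the syntax_tree and symbol_table objects this module expects):
#
#     generator = Lazy_generator()
#     lines = generator.generate(tree, table)
#     with open('out.s', 'w') as out:
#         out.write('\n'.join(lines))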